{"text":"package myaws\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awsutil\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/autoscaling\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ECSNodeRenewOptions customize the behavior of the Renew command.\ntype ECSNodeRenewOptions struct {\n\tCluster string\n\tAsgName string\n}\n\n\/\/ ECSNodeRenew renew ECS container instances with blue-green deployment.\n\/\/ This method is an automation process to renew your ECS container instances\n\/\/ if you update the AMI. creates new instances, drains the old instances,\n\/\/ and discards the old instances.\nfunc (client *Client) ECSNodeRenew(options ECSNodeRenewOptions) error {\n\tfmt.Fprintf(client.stdout, \"start: ecs node renew\\noptions: %s\\n\", awsutil.Prettify(options))\n\n\tif err := client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ get the current desired capacity\n\tdesiredCapacity, err := client.getAutoScalingGroupDesiredCapacity(options.AsgName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ list the current container instances\n\toldNodes, err := client.findECSNodes(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(oldNodes) != int(desiredCapacity) {\n\t\treturn errors.Errorf(\"assertion failed: currentCapacity(%d) != desiredCapacity(%d)\", len(oldNodes), desiredCapacity)\n\t}\n\n\t\/\/ Update the desired capacity and wait until new instances are InService\n\t\/\/ We simply double the number of instances here.\n\t\/\/ If you need more flexible control, please implement a strategy such as\n\t\/\/ rolling update.\n\ttargetCapacity := desiredCapacity * 2\n\n\tfmt.Fprintf(client.stdout, \"Update autoscaling group %s (DesiredCapacity: %d => %d)\\n\", options.AsgName, desiredCapacity, targetCapacity)\n\n\terr = client.AutoscalingUpdate(AutoscalingUpdateOptions{\n\t\tAsgName: options.AsgName,\n\t\tDesiredCapacity: targetCapacity,\n\t\tWait: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A status of instance in autoscaling group is InService doesn't mean the\n\t\/\/ container instance is registered. 
We should make sure container instances\n\t\/\/ are registered\n\tfmt.Fprintln(client.stdout, \"Wait until ECS container instances are registered...\")\n\terr = client.WaitUntilECSContainerInstancesAreRegistered(options.Cluster, targetCapacity)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ drain old container instances and wait until no task running\n\toldNodeArns := []*string{}\n\tfor _, oldNode := range oldNodes {\n\t\toldNodeArns = append(oldNodeArns, oldNode.ContainerInstanceArn)\n\t}\n\tfmt.Fprintf(client.stdout, \"Drain old container instances and wait until no task running...\\n%v\\n\", awsutil.Prettify(oldNodeArns))\n\terr = client.ECSNodeDrain(ECSNodeDrainOptions{\n\t\tCluster: options.Cluster,\n\t\tContainerInstances: oldNodeArns,\n\t\tWait: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ All old container instances are drained doesn't mean all services are stable.\n\t\/\/ It depends on the deployment strategy of each service.\n\t\/\/ We should make sure all services are stable\n\tfmt.Fprintln(client.stdout, \"Wait until all ECS services stable...\")\n\terr = client.WaitUntilECSAllServicesStable(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A stable state for all services does not mean that all targets are healthy.\n\t\/\/ We need to explicitly confirm it.\n\tfmt.Fprintln(client.stdout, \"Wait until all targets healthy...\")\n\terr = client.WaitUntilECSAllTargetsInService(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get a list of instance IDs before auto scaling\n\tvar oldInstanceIds []*string\n\tfor _, oldNode := range oldNodes {\n\t\toldInstanceIds = append(oldInstanceIds, oldNode.Ec2InstanceId)\n\t}\n\n\t\/\/ Get a list of instances after auto scaling\n\tallNodes, err := client.findECSNodes(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Get a list of instance IDs after auto scaling\n\tvar allInstanceIds []*string\n\tfor _, allNode := range allNodes {\n\t\tallInstanceIds = append(allInstanceIds, allNode.Ec2InstanceId)\n\t}\n\n\t\/\/ Select instances to protect from scale in.\n\t\/\/ By setting \"scale-in protection\" to instances created at scale-out,\n\t\/\/ the intended instances (instances created before scale-in) are only terminated at scale-in process.\n\tprotectInstanceIds, err := client.selectInstanceToProtectFromScaleIn(oldInstanceIds, allInstanceIds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintln(client.stdout, \"Setting scale in protection: \", awsutil.Prettify(protectInstanceIds))\n\t\/\/ set \"scale in protection\" to instances created at scale-out.\n\terr = client.AutoScalingSetInstanceProtection(AutoScalingSetInstanceProtectionOptions{\n\t\toptions.AsgName,\n\t\tprotectInstanceIds,\n\t\ttrue})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ restore the desired capacity and wait until old instances are discarded\n\tfmt.Fprintf(client.stdout, \"Update autoscaling group %s (DesiredCapacity: %d => %d)\\n\", options.AsgName, targetCapacity, desiredCapacity)\n\n\terr = client.AutoscalingUpdate(AutoscalingUpdateOptions{\n\t\tAsgName: options.AsgName,\n\t\tDesiredCapacity: desiredCapacity,\n\t\tWait: true,\n\t})\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\t\/\/ remove \"scale in protection\" from instances created at scale-out.\n\tfmt.Fprintln(client.stdout, \"Removing scale in protection: \", awsutil.Prettify(protectInstanceIds))\n\terr = client.AutoScalingSetInstanceProtection(AutoScalingSetInstanceProtectionOptions{\n\t\toptions.AsgName,\n\t\tprotectInstanceIds,\n\t\tfalse})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintln(client.stdout, \"end: ecs node renew\")\n\treturn nil\n}\n\n\/\/ selectInstanceToProtectFromScaleIn selects instances to protect from scale-in.\n\/\/ instance selection rule:\n\/\/ instances after scale-out - instances before scale-out - instances which already have `InstanceProtection==true`\nfunc (client *Client) selectInstanceToProtectFromScaleIn(oldInstanceIds, allInstanceIds []*string) ([]*string, error) {\n\t\/\/ get newly created nodes (allInstanceIds - oldInstanceIds)\n\tnewInstanceIds := difference(allInstanceIds, oldInstanceIds)\n\n\t\/\/ exclude ProtectedFromScaleIn == true nodes\n\tparams := &autoscaling.DescribeAutoScalingInstancesInput{\n\t\tInstanceIds: newInstanceIds,\n\t}\n\tresponse, err := client.AutoScaling.DescribeAutoScalingInstances(params)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"DescribeAutoScalingInstances failed\")\n\t}\n\n\tvar targetInstanceIds []*string\n\tfor _, instance := range response.AutoScalingInstances {\n\t\tif !*instance.ProtectedFromScaleIn {\n\t\t\ttargetInstanceIds = append(targetInstanceIds, instance.InstanceId)\n\t\t}\n\t}\n\treturn targetInstanceIds, nil\n}\n\n\/\/ difference returns the elements in `a` that aren't in `b`.\nfunc difference(a, b []*string) []*string {\n\tmb := make(map[string]struct{}, len(b))\n\tfor _, x := range b {\n\t\tmb[*x] = struct{}{}\n\t}\n\tvar diff []*string\n\tfor _, x := range a {\n\t\tif _, ok := mb[*x]; !ok {\n\t\t\tdiff = append(diff, x)\n\t\t}\n\t}\n\treturn diff\n}\nmove instance ID retrieval process into selectInstanceToProtectFromScaleIn functionpackage myaws\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awsutil\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/autoscaling\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ecs\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ ECSNodeRenewOptions customizes the behavior of the Renew command.\ntype ECSNodeRenewOptions struct {\n\tCluster string\n\tAsgName string\n}\n\n\/\/ ECSNodeRenew renews ECS container instances with blue-green deployment.\n\/\/ This method automates renewing your ECS container instances\n\/\/ when you update the AMI. It 
creates new instances, drains the old instances,\n\/\/ and discards the old instances.\nfunc (client *Client) ECSNodeRenew(options ECSNodeRenewOptions) error {\n\tfmt.Fprintf(client.stdout, \"start: ecs node renew\\noptions: %s\\n\", awsutil.Prettify(options))\n\n\tif err := client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ get the current desired capacity\n\tdesiredCapacity, err := client.getAutoScalingGroupDesiredCapacity(options.AsgName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ list the current container instances\n\toldNodes, err := client.findECSNodes(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(oldNodes) != int(desiredCapacity) {\n\t\treturn errors.Errorf(\"assertion failed: currentCapacity(%d) != desiredCapacity(%d)\", len(oldNodes), desiredCapacity)\n\t}\n\n\t\/\/ Update the desired capacity and wait until new instances are InService\n\t\/\/ We simply double the number of instances here.\n\t\/\/ If you need more flexible control, please implement a strategy such as\n\t\/\/ rolling update.\n\ttargetCapacity := desiredCapacity * 2\n\n\tfmt.Fprintf(client.stdout, \"Update autoscaling group %s (DesiredCapacity: %d => %d)\\n\", options.AsgName, desiredCapacity, targetCapacity)\n\n\terr = client.AutoscalingUpdate(AutoscalingUpdateOptions{\n\t\tAsgName: options.AsgName,\n\t\tDesiredCapacity: targetCapacity,\n\t\tWait: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A status of instance in autoscaling group is InService doesn't mean the\n\t\/\/ container instance is registered. We should make sure container instances\n\t\/\/ are registered\n\tfmt.Fprintln(client.stdout, \"Wait until ECS container instances are registered...\")\n\terr = client.WaitUntilECSContainerInstancesAreRegistered(options.Cluster, targetCapacity)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ drain old container instances and wait until no task running\n\toldNodeArns := []*string{}\n\tfor _, oldNode := range oldNodes {\n\t\toldNodeArns = append(oldNodeArns, oldNode.ContainerInstanceArn)\n\t}\n\tfmt.Fprintf(client.stdout, \"Drain old container instances and wait until no task running...\\n%v\\n\", awsutil.Prettify(oldNodeArns))\n\terr = client.ECSNodeDrain(ECSNodeDrainOptions{\n\t\tCluster: options.Cluster,\n\t\tContainerInstances: oldNodeArns,\n\t\tWait: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ All old container instances are drained doesn't mean all services are stable.\n\t\/\/ It depends on the deployment strategy of each service.\n\t\/\/ We should make sure all services are stable\n\tfmt.Fprintln(client.stdout, \"Wait until all ECS services stable...\")\n\terr = client.WaitUntilECSAllServicesStable(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ A stable state for all services does not mean that all targets are healthy.\n\t\/\/ We need to explicitly confirm it.\n\tfmt.Fprintln(client.stdout, \"Wait until all targets healthy...\")\n\terr = client.WaitUntilECSAllTargetsInService(options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Select instances to 
protect from scale-in.\n\t\/\/ By setting \"scale-in protection\" on the instances created at scale-out,\n\t\/\/ only the intended instances (those that existed before the scale-out) are\n\t\/\/ terminated during the scale-in process.\n\tprotectInstanceIds, err := client.selectInstanceToProtectFromScaleIn(oldNodes, options.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintln(client.stdout, \"Setting scale in protection: \", awsutil.Prettify(protectInstanceIds))\n\t\/\/ set \"scale in protection\" on instances created at scale-out.\n\terr = client.AutoScalingSetInstanceProtection(AutoScalingSetInstanceProtectionOptions{\n\t\toptions.AsgName,\n\t\tprotectInstanceIds,\n\t\ttrue})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ restore the desired capacity and wait until old instances are discarded\n\tfmt.Fprintf(client.stdout, \"Update autoscaling group %s (DesiredCapacity: %d => %d)\\n\", options.AsgName, targetCapacity, desiredCapacity)\n\n\terr = client.AutoscalingUpdate(AutoscalingUpdateOptions{\n\t\tAsgName: options.AsgName,\n\t\tDesiredCapacity: desiredCapacity,\n\t\tWait: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove \"scale in protection\" from instances created at scale-out.\n\tfmt.Fprintln(client.stdout, \"Removing scale in protection: \", awsutil.Prettify(protectInstanceIds))\n\terr = client.AutoScalingSetInstanceProtection(AutoScalingSetInstanceProtectionOptions{\n\t\toptions.AsgName,\n\t\tprotectInstanceIds,\n\t\tfalse})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = client.printECSStatus(options.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintln(client.stdout, \"end: ecs node renew\")\n\treturn nil\n}\n\n\/\/ selectInstanceToProtectFromScaleIn selects instances to protect from scale-in.\n\/\/ instance selection rule:\n\/\/ instances after scale-out - instances before scale-out - instances which already have `InstanceProtection==true`\nfunc (client *Client) selectInstanceToProtectFromScaleIn(oldNodes []*ecs.ContainerInstance, cluster string) ([]*string, error) {\n\t\/\/ Get a list of instance IDs before auto scaling\n\tvar oldInstanceIds []*string\n\tfor _, oldNode := range oldNodes {\n\t\toldInstanceIds = append(oldInstanceIds, oldNode.Ec2InstanceId)\n\t}\n\n\t\/\/ Get a list of instances after auto scaling\n\tallNodes, err := client.findECSNodes(cluster)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get a list of instance IDs after auto scaling\n\tvar allInstanceIds []*string\n\tfor _, allNode := range allNodes {\n\t\tallInstanceIds = append(allInstanceIds, allNode.Ec2InstanceId)\n\t}\n\n\t\/\/ get newly created nodes (allInstanceIds - oldInstanceIds)\n\tnewInstanceIds := difference(allInstanceIds, oldInstanceIds)\n\n\t\/\/ exclude ProtectedFromScaleIn == true nodes\n\tparams := &autoscaling.DescribeAutoScalingInstancesInput{\n\t\tInstanceIds: newInstanceIds,\n\t}\n\tresponse, err := client.AutoScaling.DescribeAutoScalingInstances(params)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"DescribeAutoScalingInstances failed\")\n\t}\n\n\tvar targetInstanceIds []*string\n\tfor _, instance := range response.AutoScalingInstances {\n\t\tif !*instance.ProtectedFromScaleIn {\n\t\t\ttargetInstanceIds = append(targetInstanceIds, instance.InstanceId)\n\t\t}\n\t}\n\treturn targetInstanceIds, nil\n}\n\n\/\/ difference returns the elements in `a` that aren't in `b`.\nfunc difference(a, b []*string) []*string {\n\tmb := make(map[string]struct{}, len(b))\n\tfor _, x := range b {\n\t\tmb[*x] = struct{}{}\n\t}\n\tvar diff []*string\n\tfor _, x := range 
a {\n\t\tif _, ok := mb[*x]; !ok {\n\t\t\tdiff = append(diff, x)\n\t\t}\n\t}\n\treturn diff\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"flag\"\n)\n\ntype User struct {\n\tID string `json:\"id,omitempty\"`\n\tFirstname string `json:\"firstname,omitempty\"`\n\tLastname string `json:\"lastname,omitempty\"`\n\tMail string `json:\"mail,omitempty\"`\n}\n\nvar users []User\n\nfunc GetPersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor _, item := range users {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(&User{})\n}\n\nfunc GetPeopleEndpoint(w http.ResponseWriter, req *http.Request) {\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc CreatePersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tvar user User\n\t_ = json.NewDecoder(req.Body).Decode(&user)\n\tuser.ID = params[\"id\"]\n\tusers = append(users, user)\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc DeletePersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor index, item := range users {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tusers = append(users[:index], users[index + 1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc main() {\n\tport := flag.String(\"port\", \"8080\", \"HTTP Port\")\n\tflag.Parse()\n\trouter := mux.NewRouter()\n\tusers = append(users, User{ID: \"1\", Firstname: \"Ugo\", Lastname: \"Landini\", Mail: \"ulandini@redhat.com\"})\n\tusers = append(users, User{ID: \"2\", Firstname: \"Samuele\", Lastname: \"Dell'Angelo\", Mail: \"sdellang@redhat.com\"})\n\tusers = append(users, User{ID: \"3\", Firstname: \"Andrea\", Lastname: \"Leoncini\", Mail: \"aleoncin@redhat.com\"})\n\tusers = append(users, User{ID: \"4\", Firstname: \"Giuseppe\", Lastname: \"Bonocore\", Mail: \"gbonocor@redhat.com\"})\n\tusers = append(users, User{ID: \"5\", Firstname: \"Filippo\", Lastname: \"Calà\", Mail: \"fcala@redhat.com\"})\n\tusers = append(users, User{ID: \"6\", Firstname: \"Luca\", Lastname: \"Bigotta\", Mail: \"lbigotta@redhat.com\"})\n\n\trouter.HandleFunc(\"\/api\/users\", GetPeopleEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", GetPersonEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", CreatePersonEndpoint).Methods(\"POST\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", DeletePersonEndpoint).Methods(\"DELETE\")\n\n\tlog.Fatal(http.ListenAndServe(\":\" + *port, handlers.CORS(handlers.AllowedMethods([]string{\"DELETE\", \"POST\", \"GET\", \"HEAD\" }))(router)))\n\n}fixed CORS for preflightpackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"flag\"\n)\n\ntype User struct {\n\tID string `json:\"id,omitempty\"`\n\tFirstname string `json:\"firstname,omitempty\"`\n\tLastname string `json:\"lastname,omitempty\"`\n\tMail string `json:\"mail,omitempty\"`\n}\n\nvar users []User\n\nfunc GetPersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor _, item := range users {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(&User{})\n}\n\nfunc GetPeopleEndpoint(w http.ResponseWriter, req *http.Request) {\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc 
CreatePersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tvar user User\n\t_ = json.NewDecoder(req.Body).Decode(&user)\n\tuser.ID = params[\"id\"]\n\tusers = append(users, user)\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc DeletePersonEndpoint(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor index, item := range users {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tusers = append(users[:index], users[index + 1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(users)\n}\n\nfunc main() {\n\tport := flag.String(\"port\", \"8080\", \"HTTP Port\")\n\tflag.Parse()\n\trouter := mux.NewRouter()\n\tusers = append(users, User{ID: \"1\", Firstname: \"Ugo\", Lastname: \"Landini\", Mail: \"ulandini@redhat.com\"})\n\tusers = append(users, User{ID: \"2\", Firstname: \"Samuele\", Lastname: \"Dell'Angelo\", Mail: \"sdellang@redhat.com\"})\n\tusers = append(users, User{ID: \"3\", Firstname: \"Andrea\", Lastname: \"Leoncini\", Mail: \"aleoncin@redhat.com\"})\n\tusers = append(users, User{ID: \"4\", Firstname: \"Giuseppe\", Lastname: \"Bonocore\", Mail: \"gbonocor@redhat.com\"})\n\tusers = append(users, User{ID: \"5\", Firstname: \"Filippo\", Lastname: \"Calà\", Mail: \"fcala@redhat.com\"})\n\tusers = append(users, User{ID: \"6\", Firstname: \"Luca\", Lastname: \"Bigotta\", Mail: \"lbigotta@redhat.com\"})\n\n\trouter.HandleFunc(\"\/api\/users\", GetPeopleEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", GetPersonEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", CreatePersonEndpoint).Methods(\"POST\")\n\trouter.HandleFunc(\"\/api\/users\/{id}\", DeletePersonEndpoint).Methods(\"DELETE\")\n\n\tlog.Fatal(http.ListenAndServe(\":\" + *port, handlers.CORS(handlers.AllowedMethods([]string{\"DELETE\", \"POST\", \"GET\", \"HEAD\", \"OPTIONS\" }))(router)))\n\n}<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"code.google.com\/p\/weed-fs\/go\/operation\"\n\t\"code.google.com\/p\/weed-fs\/go\/util\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n)\n\nvar (\n\tuploadReplication *string\n\tuploadDir *string\n)\n\nfunc init() {\n\tcmdUpload.Run = runUpload \/\/ break init cycle\n\tcmdUpload.IsDebug = cmdUpload.Flag.Bool(\"debug\", false, \"verbose debug information\")\n\tserver = cmdUpload.Flag.String(\"server\", \"localhost:9333\", \"weedfs master location\")\n\tuploadDir = cmdUpload.Flag.String(\"dir\", \"\", \"Upload the whole folder recursively if specified.\")\n\tuploadReplication = cmdUpload.Flag.String(\"replication\", \"000\", \"replication type(000,001,010,100,110,200)\")\n}\n\nvar cmdUpload = &Command{\n\tUsageLine: \"upload -server=localhost:9333 file1 [file2 file3]\\n upload -server=localhost:9333 -dir=one_directory\",\n\tShort: \"upload one or a list of files\",\n\tLong: `upload one or a list of files, or batch upload one whole folder recursively.\n It uses consecutive file keys for the list of files.\n e.g. 
If the file1 uses key k, file2 can be read via k_1\n\n `,\n}\n\ntype AssignResult struct {\n\tFid string `json:\"fid\"`\n\tUrl string `json:\"url\"`\n\tPublicUrl string `json:\"publicUrl\"`\n\tCount int\n\tError string `json:\"error\"`\n}\n\nfunc assign(count int) (*AssignResult, error) {\n\tvalues := make(url.Values)\n\tvalues.Add(\"count\", strconv.Itoa(count))\n\tvalues.Add(\"replication\", *uploadReplication)\n\tjsonBlob, err := util.Post(\"http:\/\/\"+*server+\"\/dir\/assign\", values)\n\tdebug(\"assign result :\", string(jsonBlob))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret AssignResult\n\terr = json.Unmarshal(jsonBlob, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ret.Count <= 0 {\n\t\treturn nil, errors.New(ret.Error)\n\t}\n\treturn &ret, nil\n}\n\nfunc upload(filename string, server string, fid string) (int, error) {\n\tdebug(\"Start uploading file:\", filename)\n\tfh, err := os.Open(filename)\n\tif err != nil {\n\t\tdebug(\"Failed to open file:\", filename)\n\t\treturn 0, err\n\t}\n\tfi, fiErr := fh.Stat()\n\tif fiErr != nil {\n\t\tdebug(\"Failed to stat file:\", filename)\n\t\treturn 0, fiErr\n\t}\n\tret, e := operation.Upload(\"http:\/\/\"+server+\"\/\"+fid+\"?ts=\"+strconv.Itoa(int(fi.ModTime().Unix())), path.Base(filename), fh)\n\tif e != nil {\n\t\treturn 0, e\n\t}\n\treturn ret.Size, e\n}\n\ntype SubmitResult struct {\n\tFileName string `json:\"fileName\"`\n\tFileUrl string `json:\"fileUrl\"`\n\tFid string `json:\"fid\"`\n\tSize int `json:\"size\"`\n\tError string `json:\"error\"`\n}\n\nfunc submit(files []string) ([]SubmitResult, error) {\n\tresults := make([]SubmitResult, len(files))\n\tfor index, file := range files {\n\t\tresults[index].FileName = file\n\t}\n\tret, err := assign(len(files))\n\tif err != nil {\n\t\tfor index, _ := range files {\n\t\t\tresults[index].Error = err.Error()\n\t\t}\n\t\treturn results, err\n\t}\n\tfor index, file := range files {\n\t\tfid := ret.Fid\n\t\tif index > 0 {\n\t\t\tfid = fid + \"_\" + strconv.Itoa(index)\n\t\t}\n\t\tresults[index].Size, err = upload(file, ret.PublicUrl, fid)\n\t\tif err != nil {\n\t\t\tfid = \"\"\n\t\t\tresults[index].Error = err.Error()\n\t\t}\n\t\tresults[index].Fid = fid\n\t\tresults[index].FileUrl = ret.PublicUrl + \"\/\" + fid\n\t}\n\treturn results, nil\n}\n\nfunc runUpload(cmd *Command, args []string) bool {\n\tif len(cmdUpload.Flag.Args()) == 0 {\n\t\tif *uploadDir == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfilepath.Walk(*uploadDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif !info.IsDir() {\n results, e := submit([]string{path})\n\t\t\t\tbytes, _ := json.Marshal(results)\n\t\t\t\tfmt.Println(string(bytes))\n\t\t\t\tif e != nil {\n\t\t\t\t return e\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t})\n\t} else {\n\t\tresults, _ := submit(args)\n\t\tbytes, _ := json.Marshal(results)\n\t\tfmt.Println(string(bytes))\n\t}\n\treturn true\n}\nbetter error message if directory is not foundpackage main\n\nimport (\n\t\"code.google.com\/p\/weed-fs\/go\/operation\"\n\t\"code.google.com\/p\/weed-fs\/go\/util\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n)\n\nvar (\n\tuploadReplication *string\n\tuploadDir *string\n)\n\nfunc init() {\n\tcmdUpload.Run = runUpload \/\/ break init cycle\n\tcmdUpload.IsDebug = cmdUpload.Flag.Bool(\"debug\", false, \"verbose debug information\")\n\tserver = cmdUpload.Flag.String(\"server\", \"localhost:9333\", \"weedfs master location\")\n\tuploadDir = 
cmdUpload.Flag.String(\"dir\", \"\", \"Upload the whole folder recursively if specified.\")\n\tuploadReplication = cmdUpload.Flag.String(\"replication\", \"000\", \"replication type(000,001,010,100,110,200)\")\n}\n\nvar cmdUpload = &Command{\n\tUsageLine: \"upload -server=localhost:9333 file1 [file2 file3]\\n upload -server=localhost:9333 -dir=one_directory\",\n\tShort: \"upload one or a list of files\",\n\tLong: `upload one or a list of files, or batch upload one whole folder recursively.\n It uses consecutive file keys for the list of files.\n e.g. If the file1 uses key k, file2 can be read via k_1\n\n `,\n}\n\ntype AssignResult struct {\n\tFid string `json:\"fid\"`\n\tUrl string `json:\"url\"`\n\tPublicUrl string `json:\"publicUrl\"`\n\tCount int\n\tError string `json:\"error\"`\n}\n\nfunc assign(count int) (*AssignResult, error) {\n\tvalues := make(url.Values)\n\tvalues.Add(\"count\", strconv.Itoa(count))\n\tvalues.Add(\"replication\", *uploadReplication)\n\tjsonBlob, err := util.Post(\"http:\/\/\"+*server+\"\/dir\/assign\", values)\n\tdebug(\"assign result :\", string(jsonBlob))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret AssignResult\n\terr = json.Unmarshal(jsonBlob, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ret.Count <= 0 {\n\t\treturn nil, errors.New(ret.Error)\n\t}\n\treturn &ret, nil\n}\n\nfunc upload(filename string, server string, fid string) (int, error) {\n\tdebug(\"Start uploading file:\", filename)\n\tfh, err := os.Open(filename)\n\tif err != nil {\n\t\tdebug(\"Failed to open file:\", filename)\n\t\treturn 0, err\n\t}\n\tfi, fiErr := fh.Stat()\n\tif fiErr != nil {\n\t\tdebug(\"Failed to stat file:\", filename)\n\t\treturn 0, fiErr\n\t}\n\tret, e := operation.Upload(\"http:\/\/\"+server+\"\/\"+fid+\"?ts=\"+strconv.Itoa(int(fi.ModTime().Unix())), path.Base(filename), fh)\n\tif e != nil {\n\t\treturn 0, e\n\t}\n\treturn ret.Size, e\n}\n\ntype SubmitResult struct {\n\tFileName string `json:\"fileName\"`\n\tFileUrl string `json:\"fileUrl\"`\n\tFid string `json:\"fid\"`\n\tSize int `json:\"size\"`\n\tError string `json:\"error\"`\n}\n\nfunc submit(files []string) ([]SubmitResult, error) {\n\tresults := make([]SubmitResult, len(files))\n\tfor index, file := range files {\n\t\tresults[index].FileName = file\n\t}\n\tret, err := assign(len(files))\n\tif err != nil {\n\t\tfor index, _ := range files {\n\t\t\tresults[index].Error = err.Error()\n\t\t}\n\t\treturn results, err\n\t}\n\tfor index, file := range files {\n\t\tfid := ret.Fid\n\t\tif index > 0 {\n\t\t\tfid = fid + \"_\" + strconv.Itoa(index)\n\t\t}\n\t\tresults[index].Size, err = upload(file, ret.PublicUrl, fid)\n\t\tif err != nil {\n\t\t\tfid = \"\"\n\t\t\tresults[index].Error = err.Error()\n\t\t}\n\t\tresults[index].Fid = fid\n\t\tresults[index].FileUrl = ret.PublicUrl + \"\/\" + fid\n\t}\n\treturn results, nil\n}\n\nfunc runUpload(cmd *Command, args []string) bool {\n\tif len(cmdUpload.Flag.Args()) == 0 {\n\t\tif *uploadDir == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfilepath.Walk(*uploadDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err == nil {\n\t\t\t\tif !info.IsDir() {\n\t\t\t\t\tresults, e := submit([]string{path})\n\t\t\t\t\tbytes, _ := json.Marshal(results)\n\t\t\t\t\tfmt.Println(string(bytes))\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\treturn e\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\treturn err\n\t\t})\n\t} else {\n\t\tresults, _ := submit(args)\n\t\tbytes, _ := json.Marshal(results)\n\t\tfmt.Println(string(bytes))\n\t}\n\treturn 
true\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2011 The \"GoJscript\" Authors\n\/\/\n\/\/ Use of this source code is governed by the BSD 2-Clause License\n\/\/ that can be found in the LICENSE file.\n\/\/\n\/\/ This software is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n\/\/ OR CONDITIONS OF ANY KIND, either express or implied. See the License\n\/\/ for more details.\n\npackage gojs\n\nimport \"testing\"\n\nconst DIR = \"..\/test\/\"\n\nfunc init() {\n\tMaxMessage = 100 \/\/ to show all errors\n}\n\nfunc TestConst(t *testing.T) { compile(\"const.go\", t) }\nfunc TestVar(t *testing.T) { compile(\"var.go\", t) }\nfunc TestType(t *testing.T) { compile(\"type.go\", t) }\nfunc TestFunc(t *testing.T) { compile(\"func.go\", t) }\nfunc TestControl(t *testing.T) { compile(\"control.go\", t) }\n\/\/func TestOp(t *testing.T) { compile(\"operator.go\", t) }\n\n\/\/ == Errors\n\/\/\n\/\/ os: import from core library\n\/\/ ..\/test\/error_decl.go:13:10: complex128 type\n\/\/ ..\/test\/error_decl.go:14:10: complex128 type\n\/\/ ..\/test\/error_decl.go:15:10: complex128 type\n\/\/ ..\/test\/error_decl.go:16:10: complex128 type\n\/\/MORE ERRORS\nfunc ExampleCompile_decl() { Compile(DIR + \"error_decl.go\") }\n\n\/\/ == Errors\n\/\/\n\/\/ ..\/test\/error_stmt.go:6:13: channel type\n\/\/ ..\/test\/error_stmt.go:8:2: goroutine\n\/\/ ..\/test\/error_stmt.go:9:2: defer statement\n\/\/ ..\/test\/error_stmt.go:11:2: built-in function panic()\n\/\/ ..\/test\/error_stmt.go:12:2: built-in function recover()\n\/\/ ..\/test\/error_stmt.go:18:1: use of label\nfunc ExampleCompile_stmt () { Compile(DIR + \"error_stmt.go\") }\n\n\/\/ * * *\n\nfunc compile(filename string, t *testing.T) {\n\tif err := Compile(DIR + filename); err != nil {\n\t\tt.Fatal(\"expected parse file\")\n\t}\n}\nUse example functions to checking warnings too.\/\/ Copyright 2011 The \"GoJscript\" Authors\n\/\/\n\/\/ Use of this source code is governed by the BSD 2-Clause License\n\/\/ that can be found in the LICENSE file.\n\/\/\n\/\/ This software is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n\/\/ OR CONDITIONS OF ANY KIND, either express or implied. 
See the License\n\/\/ for more details.\n\npackage gojs\n\nimport \"testing\"\n\nconst DIR = \"..\/test\/\"\n\nfunc init() {\n\tMaxMessage = 100 \/\/ to show all errors\n}\n\nfunc TestConst(t *testing.T) { compile(\"const.go\", t) }\nfunc TestVar(t *testing.T) { compile(\"var.go\", t) }\nfunc TestType(t *testing.T) { compile(\"type.go\", t) }\nfunc TestFunc(t *testing.T) { compile(\"func.go\", t) }\n\/\/func TestOp(t *testing.T) { compile(\"operator.go\", t) }\n\n\/\/ == Warnings\n\/\/\n\/\/ ..\/test\/control.go:82:2: 'default' clause above 'case' clause in switch statement\nfunc ExampleCompile_control() { Compile(DIR + \"control.go\") }\n\n\/\/ == Errors\n\/\/\n\/\/ os: import from core library\n\/\/ ..\/test\/error_decl.go:13:10: complex128 type\n\/\/ ..\/test\/error_decl.go:14:10: complex128 type\n\/\/ ..\/test\/error_decl.go:15:10: complex128 type\n\/\/ ..\/test\/error_decl.go:16:10: complex128 type\nfunc ExampleCompile_decl() { Compile(DIR + \"error_decl.go\") }\n\n\/\/ == Errors\n\/\/\n\/\/ ..\/test\/error_stmt.go:6:13: channel type\n\/\/ ..\/test\/error_stmt.go:8:2: goroutine\n\/\/ ..\/test\/error_stmt.go:9:2: defer statement\n\/\/ ..\/test\/error_stmt.go:11:2: built-in function panic()\n\/\/ ..\/test\/error_stmt.go:12:2: built-in function recover()\n\/\/ ..\/test\/error_stmt.go:18:1: use of label\nfunc ExampleCompile_stmt () { Compile(DIR + \"error_stmt.go\") }\n\n\/\/ * * *\n\nfunc compile(filename string, t *testing.T) {\n\tif err := Compile(DIR + filename); err != nil {\n\t\tt.Fatal(\"expected parse file\")\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\n\/\/ This file contains a demo of using the Product service by creating a\n\/\/ sample product with a random offerId, inserting it, and then\n\/\/ retrieving it (to show that it was indeed inserted).\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/content\/v2\"\n)\n\nfunc productDemo(ctx context.Context, service *content.APIService, config *merchantInfo) {\n\tif config.IsMCA {\n\t\tfmt.Println(\"This demo cannot be run on a multi-client account.\")\n\t\treturn\n\t}\n\tif config.WebsiteURL == \"\" {\n\t\tfmt.Println(\"This demo requires the account to have a configured website.\")\n\t\treturn\n\t}\n\tofferID := fmt.Sprintf(\"book#test%d\", rand.Int())\n\tproduct := createSampleProduct(config, offerID)\n\n\tproducts := content.NewProductsService(service)\n\n\tfmt.Printf(\"Inserting product with offerId %s... 
\", offerID)\n\tproductInfo, err := products.Insert(config.MerchantID, product).Do()\n\tif err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Insertion failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n\tcheckContentErrors(productInfo.Warnings, false)\n\tproductID := productInfo.Id\n\n\tfmt.Printf(\"Listing products:\\n\")\n\tlistCall := products.List(config.MerchantID)\n\t\/\/ Enable this to see even invalid offers:\n\tif false {\n\t\tlistCall.IncludeInvalidInsertedItems(true)\n\t}\n\t\/\/ Enable this to change the number of results listed by\n\t\/\/ per page:\n\tif false {\n\t\tlistCall.MaxResults(100)\n\t}\n\tif err := listCall.Pages(ctx, printProductsPage); err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Listing products failed\")\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tfmt.Printf(\"Retrieving product ID %s...\", productID)\n\tproductInfo, err = products.Get(config.MerchantID, productID).Do()\n\tif err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Retrieval failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n\tfmt.Printf(\"Retrieved product %s with title %s\\n\",\n\t\tproductInfo.Id, productInfo.Title)\n\n\tfmt.Printf(\"Deleting product ID %s...\", productID)\n\tif err := products.Delete(config.MerchantID, productID).Do(); err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Deletion failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n}\n\nfunc printProductsPage(res *content.ProductsListResponse) error {\n\tfor _, product := range res.Resources {\n\t\tfmt.Printf(\" - Offer %s: %s\\n\",\n\t\t\tproduct.OfferId, product.Title)\n\t}\n\treturn nil\n}\n\nfunc createSampleProduct(config *merchantInfo, offerID string) *content.Product {\n\twebsiteURL := config.WebsiteURL\n\tif websiteURL == \"\" {\n\t\twebsiteURL = \"http:\/\/my-book-shop.com\"\n\t}\n\tproductPrice := content.Price{Currency: \"USD\", Value: \"2.50\"}\n\tshippingPrice := content.Price{Currency: \"USD\", Value: \"0.99\"}\n\tshippingWeight := content.ProductShippingWeight{\n\t\tValue: 200.0,\n\t\tUnit: \"grams\",\n\t}\n\tshippingInfo := content.ProductShipping{\n\t\tCountry: \"US\",\n\t\tService: \"Standard shipping\",\n\t\tPrice: &shippingPrice,\n\t}\n\tproduct := content.Product{\n\t\tOfferId: offerID,\n\t\tTitle: \"A Tale of Two Cities\",\n\t\tDescription: \"A classic novel about the French Revolution\",\n\t\tLink: websiteURL + \"\/tale-of-two-cities.html\",\n\t\tImageLink: websiteURL + \"\/tale-of-two-cities.jpg\",\n\t\tContentLanguage: \"en\",\n\t\tTargetCountry: \"US\",\n\t\tChannel: \"online\",\n\t\tAvailability: \"in stock\",\n\t\tCondition: \"new\",\n\t\tGoogleProductCategory: \"Media > Books\",\n\t\tGtin: \"9780007350896\",\n\t\tPrice: &productPrice,\n\t\tShipping: [](*content.ProductShipping){&shippingInfo},\n\t\tShippingWeight: &shippingWeight,\n\t}\n\treturn &product\n}\nJust dump the full JSON object(s) returned from Productstatuses.package main\n\n\/\/ This file contains a demo of using the Product service by creating a\n\/\/ sample product with a random offerId, inserting it, and then\n\/\/ retrieving it (to show that it was indeed inserted).\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/content\/v2\"\n)\n\nfunc productDemo(ctx context.Context, service *content.APIService, config *merchantInfo) {\n\tif config.IsMCA {\n\t\tfmt.Println(\"This demo cannot be run on a multi-client account.\")\n\t\treturn\n\t}\n\tif config.WebsiteURL == \"\" {\n\t\tfmt.Println(\"This demo requires the account to have a configured website.\")\n\t\treturn\n\t}\n\tofferID := fmt.Sprintf(\"book#test%d\", rand.Int())\n\tproduct := 
createSampleProduct(config, offerID)\n\n\tproducts := content.NewProductsService(service)\n\n\tfmt.Printf(\"Inserting product with offerId %s... \", offerID)\n\tproductInfo, err := products.Insert(config.MerchantID, product).Do()\n\tif err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Insertion failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n\tcheckContentErrors(productInfo.Warnings, false)\n\tproductID := productInfo.Id\n\n\tfmt.Printf(\"Listing products:\\n\")\n\tlistCall := products.List(config.MerchantID)\n\t\/\/ Enable this to see even invalid offers:\n\tif false {\n\t\tlistCall.IncludeInvalidInsertedItems(true)\n\t}\n\t\/\/ Enable this to change the number of results listed by\n\t\/\/ per page:\n\tif false {\n\t\tlistCall.MaxResults(100)\n\t}\n\tif err := listCall.Pages(ctx, printProductsPage); err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Listing products failed\")\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tfmt.Printf(\"Retrieving product ID %s...\", productID)\n\tproductInfo, err = products.Get(config.MerchantID, productID).Do()\n\tif err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Retrieval failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n\tfmt.Printf(\"Retrieved product %s with title %s\\n\",\n\t\tproductInfo.Id, productInfo.Title)\n\n\tfmt.Printf(\"Deleting product ID %s...\", productID)\n\tif err := products.Delete(config.MerchantID, productID).Do(); err != nil {\n\t\tdumpAPIErrorAndStop(err, \"Deletion failed\")\n\t}\n\tfmt.Printf(\"done.\\n\")\n}\n\nfunc printProductsPage(res *content.ProductsListResponse) error {\n\tfor _, product := range res.Resources {\n\t\tfmt.Printf(\" - Offer %s: %s\\n\",\n\t\t\tproduct.OfferId, product.Title)\n\t}\n\treturn nil\n}\n\nfunc createSampleProduct(config *merchantInfo, offerID string) *content.Product {\n\twebsiteURL := config.WebsiteURL\n\tif websiteURL == \"\" {\n\t\twebsiteURL = \"http:\/\/my-book-shop.com\"\n\t}\n\tproductPrice := content.Price{Currency: \"USD\", Value: \"2.50\"}\n\tshippingPrice := content.Price{Currency: \"USD\", Value: \"0.99\"}\n\tshippingWeight := content.ProductShippingWeight{\n\t\tValue: 200.0,\n\t\tUnit: \"grams\",\n\t}\n\tshippingInfo := content.ProductShipping{\n\t\tCountry: \"US\",\n\t\tService: \"Standard shipping\",\n\t\tPrice: &shippingPrice,\n\t}\n\tproduct := content.Product{\n\t\tOfferId: offerID,\n\t\tTitle: \"A Tale of Two Cities\",\n\t\tDescription: \"A classic novel about the French Revolution\",\n\t\tLink: websiteURL + \"\/tale-of-two-cities.html\",\n\t\tImageLink: websiteURL + \"\/tale-of-two-cities.jpg\",\n\t\tContentLanguage: \"en\",\n\t\tTargetCountry: \"US\",\n\t\tChannel: \"online\",\n\t\tAvailability: \"in stock\",\n\t\tCondition: \"new\",\n\t\tGoogleProductCategory: \"Media > Books\",\n\t\tGtin: \"9780007350896\",\n\t\tPrice: &productPrice,\n\t\tShipping: [](*content.ProductShipping){&shippingInfo},\n\t\tShippingWeight: &shippingWeight,\n\t}\n\treturn &product\n}\n<|endoftext|>"} {"text":"\/*\n * Copyright 2019 Dgraph Labs, Inc. 
and Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/dgraph-io\/badger\/v2\"\n\t\"github.com\/dgraph-io\/badger\/v2\/pb\"\n\t\"github.com\/dgraph-io\/badger\/v2\/y\"\n)\n\nvar writeBenchCmd = &cobra.Command{\n\tUse: \"write\",\n\tShort: \"Writes random data to Badger to benchmark write speed.\",\n\tLong: `\nThis command writes random data to Badger to benchmark write speed. Useful for testing and\nperformance analysis.\n`,\n\tRunE: writeBench,\n}\n\nvar (\n\tkeySz int\n\tvalSz int\n\tnumKeys float64\n\tforce bool\n\tsorted bool\n\tshowLogs bool\n\n\tsizeWritten uint64\n\tentriesWritten uint64\n)\n\nconst (\n\tmil float64 = 1e6\n)\n\nfunc init() {\n\tbenchCmd.AddCommand(writeBenchCmd)\n\twriteBenchCmd.Flags().IntVarP(&keySz, \"key-size\", \"k\", 32, \"Size of key\")\n\twriteBenchCmd.Flags().IntVarP(&valSz, \"val-size\", \"v\", 128, \"Size of value\")\n\twriteBenchCmd.Flags().Float64VarP(&numKeys, \"keys-mil\", \"m\", 10.0,\n\t\t\"Number of keys to add in millions\")\n\twriteBenchCmd.Flags().BoolVarP(&force, \"force-compact\", \"f\", true,\n\t\t\"Force compact level 0 on close.\")\n\twriteBenchCmd.Flags().BoolVarP(&sorted, \"sorted\", \"s\", false, \"Write keys in sorted order.\")\n\twriteBenchCmd.Flags().BoolVarP(&showLogs, \"logs\", \"l\", false, \"Show Badger logs.\")\n}\n\nfunc writeRandom(db *badger.DB, num uint64) error {\n\tvalue := make([]byte, valSz)\n\ty.Check2(rand.Read(value))\n\n\tes := uint64(keySz + valSz) \/\/ entry size is keySz + valSz\n\tbatch := db.NewWriteBatch()\n\tfor i := uint64(1); i <= num; i++ {\n\t\tkey := make([]byte, keySz)\n\t\ty.Check2(rand.Read(key))\n\t\tif err := batch.Set(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tatomic.AddUint64(&entriesWritten, 1)\n\t\tatomic.AddUint64(&sizeWritten, es)\n\t}\n\treturn batch.Flush()\n}\n\nfunc writeSorted(db *badger.DB, num uint64) error {\n\tvalue := make([]byte, valSz)\n\ty.Check2(rand.Read(value))\n\tes := 8 + valSz \/\/ key size is 8 bytes and value size is valSz\n\n\twriter := db.NewStreamWriter()\n\tif err := writer.Prepare(); err != nil {\n\t\treturn err\n\t}\n\n\twg := &sync.WaitGroup{}\n\twriteCh := make(chan *pb.KVList, 3)\n\twriteRange := func(start, end uint64, streamId uint32) {\n\t\t\/\/ end is not included.\n\t\tdefer wg.Done()\n\t\tkvs := &pb.KVList{}\n\t\tvar sz int\n\t\tfor i := start; i < end; i++ {\n\t\t\tkey := make([]byte, 8)\n\t\t\tbinary.BigEndian.PutUint64(key, i)\n\t\t\tkvs.Kv = append(kvs.Kv, &pb.KV{\n\t\t\t\tKey: key,\n\t\t\t\tValue: value,\n\t\t\t\tVersion: 1,\n\t\t\t\tStreamId: streamId,\n\t\t\t})\n\n\t\t\tsz += es\n\t\t\tatomic.AddUint64(&entriesWritten, 1)\n\t\t\tatomic.AddUint64(&sizeWritten, uint64(es))\n\n\t\t\tif sz >= 4<<20 { \/\/ 4 MB\n\t\t\t\twriteCh <- kvs\n\t\t\t\tkvs = &pb.KVList{}\n\t\t\t\tsz = 
0\n\t\t\t}\n\t\t}\n\t\twriteCh <- kvs\n\t}\n\n\t\/\/ Let's create some streams.\n\twidth := num \/ 16\n\tstreamID := uint32(0)\n\tfor start := uint64(0); start < num; start += width {\n\t\tend := start + width\n\t\tif end > num {\n\t\t\tend = num\n\t\t}\n\t\tstreamID++\n\t\twg.Add(1)\n\t\tgo writeRange(start, end, streamID)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(writeCh)\n\t}()\n\tlog.Printf(\"Max StreamId used: %d. Width: %d\\n\", streamID, width)\n\tfor kvs := range writeCh {\n\t\tif err := writer.Write(kvs); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tlog.Println(\"DONE streaming. Flushing...\")\n\treturn writer.Flush()\n}\n\nfunc writeBench(cmd *cobra.Command, args []string) error {\n\topt := badger.DefaultOptions(sstDir).\n\t\tWithValueDir(vlogDir).\n\t\tWithTruncate(truncate).\n\t\tWithSyncWrites(false).\n\t\tWithCompactL0OnClose(force)\n\n\tif !showLogs {\n\t\topt = opt.WithLogger(nil)\n\t}\n\n\tdb, err := badger.Open(opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tstart := time.Now()\n\t\terr := db.Close()\n\t\tlog.Printf(\"DB.Close. Error: %v. Time taken to close: %s\", err, time.Since(start))\n\t}()\n\n\tfmt.Println(\"*********************************************************\")\n\tfmt.Println(\"Starting to benchmark Writes\")\n\tfmt.Println(\"*********************************************************\")\n\n\tstartTime = time.Now()\n\tnum := uint64(numKeys * mil)\n\tc := y.NewCloser(1)\n\tgo reportStats(c)\n\n\tif sorted {\n\t\terr = writeSorted(db, num)\n\t} else {\n\t\terr = writeRandom(db, num)\n\t}\n\n\tc.SignalAndWait()\n\treturn err\n}\n\nfunc reportStats(c *y.Closer) {\n\tdefer c.Done()\n\n\tt := time.NewTicker(time.Second)\n\tdefer t.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-c.HasBeenClosed():\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\tdur := time.Since(startTime)\n\t\t\tsz := atomic.LoadUint64(&sizeWritten)\n\t\t\tentries := atomic.LoadUint64(&entriesWritten)\n\t\t\tbytesRate := sz \/ uint64(dur.Seconds())\n\t\t\tentriesRate := entries \/ uint64(dur.Seconds())\n\t\t\tfmt.Printf(\"Time elapsed: %s, bytes written: %s, speed: %s\/sec, \"+\n\t\t\t\t\"entries written: %d, speed: %d\/sec\\n\", y.FixedDuration(time.Since(startTime)),\n\t\t\t\thumanize.Bytes(sz), humanize.Bytes(bytesRate), entries, entriesRate)\n\t\t}\n\t}\n}\nadd more flags to write benchmark (#1423)\/*\n * Copyright 2019 Dgraph Labs, Inc. 
and Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/dgraph-io\/badger\/v2\"\n\t\"github.com\/dgraph-io\/badger\/v2\/options\"\n\t\"github.com\/dgraph-io\/badger\/v2\/pb\"\n\t\"github.com\/dgraph-io\/badger\/v2\/y\"\n)\n\nvar writeBenchCmd = &cobra.Command{\n\tUse: \"write\",\n\tShort: \"Writes random data to Badger to benchmark write speed.\",\n\tLong: `\nThis command writes random data to Badger to benchmark write speed. Useful for testing and\nperformance analysis.\n`,\n\tRunE: writeBench,\n}\n\nvar (\n\tkeySz int\n\tvalSz int\n\tnumKeys float64\n\tforce bool\n\tsorted bool\n\tshowLogs bool\n\n\tsizeWritten uint64\n\tentriesWritten uint64\n\n\tvalueThreshold int\n\tnumVersions int\n\tmaxCacheSize int64\n\tkeepBlockIdxInCache bool\n\tkeepBlocksInCache bool\n\tmaxBfCacheSize int64\n\tvlogMaxEntries uint32\n\tloadBloomsOnOpen bool\n\tdetectConflicts bool\n\tcompression bool\n)\n\nconst (\n\tmil float64 = 1e6\n)\n\nfunc init() {\n\tbenchCmd.AddCommand(writeBenchCmd)\n\twriteBenchCmd.Flags().IntVarP(&keySz, \"key-size\", \"k\", 32, \"Size of key\")\n\twriteBenchCmd.Flags().IntVarP(&valSz, \"val-size\", \"v\", 128, \"Size of value\")\n\twriteBenchCmd.Flags().Float64VarP(&numKeys, \"keys-mil\", \"m\", 10.0,\n\t\t\"Number of keys to add in millions\")\n\twriteBenchCmd.Flags().BoolVarP(&force, \"force-compact\", \"f\", true,\n\t\t\"Force compact level 0 on close.\")\n\twriteBenchCmd.Flags().BoolVarP(&sorted, \"sorted\", \"s\", false, \"Write keys in sorted order.\")\n\twriteBenchCmd.Flags().BoolVarP(&showLogs, \"logs\", \"l\", false, \"Show Badger logs.\")\n\twriteBenchCmd.Flags().IntVarP(&valueThreshold, \"value-th\", \"t\", 1<<10, \"Value threshold\")\n\twriteBenchCmd.Flags().IntVarP(&numVersions, \"num-version\", \"n\", 1, \"Number of versions to keep\")\n\twriteBenchCmd.Flags().Int64VarP(&maxCacheSize, \"max-cache\", \"C\", 1<<30, \"Max size of cache\")\n\twriteBenchCmd.Flags().BoolVarP(&keepBlockIdxInCache, \"keep-bidx\", \"b\", true,\n\t\t\"Keep block indices in cache\")\n\twriteBenchCmd.Flags().BoolVarP(&keepBlocksInCache, \"keep-blocks\", \"B\", true,\n\t\t\"Keep blocks in cache\")\n\twriteBenchCmd.Flags().Int64VarP(&maxBfCacheSize, \"max-bf-cache\", \"c\", 500<<20,\n\t\t\"Maximum Bloom Filter Cache Size\")\n\twriteBenchCmd.Flags().Uint32Var(&vlogMaxEntries, \"vlog-maxe\", 10000, \"Value log Max Entries\")\n\twriteBenchCmd.Flags().StringVarP(&encryptionKey, \"encryption-key\", \"e\", \"\",\n\t\t\"If it is true, badger will encrypt all the data stored on the disk.\")\n\twriteBenchCmd.Flags().StringVar(&loadingMode, \"loading-mode\", \"mmap\",\n\t\t\"Mode for accessing SSTables\")\n\twriteBenchCmd.Flags().BoolVar(&loadBloomsOnOpen, \"load-blooms\", false,\n\t\t\"Load Bloom filter on DB 
open.\")\n\twriteBenchCmd.Flags().BoolVar(&detectConflicts, \"conficts\", false,\n\t\t\"If true, it badger will detect the conflicts\")\n\twriteBenchCmd.Flags().BoolVar(&compression, \"compression\", false,\n\t\t\"If true, badger will use ZSTD mode\")\n}\n\nfunc writeRandom(db *badger.DB, num uint64) error {\n\tvalue := make([]byte, valSz)\n\ty.Check2(rand.Read(value))\n\n\tes := uint64(keySz + valSz) \/\/ entry size is keySz + valSz\n\tbatch := db.NewWriteBatch()\n\tfor i := uint64(1); i <= num; i++ {\n\t\tkey := make([]byte, keySz)\n\t\ty.Check2(rand.Read(key))\n\t\tif err := batch.Set(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tatomic.AddUint64(&entriesWritten, 1)\n\t\tatomic.AddUint64(&sizeWritten, es)\n\t}\n\treturn batch.Flush()\n}\n\nfunc writeSorted(db *badger.DB, num uint64) error {\n\tvalue := make([]byte, valSz)\n\ty.Check2(rand.Read(value))\n\tes := 8 + valSz \/\/ key size is 8 bytes and value size is valSz\n\n\twriter := db.NewStreamWriter()\n\tif err := writer.Prepare(); err != nil {\n\t\treturn err\n\t}\n\n\twg := &sync.WaitGroup{}\n\twriteCh := make(chan *pb.KVList, 3)\n\twriteRange := func(start, end uint64, streamId uint32) {\n\t\t\/\/ end is not included.\n\t\tdefer wg.Done()\n\t\tkvs := &pb.KVList{}\n\t\tvar sz int\n\t\tfor i := start; i < end; i++ {\n\t\t\tkey := make([]byte, 8)\n\t\t\tbinary.BigEndian.PutUint64(key, i)\n\t\t\tkvs.Kv = append(kvs.Kv, &pb.KV{\n\t\t\t\tKey: key,\n\t\t\t\tValue: value,\n\t\t\t\tVersion: 1,\n\t\t\t\tStreamId: streamId,\n\t\t\t})\n\n\t\t\tsz += es\n\t\t\tatomic.AddUint64(&entriesWritten, 1)\n\t\t\tatomic.AddUint64(&sizeWritten, uint64(es))\n\n\t\t\tif sz >= 4<<20 { \/\/ 4 MB\n\t\t\t\twriteCh <- kvs\n\t\t\t\tkvs = &pb.KVList{}\n\t\t\t\tsz = 0\n\t\t\t}\n\t\t}\n\t\twriteCh <- kvs\n\t}\n\n\t\/\/ Let's create some streams.\n\twidth := num \/ 16\n\tstreamID := uint32(0)\n\tfor start := uint64(0); start < num; start += width {\n\t\tend := start + width\n\t\tif end > num {\n\t\t\tend = num\n\t\t}\n\t\tstreamID++\n\t\twg.Add(1)\n\t\tgo writeRange(start, end, streamID)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(writeCh)\n\t}()\n\tlog.Printf(\"Max StreamId used: %d. Width: %d\\n\", streamID, width)\n\tfor kvs := range writeCh {\n\t\tif err := writer.Write(kvs); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tlog.Println(\"DONE streaming. Flushing...\")\n\treturn writer.Flush()\n}\n\nfunc writeBench(cmd *cobra.Command, args []string) error {\n\tvar cmode options.CompressionType\n\tif compression {\n\t\tcmode = options.ZSTD\n\t} else {\n\t\tcmode = options.None\n\t}\n\tmode := getLoadingMode(loadingMode)\n\topt := badger.DefaultOptions(sstDir).\n\t\tWithValueDir(vlogDir).\n\t\tWithTruncate(truncate).\n\t\tWithSyncWrites(false).\n\t\tWithCompactL0OnClose(force).\n\t\tWithValueThreshold(valueThreshold).\n\t\tWithNumVersionsToKeep(numVersions).\n\t\tWithMaxCacheSize(maxCacheSize).\n\t\tWithKeepBlockIndicesInCache(keepBlockIdxInCache).\n\t\tWithKeepBlocksInCache(keepBlocksInCache).\n\t\tWithMaxBfCacheSize(maxBfCacheSize).\n\t\tWithValueLogMaxEntries(vlogMaxEntries).\n\t\tWithTableLoadingMode(mode).\n\t\tWithEncryptionKey([]byte(encryptionKey)).\n\t\tWithLoadBloomsOnOpen(loadBloomsOnOpen).\n\t\tWithDetectConflicts(detectConflicts).\n\t\tWithCompression(cmode)\n\n\tif !showLogs {\n\t\topt = opt.WithLogger(nil)\n\t}\n\n\tdb, err := badger.Open(opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tstart := time.Now()\n\t\terr := db.Close()\n\t\tlog.Printf(\"DB.Close. Error: %v. 
Time taken to close: %s\", err, time.Since(start))\n\t}()\n\n\tfmt.Println(\"*********************************************************\")\n\tfmt.Println(\"Starting to benchmark Writes\")\n\tfmt.Println(\"*********************************************************\")\n\n\tstartTime = time.Now()\n\tnum := uint64(numKeys * mil)\n\tc := y.NewCloser(1)\n\tgo reportStats(c)\n\n\tif sorted {\n\t\terr = writeSorted(db, num)\n\t} else {\n\t\terr = writeRandom(db, num)\n\t}\n\n\tc.SignalAndWait()\n\treturn err\n}\n\nfunc reportStats(c *y.Closer) {\n\tdefer c.Done()\n\n\tt := time.NewTicker(time.Second)\n\tdefer t.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-c.HasBeenClosed():\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\tdur := time.Since(startTime)\n\t\t\tsz := atomic.LoadUint64(&sizeWritten)\n\t\t\tentries := atomic.LoadUint64(&entriesWritten)\n\t\t\tbytesRate := sz \/ uint64(dur.Seconds())\n\t\t\tentriesRate := entries \/ uint64(dur.Seconds())\n\t\t\tfmt.Printf(\"Time elapsed: %s, bytes written: %s, speed: %s\/sec, \"+\n\t\t\t\t\"entries written: %d, speed: %d\/sec\\n\", y.FixedDuration(time.Since(startTime)),\n\t\t\t\thumanize.Bytes(sz), humanize.Bytes(bytesRate), entries, entriesRate)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package goop\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc Test_FindAllElement(t *testing.T) {\n\n}\n\nfunc Test_NewGoopNode(t *testing.T) {\n\tchild := &html.Node{\n\t\tType: 0x3,\n\t\tDataAtom: 0x27604,\n\t\tData: \"html\",\n\t}\n\tgN := NewGoopNode(child)\n\tif !nodeEqual(child, gN.Node) {\n\t\tt.Errorf(\"nodes not equal, expected: %v, got %v\", child, gN)\n\t}\n}\n\ntype goopNodeTest struct {\n\tinput string\n\tnode *GoopNode\n}\n\ntype goopTest struct {\n\tinput string\n\tgoop *Goop\n}\n\nfunc htmlNodeBoilerPlate(n *html.Node) *html.Node {\n\tdoc := &html.Node{\n\t\tType: 0x2,\n\t}\n\n\thtmlNode := &html.Node{\n\t\tParent: doc,\n\t\tType: 0x3,\n\t\tDataAtom: 0x27604,\n\t\tData: \"html\",\n\t}\n\tdoc.FirstChild = htmlNode\n\tdoc.LastChild = htmlNode\n\n\thead := &html.Node{\n\t\tParent: htmlNode,\n\t\tType: 0x3,\n\t\tDataAtom: 0x2fa04,\n\t\tData: \"head\",\n\t}\n\n\tbody := &html.Node{\n\t\tParent: htmlNode,\n\t\tPrevSibling: head,\n\t\tType: 0x3,\n\t\tDataAtom: 0x2f04,\n\t\tData: \"body\",\n\t}\n\thead.NextSibling = body\n\n\thtmlNode.FirstChild = head\n\thtmlNode.LastChild = body\n\n\tbody.FirstChild = n\n\tbody.LastChild = n\n\n\tn.Parent = body\n\n\treturn doc\n}\n\nfunc Test_BuildGoop(t *testing.T) {\n\t\/*\tparent := &html.Node{\n\t\t\tType: 0x2,\n\t\t}\n\t\tchild := &html.Node{\n\t\t\tType: 0x3,\n\t\t\tDataAtom: 0x27604,\n\t\t\tData: \"html\",\n\t\t}\n\t\thead := &html.Node{\n\t\t\tParent: child,\n\t\t\tType: 0x3,\n\t\t\tDataAtom: 0x2fa04,\n\t\t\tData: \"head\",\n\t\t}\n\n\t\tbody := &html.Node{\n\t\t\tParent: child,\n\t\t\tPrevSibling: head,\n\t\t\tType: 0x3,\n\t\t\tDataAtom: 0x2f04,\n\t\t\tData: \"body\",\n\t\t}\n\t\thead.NextSibling = body\n\t*\/\n\tdiv := &html.Node{\n\t\tType: 0x3,\n\t\tDataAtom: 0x10703,\n\t\tData: \"div\",\n\t}\n\t\/\/body.FirstChild = div\n\t\/\/body.LastChild = div\n\n\tfoo := &html.Node{\n\t\tParent: div,\n\t\tType: 0x1,\n\t\tData: \"Foo\",\n\t}\n\tdiv.FirstChild = foo\n\tdiv.LastChild = foo\n\n\ttests := []goopTest{\n\t\tgoopTest{\n\t\t\t\"
Foo<\/div>\",\n\t\t\t&Goop{\n\t\t\t\tRoot: &GoopNode{\n\t\t\t\t\tNode: htmlNodeBoilerPlate(div),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tg, err := BuildGoop(strings.NewReader(test.input))\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"error occured while building some tasty goop: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !nodeEqual(test.goop.Root.Node, g.Root.Node) {\n\t\t\tt.Errorf(\"goop built: %v doesnt match expected %v\\n\", g, test.goop)\n\t\t}\n\t}\n}\n\nfunc nodeEqual(n1, n2 *html.Node) bool {\n\tif n1 == nil || n2 == nil {\n\t\treturn true\n\t}\n\tif (n1 != nil && n2 == nil) || (n1 == nil && n2 != nil) {\n\t\treturn false\n\t}\n\n\t\/\/ TODO(ttacon): go through node's own siblings\n\n\tc1 := n1.FirstChild\n\tc2 := n2.FirstChild\n\tfor c1 != nil && c2 != nil {\n\t\tif c1 == nil || c2 == nil {\n\t\t\treturn false\n\t\t}\n\t\tif !nodeEqual(c1, c2) {\n\t\t\treturn false\n\t\t}\n\t\tc1 = c1.NextSibling\n\t\tc2 = c2.NextSibling\n\t}\n\n\treturn n1.Type == n2.Type &&\n\t\tn1.Data == n2.Data &&\n\t\tn1.DataAtom == n2.DataAtom\n}\n\nfunc Test_GoopFind(t *testing.T) {\n}\n\ntype tokenizeTest struct {\n\tinput string\n\toutput [][]string\n}\n\nfunc Test_tokenize(t *testing.T) {\n\ttests := []tokenizeTest{\n\t\ttokenizeTest{\n\t\t\t\"div#id.class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{\n\t\t\t\t\t\"div\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"id\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttokenizeTest{\n\t\t\t\"#id.class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"id\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttokenizeTest{\n\t\t\t\".class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{},\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\/*\t\ttokenizeTest{\n\t\t\t\t\"div#id#id2.class0.class1.class2\",\n\t\t\t\t[][]string{\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"div\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"id\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"class0\",\n\t\t\t\t\t\t\"class1\",\n\t\t\t\t\t\t\"class2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},*\/\n\t\ttokenizeTest{\n\t\t\t\"a.class0\",\n\t\t\t[][]string{\n\t\t\t\t[]string{\n\t\t\t\t\t\"a\",\n\t\t\t\t},\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvals := tokenize(test.input)\n\t\tif !sliceEquality(vals, test.output) {\n\t\t\tt.Errorf(\"tokenization failed, expected: %v, got: %v\", test.output, vals)\n\t\t}\n\t}\n}\n\nfunc sliceEquality(s1 [][]string, s2 [][]string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\tfor i, val1 := range s1 {\n\t\tval2 := s2[i]\n\t\tif len(val1) != len(val2) {\n\t\t\treturn false\n\t\t}\n\t\tfor j, v1 := range val1 {\n\t\t\tif v1 != val2[j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc Test_GoopNodeFind(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeHasClasses(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeIsElement(t *testing.T) {\n\n}\n\nfunc Test_GoopFindAllElements(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeFindAllElements(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeSearchByElement(t *testing.T) {\n\n}\n\nfunc Test_GoopFindAllWithClass(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeSearchByClass(t 
*testing.T) {\n\n}\n\nfunc Test_GoopFindById(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeFindById(t *testing.T) {\n\n}\n\nfunc Test_Attributes(t *testing.T) {\n\n}\nRemove commented out sectionpackage goop\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc Test_FindAllElement(t *testing.T) {\n\n}\n\nfunc Test_NewGoopNode(t *testing.T) {\n\tchild := &html.Node{\n\t\tType: 0x3,\n\t\tDataAtom: 0x27604,\n\t\tData: \"html\",\n\t}\n\tgN := NewGoopNode(child)\n\tif !nodeEqual(child, gN.Node) {\n\t\tt.Errorf(\"nodes not equal, expected: %v, got %v\", child, gN)\n\t}\n}\n\ntype goopNodeTest struct {\n\tinput string\n\tnode *GoopNode\n}\n\ntype goopTest struct {\n\tinput string\n\tgoop *Goop\n}\n\nfunc htmlNodeBoilerPlate(n *html.Node) *html.Node {\n\tdoc := &html.Node{\n\t\tType: 0x2,\n\t}\n\n\thtmlNode := &html.Node{\n\t\tParent: doc,\n\t\tType: 0x3,\n\t\tDataAtom: 0x27604,\n\t\tData: \"html\",\n\t}\n\tdoc.FirstChild = htmlNode\n\tdoc.LastChild = htmlNode\n\n\thead := &html.Node{\n\t\tParent: htmlNode,\n\t\tType: 0x3,\n\t\tDataAtom: 0x2fa04,\n\t\tData: \"head\",\n\t}\n\n\tbody := &html.Node{\n\t\tParent: htmlNode,\n\t\tPrevSibling: head,\n\t\tType: 0x3,\n\t\tDataAtom: 0x2f04,\n\t\tData: \"body\",\n\t}\n\thead.NextSibling = body\n\n\thtmlNode.FirstChild = head\n\thtmlNode.LastChild = body\n\n\tbody.FirstChild = n\n\tbody.LastChild = n\n\n\tn.Parent = body\n\n\treturn doc\n}\n\nfunc Test_BuildGoop(t *testing.T) {\n\tdiv := &html.Node{\n\t\tType: 0x3,\n\t\tDataAtom: 0x10703,\n\t\tData: \"div\",\n\t}\n\n\tfoo := &html.Node{\n\t\tParent: div,\n\t\tType: 0x1,\n\t\tData: \"Foo\",\n\t}\n\tdiv.FirstChild = foo\n\tdiv.LastChild = foo\n\n\ttests := []goopTest{\n\t\tgoopTest{\n\t\t\t\"
Foo<\/div>\",\n\t\t\t&Goop{\n\t\t\t\tRoot: &GoopNode{\n\t\t\t\t\tNode: htmlNodeBoilerPlate(div),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tg, err := BuildGoop(strings.NewReader(test.input))\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"error occured while building some tasty goop: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !nodeEqual(test.goop.Root.Node, g.Root.Node) {\n\t\t\tt.Errorf(\"goop built: %v doesnt match expected %v\\n\", g, test.goop)\n\t\t}\n\t}\n}\n\nfunc nodeEqual(n1, n2 *html.Node) bool {\n\tif n1 == nil || n2 == nil {\n\t\treturn true\n\t}\n\tif (n1 != nil && n2 == nil) || (n1 == nil && n2 != nil) {\n\t\treturn false\n\t}\n\n\t\/\/ TODO(ttacon): go through node's own siblings\n\n\tc1 := n1.FirstChild\n\tc2 := n2.FirstChild\n\tfor c1 != nil && c2 != nil {\n\t\tif c1 == nil || c2 == nil {\n\t\t\treturn false\n\t\t}\n\t\tif !nodeEqual(c1, c2) {\n\t\t\treturn false\n\t\t}\n\t\tc1 = c1.NextSibling\n\t\tc2 = c2.NextSibling\n\t}\n\n\treturn n1.Type == n2.Type &&\n\t\tn1.Data == n2.Data &&\n\t\tn1.DataAtom == n2.DataAtom\n}\n\nfunc Test_GoopFind(t *testing.T) {\n}\n\ntype tokenizeTest struct {\n\tinput string\n\toutput [][]string\n}\n\nfunc Test_tokenize(t *testing.T) {\n\ttests := []tokenizeTest{\n\t\ttokenizeTest{\n\t\t\t\"div#id.class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{\n\t\t\t\t\t\"div\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"id\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttokenizeTest{\n\t\t\t\"#id.class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"id\",\n\t\t\t\t},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttokenizeTest{\n\t\t\t\".class0.class1.class2\",\n\t\t\t[][]string{\n\t\t\t\t[]string{},\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t\t\"class1\",\n\t\t\t\t\t\"class2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\/*\t\ttokenizeTest{\n\t\t\t\t\"div#id#id2.class0.class1.class2\",\n\t\t\t\t[][]string{\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"div\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"id\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"class0\",\n\t\t\t\t\t\t\"class1\",\n\t\t\t\t\t\t\"class2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},*\/\n\t\ttokenizeTest{\n\t\t\t\"a.class0\",\n\t\t\t[][]string{\n\t\t\t\t[]string{\n\t\t\t\t\t\"a\",\n\t\t\t\t},\n\t\t\t\t[]string{},\n\t\t\t\t[]string{\n\t\t\t\t\t\"class0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvals := tokenize(test.input)\n\t\tif !sliceEquality(vals, test.output) {\n\t\t\tt.Errorf(\"tokenization failed, expected: %v, got: %v\", test.output, vals)\n\t\t}\n\t}\n}\n\nfunc sliceEquality(s1 [][]string, s2 [][]string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\tfor i, val1 := range s1 {\n\t\tval2 := s2[i]\n\t\tif len(val1) != len(val2) {\n\t\t\treturn false\n\t\t}\n\t\tfor j, v1 := range val1 {\n\t\t\tif v1 != val2[j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc Test_GoopNodeFind(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeHasClasses(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeIsElement(t *testing.T) {\n\n}\n\nfunc Test_GoopFindAllElements(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeFindAllElements(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeSearchByElement(t *testing.T) {\n\n}\n\nfunc Test_GoopFindAllWithClass(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeSearchByClass(t 
*testing.T) {\n\n}\n\nfunc Test_GoopFindById(t *testing.T) {\n\n}\n\nfunc Test_GoopNodeFindById(t *testing.T) {\n\n}\n\nfunc Test_Attributes(t *testing.T) {\n\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\/\/\"github.com\/geobe\/go4j\/poi\"\n\tmodel \"github.com\/geobe\/go4web\/gorm1\/model2\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/postgres\"\n\t\"strconv\"\n)\n\n\/\/ Demo program for polymorphic associations:\n\/\/ this example uses the modified model2 package\nfunc main() {\n\tdb, err := gorm.Open(\"postgres\", \"user=oosy dbname=gorm5 password=oosy2016 sslmode=disable\")\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\tdefer db.Close()\n\n\t\/\/ Migrate the schema\n\tdb.AutoMigrate(&model.City{}, &model.Attraction{}, &model.Destination{}, &model.Trip{}, &model.Person{})\n\n\t\/\/ Clear the database\n\tdb.Delete(model.Person{})\n\tdb.Delete(model.Trip{})\n\t\/\/db.Delete(model.City{})\n\t\/\/db.Delete(model.Attraction{})\n\tdb.Delete(model.Destination{})\n\n\t\/\/for _, aCity := range poi.GermanCities {\n\t\/\/\tcity := model.New(aCity)\n\t\/\/\tdb.Create(&city)\n\t\/\/}\n\t\/\/\n\t\/\/for _, attr := range model.GermanAttractions {\n\t\/\/\tdb.Create(&attr)\n\t\/\/}\n\n\tkirk := model.SomePersons[0]\n\tkirk.Trips = append(kirk.Trips, model.SomeTrips[0], model.SomeTrips[2])\n\n\tvar dests []model.Destination\n\tvar cities []model.City\n\tdb.Find(&cities, \"name in ('Köln', 'München', 'Düsseldorf')\")\n\tfor i, c := range cities {\n\t\tdest := model.Destination{Reason: \"Karneval-\" + strconv.Itoa(i)}\n\t\tc.Destination = append(c.Destination, dest)\n\t\tdb.Save(&c)\n\t}\n\n\tdest := model.Destination{Reason: \"skurriles Schloß\"}\n\tvar att model.Attraction\n\tdb.First(&att, \"name like 'Neuschw%'\")\n\tatt.Destination = append(att.Destination, dest)\n\tdb.Save(&att)\n\n\tdb.Find(&dests)\n\tfor _, dest := range dests {\n\t\tvar city model.City\n\t\tvar attr model.Attraction\n\t\tfmt.Printf(\"Reiseziel %s: \", dest.Reason)\n\t\t\/\/ verbose variant 1:\n\t\t\/\/ read the polymorphic object in full\n\t\tif \"cities\" == dest.DestType {\n\t\t\tdb.First(&city, dest.DestID)\n\t\t\tfmt.Printf(\"City %s\\n\", city.Name)\n\t\t} else {\n\t\t\tdb.First(&attr, dest.DestID)\n\t\t\tfmt.Printf(\"Attraction %s\\n\", attr.Name)\n\t\t}\n\t\t\/\/ compact variant 2: read only the\n\t\t\/\/ values that are actually needed\n\t\tvar any struct {\n\t\t\tName string\n\t\t}\n\t\tdb.Table(dest.DestType).\n\t\t\tWhere(\"ID = ?\", dest.DestID).Scan(&any)\n\t\tfmt.Printf(\"\\t%s\\n\", any.Name)\n\t}\n\n\tkirk.Trips[0].Destinations = dests\n\n\tdb.Save(&kirk)\n\n\t\/\/ query\n\tvar kirki model.Person\n\n\tdb.Preload(\"Trips\").\n\t\tPreload(\"Trips.Destinations\").\n\t\tFirst(&kirki, kirk.ID)\n\n\tfmt.Printf(\"Person %s, %d Trips, 1. Trip %s hat %d Stationen:\\n\",\n\t\tkirki.Name, len(kirki.Trips), kirki.Trips[0].Comment,\n\t\tlen(kirki.Trips[0].Destinations))\n\tfor _, kdest := range kirki.Trips[0].Destinations {\n\t\tvar any struct {\n\t\t\tDescription string\n\t\t\tName string\n\t\t}\n\t\tdb.Table(kdest.DestType).Where(\"ID = ?\", kdest.DestID).Scan(&any)\n\t\tfmt.Printf(\"\\t%s: %s %s\\n\", kdest.Reason,\n\t\t\tany.Description, any.Name)\n\t}\n\n\t\/\/fmt.Println(kirk)\n\t\/\/fmt.Println(kiki)\n\n}\nPolymorphism example: test code removedpackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/geobe\/go4j\/poi\"\n\tmodel \"github.com\/geobe\/go4web\/gorm1\/model2\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/postgres\"\n\t\"strconv\"\n)\n\n\/\/ Demo program for polymorphic associations:\n\/\/ this example uses the modified model2 package\nfunc main() {\n\tdb, err := gorm.Open(\"postgres\", \"user=oosy dbname=gorm5 password=oosy2016 sslmode=disable\")\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\tdefer db.Close()\n\n\t\/\/ Migrate the schema\n\tdb.AutoMigrate(&model.City{}, &model.Attraction{}, &model.Destination{}, &model.Trip{}, &model.Person{})\n\n\t\/\/ Clear the database\n\tdb.Delete(model.Person{})\n\tdb.Delete(model.Trip{})\n\tdb.Delete(model.City{})\n\tdb.Delete(model.Attraction{})\n\tdb.Delete(model.Destination{})\n\n\tfor _, aCity := range poi.GermanCities {\n\t\tcity := model.New(aCity)\n\t\tdb.Create(&city)\n\t}\n\n\tfor _, attr := range model.GermanAttractions {\n\t\tdb.Create(&attr)\n\t}\n\n\tkirk := model.SomePersons[0]\n\tkirk.Trips = append(kirk.Trips, model.SomeTrips[0], model.SomeTrips[2])\n\n\tvar dests []model.Destination\n\tvar cities []model.City\n\tdb.Find(&cities, \"name in ('Köln', 'München', 'Düsseldorf')\")\n\tfor i, c := range cities {\n\t\tdest := model.Destination{Reason: \"Karneval-\" + strconv.Itoa(i)}\n\t\tc.Destination = append(c.Destination, dest)\n\t\tdb.Save(&c)\n\t}\n\n\tdest := model.Destination{Reason: \"skurriles Schloß\"}\n\tvar att model.Attraction\n\tdb.First(&att, \"name like 'Neuschw%'\")\n\tatt.Destination = append(att.Destination, dest)\n\tdb.Save(&att)\n\n\tdb.Find(&dests)\n\tfor _, dest := range dests {\n\t\tvar city model.City\n\t\tvar attr model.Attraction\n\t\tfmt.Printf(\"Reiseziel %s: \", dest.Reason)\n\t\t\/\/ verbose variant 1:\n\t\t\/\/ read the polymorphic object in full\n\t\tif \"cities\" == dest.DestType {\n\t\t\tdb.First(&city, dest.DestID)\n\t\t\tfmt.Printf(\"City %s\\n\", city.Name)\n\t\t} else {\n\t\t\tdb.First(&attr, dest.DestID)\n\t\t\tfmt.Printf(\"Attraction %s\\n\", attr.Name)\n\t\t}\n\t\t\/\/ compact variant 2: read only the\n\t\t\/\/ values that are actually needed\n\t\tvar any struct {\n\t\t\tName string\n\t\t}\n\t\tdb.Table(dest.DestType).\n\t\t\tWhere(\"ID = ?\", dest.DestID).Scan(&any)\n\t\tfmt.Printf(\"\\t%s\\n\", any.Name)\n\t}\n\n\tkirk.Trips[0].Destinations = dests\n\n\tdb.Save(&kirk)\n\n\t\/\/ query\n\tvar kirki model.Person\n\n\tdb.Preload(\"Trips\").\n\t\tPreload(\"Trips.Destinations\").\n\t\tFirst(&kirki, kirk.ID)\n\n\tfmt.Printf(\"Person %s, %d Trips, 1. Trip %s hat %d Stationen:\\n\",\n\t\tkirki.Name, len(kirki.Trips), kirki.Trips[0].Comment,\n\t\tlen(kirki.Trips[0].Destinations))\n\tfor _, kdest := range kirki.Trips[0].Destinations {\n\t\tvar any struct {\n\t\t\tDescription string\n\t\t\tName string\n\t\t}\n\t\tdb.Table(kdest.DestType).Where(\"ID = ?\", kdest.DestID).Scan(&any)\n\t\tfmt.Printf(\"\\t%s: %s %s\\n\", kdest.Reason,\n\t\t\tany.Description, any.Name)\n\t}\n\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gossip\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\tct \"github.com\/google\/certificate-transparency-go\"\n)\n\nvar defaultNumPollinationsToReturn = flag.Int(\"default_num_pollinations_to_return\", 10,\n\t\"Number of randomly selected STH pollination entries to return for sth-pollination requests.\")\n\ntype clock interface {\n\tNow() time.Time\n}\n\ntype realClock struct{}\n\nfunc (realClock) Now() time.Time {\n\treturn time.Now()\n}\n\n\/\/ SignatureVerifierMap is a map of SignatureVerifier by LogID\ntype SignatureVerifierMap map[ct.SHA256Hash]ct.SignatureVerifier\n\n\/\/ Handler for the gossip HTTP requests.\ntype Handler struct {\n\tstorage *Storage\n\tverifiers SignatureVerifierMap\n\tclock clock\n}\n\nfunc writeWrongMethodResponse(rw *http.ResponseWriter, allowed string) {\n\t(*rw).Header().Add(\"Allow\", allowed)\n\t(*rw).WriteHeader(http.StatusMethodNotAllowed)\n}\n\nfunc writeErrorResponse(rw *http.ResponseWriter, status int, body string) {\n\t(*rw).WriteHeader(status)\n\t(*rw).Write([]byte(body))\n}\n\n\/\/ HandleSCTFeedback handles requests POSTed to ...\/sct-feedback.\n\/\/ It attempts to store the provided SCT Feedback.\nfunc (h *Handler) HandleSCTFeedback(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\twriteWrongMethodResponse(&rw, \"POST\")\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(req.Body)\n\tvar feedback SCTFeedback\n\tif err := decoder.Decode(&feedback); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf(\"Invalid SCT Feedback received: %v\", err))\n\t\treturn\n\t}\n\n\t\/\/ TODO(alcutter): 5.1.1 Validate leaf chains up to a trusted root\n\t\/\/ TODO(alcutter): 5.1.1\/2 Verify each SCT is valid and from a known log, discard those which aren't\n\t\/\/ TODO(alcutter): 5.1.1\/3 Discard leaves for domains other than ours.\n\tif err := h.storage.AddSCTFeedback(feedback); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Unable to store feedback: %v\", err))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n}\n\n\/\/ HandleSTHPollination handles requests POSTed to ...\/sth-pollination.\n\/\/ It attempts to store the provided pollination info, and returns a random set of\n\/\/ pollination data from the last 14 days (i.e. 
\"fresh\" by the definition of the gossip RFC.)\nfunc (h *Handler) HandleSTHPollination(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\twriteWrongMethodResponse(&rw, \"POST\")\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(req.Body)\n\tvar p STHPollination\n\tif err := decoder.Decode(&p); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf(\"Invalid STH Pollination received: %v\", err))\n\t\treturn\n\t}\n\n\tsthToKeep := make([]ct.SignedTreeHead, 0, len(p.STHs))\n\tfor _, sth := range p.STHs {\n\t\tv, found := h.verifiers[sth.LogID]\n\t\tif !found {\n\t\t\tlog.Printf(\"Pollination entry for unknown logID: %s\", sth.LogID.Base64String())\n\t\t\tcontinue\n\t\t}\n\t\tif err := v.VerifySTHSignature(sth); err != nil {\n\t\t\tlog.Printf(\"Failed to verify STH, dropping: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsthToKeep = append(sthToKeep, sth)\n\t}\n\tp.STHs = sthToKeep\n\n\terr := h.storage.AddSTHPollination(p)\n\tif err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't store pollination: %v\", err))\n\t\treturn\n\t}\n\n\tfreshTime := h.clock.Now().AddDate(0, 0, -14)\n\trp, err := h.storage.GetRandomSTHPollination(freshTime, *defaultNumPollinationsToReturn)\n\tif err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't fetch pollination to return: %v\", err))\n\t\treturn\n\t}\n\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(*rp); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't encode pollination to return: %v\", err))\n\t\treturn\n\t}\n}\n\n\/\/ NewHandler creates a new Handler object, taking a pointer a Storage object to\n\/\/ use for storing and retrieving feedback and pollination data, and a\n\/\/ SignatureVerifierMap for verifying signatures from known logs.\nfunc NewHandler(s *Storage, v SignatureVerifierMap) Handler {\n\treturn Handler{\n\t\tstorage: s,\n\t\tverifiers: v,\n\t\tclock: realClock{},\n\t}\n}\n\n\/\/ NewHandler creates a new Handler object, taking a pointer a Storage object to\n\/\/ use for storing and retrieving feedback and pollination data, and a\n\/\/ SignatureVerifierMap for verifying signatures from known logs.\nfunc newHandlerWithClock(s *Storage, v SignatureVerifierMap, c clock) Handler {\n\treturn Handler{\n\t\tstorage: s,\n\t\tverifiers: v,\n\t\tclock: c,\n\t}\n}\nFix xss vuln in gossip hub\/\/ Copyright 2015 Google LLC. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gossip\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\tct \"github.com\/google\/certificate-transparency-go\"\n)\n\nvar defaultNumPollinationsToReturn = flag.Int(\"default_num_pollinations_to_return\", 10,\n\t\"Number of randomly selected STH pollination entries to return for sth-pollination requests.\")\n\ntype clock interface {\n\tNow() time.Time\n}\n\ntype realClock struct{}\n\nfunc (realClock) Now() time.Time {\n\treturn time.Now()\n}\n\n\/\/ SignatureVerifierMap is a map of SignatureVerifier by LogID\ntype SignatureVerifierMap map[ct.SHA256Hash]ct.SignatureVerifier\n\n\/\/ Handler for the gossip HTTP requests.\ntype Handler struct {\n\tstorage *Storage\n\tverifiers SignatureVerifierMap\n\tclock clock\n}\n\nfunc writeWrongMethodResponse(rw *http.ResponseWriter, allowed string) {\n\t(*rw).Header().Add(\"Allow\", allowed)\n\t(*rw).WriteHeader(http.StatusMethodNotAllowed)\n}\n\n\/\/ errTmpl is used to escape error body text to avoid reflection attacks.\nvar errTmpl = template.Must(template.New(\"error\").Parse(`
<div>{{.msg}}<\/div>`))\n\nfunc writeErrorResponse(rw *http.ResponseWriter, status int, body string) {\n\t(*rw).WriteHeader(status)\n\terrTmpl.Execute(*rw, map[string]interface{}{\n\t\t\"msg\": body,\n\t})\n}\n\n\/\/ HandleSCTFeedback handles requests POSTed to ...\/sct-feedback.\n\/\/ It attempts to store the provided SCT Feedback.\nfunc (h *Handler) HandleSCTFeedback(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\twriteWrongMethodResponse(&rw, \"POST\")\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(req.Body)\n\tvar feedback SCTFeedback\n\tif err := decoder.Decode(&feedback); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf(\"Invalid SCT Feedback received: %v\", err))\n\t\treturn\n\t}\n\n\t\/\/ TODO(alcutter): 5.1.1 Validate leaf chains up to a trusted root\n\t\/\/ TODO(alcutter): 5.1.1\/2 Verify each SCT is valid and from a known log, discard those which aren't\n\t\/\/ TODO(alcutter): 5.1.1\/3 Discard leaves for domains other than ours.\n\tif err := h.storage.AddSCTFeedback(feedback); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Unable to store feedback: %v\", err))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n}\n\n\/\/ HandleSTHPollination handles requests POSTed to ...\/sth-pollination.\n\/\/ It attempts to store the provided pollination info, and returns a random set of\n\/\/ pollination data from the last 14 days (i.e. \"fresh\" by the definition of the gossip RFC.)\nfunc (h *Handler) HandleSTHPollination(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\twriteWrongMethodResponse(&rw, \"POST\")\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(req.Body)\n\tvar p STHPollination\n\tif err := decoder.Decode(&p); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusBadRequest, fmt.Sprintf(\"Invalid STH Pollination received: %v\", err))\n\t\treturn\n\t}\n\n\tsthToKeep := make([]ct.SignedTreeHead, 0, len(p.STHs))\n\tfor _, sth := range p.STHs {\n\t\tv, found := h.verifiers[sth.LogID]\n\t\tif !found {\n\t\t\tlog.Printf(\"Pollination entry for unknown logID: %s\", sth.LogID.Base64String())\n\t\t\tcontinue\n\t\t}\n\t\tif err := v.VerifySTHSignature(sth); err != nil {\n\t\t\tlog.Printf(\"Failed to verify STH, dropping: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsthToKeep = append(sthToKeep, sth)\n\t}\n\tp.STHs = sthToKeep\n\n\terr := h.storage.AddSTHPollination(p)\n\tif err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't store pollination: %v\", err))\n\t\treturn\n\t}\n\n\tfreshTime := h.clock.Now().AddDate(0, 0, -14)\n\trp, err := h.storage.GetRandomSTHPollination(freshTime, *defaultNumPollinationsToReturn)\n\tif err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't fetch pollination to return: %v\", err))\n\t\treturn\n\t}\n\n\tencoder := json.NewEncoder(rw)\n\tif err := encoder.Encode(*rp); err != nil {\n\t\twriteErrorResponse(&rw, http.StatusInternalServerError, fmt.Sprintf(\"Couldn't encode pollination to return: %v\", err))\n\t\treturn\n\t}\n}\n\n\/\/ NewHandler creates a new Handler object, taking a pointer to a Storage object to\n\/\/ use for storing and retrieving feedback and pollination data, and a\n\/\/ SignatureVerifierMap for verifying signatures from known logs.\nfunc NewHandler(s *Storage, v SignatureVerifierMap) Handler {\n\treturn Handler{\n\t\tstorage: s,\n\t\tverifiers: v,\n\t\tclock: realClock{},\n\t}\n}\n\n
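\/\/ Illustrative sketch (an addition, not part of the original fix): with\n\/\/ html\/template, interpolated values are escaped, so a reflected payload\n\/\/ comes out inert:\n\/\/\n\/\/\terrTmpl.Execute(os.Stdout, map[string]interface{}{\"msg\": \"<script>alert(1)<\/script>\"})\n\/\/\t\/\/ prints: <div>&lt;script&gt;alert(1)&lt;\/script&gt;<\/div>\n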
\/\/ newHandlerWithClock is like NewHandler, but uses the supplied clock when\n\/\/ computing the pollination freshness window (handy for tests).\nfunc newHandlerWithClock(s *Storage, v SignatureVerifierMap, c clock) Handler {\n\treturn Handler{\n\t\tstorage: s,\n\t\tverifiers: v,\n\t\tclock: c,\n\t}\n}\n<|endoftext|>"} {"text":"package cast\n\nfunc Float64(v interface{}) (float64, error) {\n\n\tswitch value := v.(type) {\n\tcase float64er:\n\t\treturn value.Float64()\n\tcase float32:\n\t\treturn float64(value), nil\n\tcase float64:\n\t\treturn float64(value), nil\n\tcase uint8:\n\t\treturn float64(value), nil\n\tcase uint16:\n\t\treturn float64(value), nil\n\tcase uint32:\n\t\treturn float64(value), nil\n\tcase int8:\n\t\treturn float64(value), nil\n\tcase int16:\n\t\treturn float64(value), nil\n\tcase int32:\n\t\treturn float64(value), nil\n\tdefault:\n\t\treturn 0, internalCannotCastComplainer{expectedType:\"float64\", actualType:typeof(value)}\n\t}\n}\n\n\/\/ MustFloat64 is like Float64, except it panic()s on an error.\nfunc MustFloat64(v interface{}) float64 {\n\n\tx, err := Float64(v)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\n\treturn x\n}\n\ntype float64er interface {\n\tFloat64() (float64, error)\n}\nimproved cast.Float64()package cast\n\n\/\/ Float64 will return a float64 when `v` is of type float64, float32, int32, int16, int8, uint32, uint16, uint8 or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tFloat64() (float64, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tFloat32() (float32, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tInt32() (int32, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tInt16() (int16, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tInt8() (int8, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tUint32() (uint32, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tUint16() (uint16, error)\n\/\/\t}\n\/\/\n\/\/ ... that returns successfully, or has a method:\n\/\/\n\/\/\ttype interface {\n\/\/\t\tUint8() (uint8, error)\n\/\/\t}\n\/\/\n\/\/ ... 
that returns successfully.\n\/\/\n\/\/ Else it will return an error.\nfunc Float64(v interface{}) (float64, error) {\n\n\tswitch value := v.(type) {\n\tcase float64er:\n\t\treturn value.Float64()\n\tcase float32er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Float32()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase int32er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Int32()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase int16er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Int16()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase int8er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Int8()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase uint32er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Uint32()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase uint16er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Uint16()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase uint8er:\n\t\treturn func()(float64, error){\n\t\t\tcasted, err := value.Uint8()\n\t\t\tif nil != err {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn float64(casted), nil\n\t\t}()\n\tcase float64:\n\t\treturn float64(value), nil\n\tcase float32:\n\t\treturn float64(value), nil\n\tcase int32:\n\t\treturn float64(value), nil\n\tcase int16:\n\t\treturn float64(value), nil\n\tcase int8:\n\t\treturn float64(value), nil\n\tcase uint32:\n\t\treturn float64(value), nil\n\tcase uint16:\n\t\treturn float64(value), nil\n\tcase uint8:\n\t\treturn float64(value), nil\n\tdefault:\n\t\treturn 0, internalCannotCastComplainer{expectedType:\"float64\", actualType:typeof(value)}\n\t}\n}\n\n\/\/ MustFloat64 is like Float64, except it panic()s on an error.\nfunc MustFloat64(v interface{}) float64 {\n\n\tx, err := Float64(v)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\n\treturn x\n}\n\ntype float64er interface {\n\tFloat64() (float64, error)\n}\n<|endoftext|>"} {"text":"package tracking\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\thost = \"http:\/\/api.mixpanel.com\"\n\ttrackPath = \"track\"\n\tengagePath = \"engage\"\n)\n\n\/\/ engage constants\nconst (\n\tEngageSet = \"set\"\n\tEngageSetOnce = \"set_once\"\n\tEngageAdd = \"add\"\n\tEngageAppend = \"append\"\n\tEngageUnion = \"union\"\n\tEngageUnset = \"unset\"\n\tEngageDelete = \"delete\"\n)\n\ntype client struct {\n\ttoken string\n}\n\ntype eventData struct {\n\tEvent string `json:\"event\"`\n\tProps map[string]interface{} `json:\"properties\"`\n}\n\ntype engageData struct {\n\tToken string `json:\"$token\"`\n\tTime int64 `json:\"$time\"`\n\tId int64 `json:\"$distinct_id\"`\n\tIp string `json:\"$ip,omitempty\"`\n\tSet interface{} `json:\"$set,omitempty\"`\n\tSetOnce interface{} `json:\"$set_once,omitempty\"`\n\tAdd interface{} `json:\"$add,omitempty\"`\n\tAppend interface{} `json:\"$append,omitempty\"`\n\tUnion interface{} `json:\"$union,omitempty\"`\n\tUnset interface{} `json:\"$unset,omitempty\"`\n\tDelete interface{} `json:\"$delete,omitempty\"`\n}\n\nfunc New(token string) *client {\n\treturn &client{\n\t\ttoken: token,\n\t}\n}\n\nfunc (mp *client) 
Track(uid int64, e string, p map[string]interface{}, params ...map[string]interface{}) bool {\n\tdata := &eventData{\n\t\tEvent: e,\n\t\tProps: map[string]interface{}{\n\t\t\t\"time\": time.Now().Unix(),\n\t\t\t\"token\": mp.token,\n\t\t},\n\t}\n\tif uid != 0 {\n\t\tdata.Props[\"distinct_id\"] = strconv.Itoa(int(uid))\n\t}\n\tfor k, v := range p {\n\t\tdata.Props[k] = v\n\t}\n\n\tmarshaledData, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tu := fmt.Sprintf(\"%s\/%s\/?data=%s\", host, trackPath,\n\t\tbase64.StdEncoding.EncodeToString(marshaledData))\n\n\tparameters := url.Values{}\n\t\/\/ iterate over any query parameters\n\tfor _, val := range params {\n\t\tfor k, v := range val {\n\t\t\tif str, ok := v.(string); ok {\n\t\t\t\t\/* act on str *\/\n\t\t\t\tparameters.Add(k, str)\n\t\t\t} else {\n\t\t\t\t\/* not string - int? *\/\n\t\t\t\tif in, ok := v.(int); ok {\n\t\t\t\t\tparameters.Add(k, strconv.Itoa(in))\n\t\t\t\t} else {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\t\/\/ append encoded params to url if any\n\tif qs := parameters.Encode(); qs != \"\" {\n\t\tu += \"&\" + qs\n\t}\n\t\/\/ send request\n\t_, err = http.Get(u)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (mp *client) Engage(uid int64, p map[string]interface{}, ip string) error {\n\tprofileData := &engageData{\n\t\tToken: mp.token,\n\t\tTime: time.Now().Unix(),\n\t}\n\tif uid != 0 {\n\t\tprofileData.Id = uid\n\t}\n\tif ip != \"\" {\n\t\tprofileData.Ip = ip\n\t}\n\t\/\/ should probably just add separate methods for each of these\n\tfor k, v := range p {\n\t\tswitch k {\n\t\tcase EngageSet:\n\t\t\tprofileData.Set = v\n\t\t\tbreak\n\t\tcase EngageSetOnce:\n\t\t\tprofileData.SetOnce = v\n\t\t\tbreak\n\t\tcase EngageAdd:\n\t\t\tprofileData.Add = v\n\t\t\tbreak\n\t\tcase EngageAppend:\n\t\t\tprofileData.Append = v\n\t\t\tbreak\n\t\tcase EngageUnion:\n\t\t\tprofileData.Union = v\n\t\t\tbreak\n\t\tcase EngageUnset:\n\t\t\tprofileData.Unset = v\n\t\t\tbreak\n\t\tcase EngageDelete:\n\t\t\tprofileData.Delete = v\n\t\t\tbreak\n\t\t}\n\t}\n\n\tmarshalledData, err := json.Marshal(profileData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"%s\/%s\/?data=%s\", host, engagePath, base64.StdEncoding.EncodeToString(marshalledData))\n\n\t_, err = http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\nexplicitly close response body on http.Get reqspackage tracking\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\thost = \"http:\/\/api.mixpanel.com\"\n\ttrackPath = \"track\"\n\tengagePath = \"engage\"\n)\n\n\/\/ engage constants\nconst (\n\tEngageSet = \"set\"\n\tEngageSetOnce = \"set_once\"\n\tEngageAdd = \"add\"\n\tEngageAppend = \"append\"\n\tEngageUnion = \"union\"\n\tEngageUnset = \"unset\"\n\tEngageDelete = \"delete\"\n)\n\ntype client struct {\n\ttoken string\n}\n\ntype eventData struct {\n\tEvent string `json:\"event\"`\n\tProps map[string]interface{} `json:\"properties\"`\n}\n\ntype engageData struct {\n\tToken string `json:\"$token\"`\n\tTime int64 `json:\"$time\"`\n\tId int64 `json:\"$distinct_id\"`\n\tIp string `json:\"$ip,omitempty\"`\n\tSet interface{} `json:\"$set,omitempty\"`\n\tSetOnce interface{} `json:\"$set_once,omitempty\"`\n\tAdd interface{} `json:\"$add,omitempty\"`\n\tAppend interface{} `json:\"$append,omitempty\"`\n\tUnion interface{} `json:\"$union,omitempty\"`\n\tUnset interface{} `json:\"$unset,omitempty\"`\n\tDelete interface{} 
`json:\"$delete,omitempty\"`\n}\n\nfunc New(token string) *client {\n\treturn &client{\n\t\ttoken: token,\n\t}\n}\n\nfunc (mp *client) Track(uid int64, e string, p map[string]interface{}, params ...map[string]interface{}) bool {\n\tdata := &eventData{\n\t\tEvent: e,\n\t\tProps: map[string]interface{}{\n\t\t\t\"time\": time.Now().Unix(),\n\t\t\t\"token\": mp.token,\n\t\t},\n\t}\n\tif uid != 0 {\n\t\tdata.Props[\"distinct_id\"] = strconv.Itoa(int(uid))\n\t}\n\tfor k, v := range p {\n\t\tdata.Props[k] = v\n\t}\n\n\tmarshaledData, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tu := fmt.Sprintf(\"%s\/%s\/?data=%s\", host, trackPath,\n\t\tbase64.StdEncoding.EncodeToString(marshaledData))\n\n\tparameters := url.Values{}\n\t\/\/ iterate over any query parameters\n\tfor _, val := range params {\n\t\tfor k, v := range val {\n\t\t\tif str, ok := v.(string); ok {\n\t\t\t\t\/* act on str *\/\n\t\t\t\tparameters.Add(k, str)\n\t\t\t} else {\n\t\t\t\t\/* not string - int? *\/\n\t\t\t\tif in, ok := v.(int); ok {\n\t\t\t\t\tparameters.Add(k, strconv.Itoa(in))\n\t\t\t\t} else {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\t\/\/ append encoded params to url if any\n\tif qs := parameters.Encode(); qs != \"\" {\n\t\tu += \"&\" + qs\n\t}\n\t\/\/ send request\n\tresp, err := http.Get(u)\n\tif err != nil {\n\t\treturn false\n\t}\n\tresp.Body.Close()\n\treturn true\n}\n\nfunc (mp *client) Engage(uid int64, p map[string]interface{}, ip string) error {\n\tprofileData := &engageData{\n\t\tToken: mp.token,\n\t\tTime: time.Now().Unix(),\n\t}\n\tif uid != 0 {\n\t\tprofileData.Id = uid\n\t}\n\tif ip != \"\" {\n\t\tprofileData.Ip = ip\n\t}\n\t\/\/ should probably just add separate methods for each of these\n\tfor k, v := range p {\n\t\tswitch k {\n\t\tcase EngageSet:\n\t\t\tprofileData.Set = v\n\t\t\tbreak\n\t\tcase EngageSetOnce:\n\t\t\tprofileData.SetOnce = v\n\t\t\tbreak\n\t\tcase EngageAdd:\n\t\t\tprofileData.Add = v\n\t\t\tbreak\n\t\tcase EngageAppend:\n\t\t\tprofileData.Append = v\n\t\t\tbreak\n\t\tcase EngageUnion:\n\t\t\tprofileData.Union = v\n\t\t\tbreak\n\t\tcase EngageUnset:\n\t\t\tprofileData.Unset = v\n\t\t\tbreak\n\t\tcase EngageDelete:\n\t\t\tprofileData.Delete = v\n\t\t\tbreak\n\t\t}\n\t}\n\n\tmarshalledData, err := json.Marshal(profileData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"%s\/%s\/?data=%s\", host, engagePath, base64.StdEncoding.EncodeToString(marshalledData))\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\treturn nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/sdgoij\/gobbb\"\n)\n\nfunc HandleConnect(c *Client, event WsEvent) error {\n\turl, secret := \"\", \"\"\n\tif u, t := event.Data[\"url\"]; t && nil != u {\n\t\turl = u.(string)\n\t}\n\tif s, t := event.Data[\"secret\"]; t && nil != s {\n\t\tsecret = s.(string)\n\t}\n\tb3, err := bbb.New(url, secret)\n\tev := WsEvent{\"connected\", WsEventData{\n\t\t\"status\": \"success\",\n\t\t\"version\": \"\",\n\t}}\n\tif err == nil {\n\t\tif version := b3.ServerVersion(); \"\" == version {\n\t\t\tev.Data[\"status\"] = \"failure\"\n\t\t} else {\n\t\t\tev.Data[\"version\"] = version\n\t\t\tc.b3 = b3\n\t\t}\n\t}\n\tev.Data[\"error\"] = err.Error()\n\tc.events <- ev\n\treturn err\n}\n\nfunc HandleCreate(c *Client, event WsEvent) error { return nil }\nfunc HandleJoinURL(c *Client, event WsEvent) error { return nil }\nfunc 
HandleEnd(c *Client, event WsEvent) error { return nil }\n\nvar handler *WsEventHandler = &WsEventHandler{\n\th: map[string]WsEventHandlerFunc{\n\t\t\"connect\": HandleConnect,\n\t\t\"create\": HandleCreate,\n\t\t\"joinURL\": HandleJoinURL,\n\t\t\"end\": HandleEnd,\n\t},\n\tc: map[*Client]struct{}{},\n}\n\nfunc init() {\n\thttp.Handle(\"\/ws\", websocket.Server{Handler: HandleWS})\n}\n\nfunc HandleWS(ws *websocket.Conn) {\n\tremoteAddr := ws.Request().RemoteAddr\n\tlog.Printf(\"Connection from %s opened\", remoteAddr)\n\n\tclient := &Client{\n\t\taddress: remoteAddr,\n\t\tconn: ws,\n\t\tdone: make(chan struct{}),\n\t\tevents: make(chan WsEvent),\n\t}\n\n\thandler.AddClient(client)\n\n\tdefer func() {\n\t\tlog.Println(\"Connection from %s closed\", remoteAddr)\n\t\thandler.RemoveClient(client)\n\t}()\n\n\tgo client.Writer()\n\tclient.Reader()\n}\n\ntype Client struct {\n\taddress string\n\tconn *websocket.Conn\n\tb3 bbb.BigBlueButton\n\tdone chan struct{}\n\tevents chan WsEvent\n\thandler *WsEventHandler\n\n\tId string\n}\n\nfunc (c *Client) Reader() {\n\tfor {\n\t\tvar ev WsEvent\n\t\tif err := websocket.JSON.Receive(c.conn, &ev); nil != err {\n\t\t\tif io.EOF == err {\n\t\t\t\tlog.Printf(\"Reader[%s]: %s\", c.address, err)\n\t\t\t\tc.done <- struct{}{}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err := c.handler.Handle(c, ev); nil != err {\n\t\t\tlog.Printf(\"Reader[%s]: %s\", c.address, err)\n\t\t}\n\t}\n}\n\nfunc (c *Client) Writer() {\n\tfor {\n\t\tselect {\n\t\tcase e := <-c.events:\n\t\t\tlog.Printf(\"Writer[%s]: %#v\", c.address, e)\n\t\t\tif err := websocket.JSON.Send(c.conn, e); nil != err {\n\t\t\t\tlog.Printf(\"Writer[%s]: %s\", c.address, err)\n\t\t\t}\n\t\tcase <-c.done:\n\t\t\tlog.Printf(\"Writer[%s]: exit\", c.address)\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype WsEventData map[string]interface{}\n\ntype WsEvent struct {\n\tEvent string `json:\"event\"`\n\tData WsEventData `json:\"data\"`\n}\n\ntype WsEventHandlerFunc func(*Client, WsEvent) error\n\ntype WsEventHandler struct {\n\th map[string]WsEventHandlerFunc\n\tc map[*Client]struct{}\n\tm sync.RWMutex\n}\n\nfunc (ws *WsEventHandler) Handle(c *Client, ev WsEvent) error {\n\tif h, t := ws.h[ev.Event]; t {\n\t\treturn h(c, ev)\n\t}\n\treturn newWsEventHandlerNotFound(ev.Event)\n}\n\nfunc (ws *WsEventHandler) AddClient(c *Client) {\n\tws.m.Lock()\n\tdefer ws.m.Unlock()\n\tif _, t := ws.c[c]; !t {\n\t\tws.c[c] = struct{}{}\n\t\tc.handler = ws\n\t}\n}\n\nfunc (ws *WsEventHandler) RemoveClient(c *Client) {\n\tws.m.Lock()\n\tdefer ws.m.Unlock()\n\tif _, t := ws.c[c]; t {\n\t\tdelete(ws.c, c)\n\t\tc.handler = nil\n\t}\n}\n\nfunc (ws *WsEventHandler) Broadcast(event WsEvent) error {\n\tws.m.RLock()\n\tdefer ws.m.RUnlock()\n\tfor peer, _ := range ws.c {\n\t\tpeer.events <- event\n\t}\n\treturn nil\n}\n\ntype WsEventHandlerNotFound string\n\nfunc (e WsEventHandlerNotFound) Error() string {\n\treturn \"Event Handler '\" + string(e) + \"' not found!\"\n}\n\nfunc newWsEventHandlerNotFound(e string) WsEventHandlerNotFound {\n\treturn WsEventHandlerNotFound(e)\n}\nImplement HandleCreate (WebSocket handler)package main\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"github.com\/sdgoij\/gobbb\"\n)\n\nfunc HandleConnect(c *Client, event WsEvent) error {\n\turl, secret := \"\", \"\"\n\tif u, t := event.Data[\"url\"]; t && nil != u {\n\t\turl = u.(string)\n\t}\n\tif s, t := event.Data[\"secret\"]; t && nil != s {\n\t\tsecret = s.(string)\n\t}\n\tb3, err := bbb.New(url, secret)\n\tev := 
WsEvent{\"connected\", WsEventData{\n\t\t\"status\": \"success\",\n\t\t\"version\": \"\",\n\t}}\n\tif err == nil {\n\t\tif version := b3.ServerVersion(); \"\" == version {\n\t\t\tev.Data[\"status\"] = \"failure\"\n\t\t} else {\n\t\t\tev.Data[\"version\"] = version\n\t\t\tc.b3 = b3\n\t\t}\n\t}\n\tev.Data[\"error\"] = err.Error()\n\tc.events <- ev\n\treturn err\n}\n\nfunc HandleCreate(c *Client, event WsEvent) error {\n\tid := \"\"\n\tif i, t := event.Data[\"id\"]; t && nil != i {\n\t\tid = i.(string)\n\t}\n\tm, err := c.b3.Create(id, bbb.EmptyOptions)\n\tif nil != err {\n\t\treturn err\n\t}\n\tc.events <- WsEvent{\"created\", WsEventData{\n\t\t\"id\": m.Id,\n\t\t\"created\": m.CreateTime.Unix(),\n\t\t\"attendeePW\": m.AttendeePW,\n\t\t\"moderatorPW\": m.ModeratorPW,\n\t\t\"forcedEnd\": m.ForcedEnd,\n\t}}\n\treturn nil\n}\n\nfunc HandleJoinURL(c *Client, event WsEvent) error { return nil }\nfunc HandleEnd(c *Client, event WsEvent) error { return nil }\n\nvar handler *WsEventHandler = &WsEventHandler{\n\th: map[string]WsEventHandlerFunc{\n\t\t\"connect\": HandleConnect,\n\t\t\"create\": HandleCreate,\n\t\t\"joinURL\": HandleJoinURL,\n\t\t\"end\": HandleEnd,\n\t},\n\tc: map[*Client]struct{}{},\n}\n\nfunc init() {\n\thttp.Handle(\"\/ws\", websocket.Server{Handler: HandleWS})\n}\n\nfunc HandleWS(ws *websocket.Conn) {\n\tremoteAddr := ws.Request().RemoteAddr\n\tlog.Printf(\"Connection from %s opened\", remoteAddr)\n\n\tclient := &Client{\n\t\taddress: remoteAddr,\n\t\tconn: ws,\n\t\tdone: make(chan struct{}),\n\t\tevents: make(chan WsEvent),\n\t}\n\n\thandler.AddClient(client)\n\n\tdefer func() {\n\t\tlog.Printf(\"Connection from %s closed\", remoteAddr)\n\t\thandler.RemoveClient(client)\n\t}()\n\n\tgo client.Writer()\n\tclient.Reader()\n}\n\ntype Client struct {\n\taddress string\n\tconn *websocket.Conn\n\tb3 bbb.BigBlueButton\n\tdone chan struct{}\n\tevents chan WsEvent\n\thandler *WsEventHandler\n\n\tId string\n}\n\nfunc (c *Client) Reader() {\n\tfor {\n\t\tvar ev WsEvent\n\t\tif err := websocket.JSON.Receive(c.conn, &ev); nil != err {\n\t\t\tif io.EOF == err {\n\t\t\t\tlog.Printf(\"Reader[%s]: %s\", c.address, err)\n\t\t\t\tc.done <- struct{}{}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err := c.handler.Handle(c, ev); nil != err {\n\t\t\tlog.Printf(\"Reader[%s]: %s\", c.address, err)\n\t\t}\n\t}\n}\n\nfunc (c *Client) Writer() {\n\tfor {\n\t\tselect {\n\t\tcase e := <-c.events:\n\t\t\tlog.Printf(\"Writer[%s]: %#v\", c.address, e)\n\t\t\tif err := websocket.JSON.Send(c.conn, e); nil != err {\n\t\t\t\tlog.Printf(\"Writer[%s]: %s\", c.address, err)\n\t\t\t}\n\t\tcase <-c.done:\n\t\t\tlog.Printf(\"Writer[%s]: exit\", c.address)\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype WsEventData map[string]interface{}\n\ntype WsEvent struct {\n\tEvent string `json:\"event\"`\n\tData WsEventData `json:\"data\"`\n}\n\ntype WsEventHandlerFunc func(*Client, WsEvent) error\n\ntype WsEventHandler struct {\n\th map[string]WsEventHandlerFunc\n\tc map[*Client]struct{}\n\tm sync.RWMutex\n}\n\nfunc (ws *WsEventHandler) Handle(c *Client, ev WsEvent) error {\n\tif h, t := ws.h[ev.Event]; t {\n\t\treturn h(c, ev)\n\t}\n\treturn newWsEventHandlerNotFound(ev.Event)\n}\n\nfunc (ws *WsEventHandler) AddClient(c *Client) {\n\tws.m.Lock()\n\tdefer ws.m.Unlock()\n\tif _, t := ws.c[c]; !t {\n\t\tws.c[c] = struct{}{}\n\t\tc.handler = ws\n\t}\n}\n\nfunc (ws *WsEventHandler) RemoveClient(c *Client) {\n\tws.m.Lock()\n\tdefer ws.m.Unlock()\n\tif _, t := ws.c[c]; t {\n\t\tdelete(ws.c, c)\n\t\tc.handler = nil\n\t}\n}\n\nfunc (ws *WsEventHandler) 
Broadcast(event WsEvent) error {\n\tws.m.RLock()\n\tdefer ws.m.RUnlock()\n\tfor peer, _ := range ws.c {\n\t\tpeer.events <- event\n\t}\n\treturn nil\n}\n\ntype WsEventHandlerNotFound string\n\nfunc (e WsEventHandlerNotFound) Error() string {\n\treturn \"Event Handler '\" + string(e) + \"' not found!\"\n}\n\nfunc newWsEventHandlerNotFound(e string) WsEventHandlerNotFound {\n\treturn WsEventHandlerNotFound(e)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/juju\/testing\"\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n)\n\ntype SSHCommandSuite struct {\n\ttesting.IsolationSuite\n\toriginalPath string\n\ttestbin string\n\tfakessh string\n\tfakescp string\n\techoCommand string\n\techoScript string\n\tclient ssh.Client\n}\n\nvar _ = gc.Suite(&SSHCommandSuite{})\n\nfunc (s *SSHCommandSuite) SetUpSuite(c *gc.C) {\n\ts.IsolationSuite.SetUpSuite(c)\n\ts.echoCommand = \"\/bin\/echo\"\n\ts.echoScript = fmt.Sprintf(\"#!\/bin\/sh\\n%s $0 \\\"$@\\\" | \/usr\/bin\/tee $0.args\", s.echoCommand)\n}\n\nfunc (s *SSHCommandSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\ts.testbin = c.MkDir()\n\ts.fakessh = filepath.Join(s.testbin, \"ssh\")\n\ts.fakescp = filepath.Join(s.testbin, \"scp\")\n\terr := ioutil.WriteFile(s.fakessh, []byte(s.echoScript), 0755)\n\tc.Assert(err, gc.IsNil)\n\terr = ioutil.WriteFile(s.fakescp, []byte(s.echoScript), 0755)\n\tc.Assert(err, gc.IsNil)\n\ts.PatchEnvPathPrepend(s.testbin)\n\ts.client, err = ssh.NewOpenSSHClient()\n\tc.Assert(err, gc.IsNil)\n\ts.PatchValue(ssh.DefaultIdentities, nil)\n}\n\nfunc (s *SSHCommandSuite) command(args ...string) *ssh.Cmd {\n\treturn s.commandOptions(args, nil)\n}\n\nfunc (s *SSHCommandSuite) commandOptions(args []string, opts *ssh.Options) *ssh.Cmd {\n\treturn s.client.Command(\"localhost\", args, opts)\n}\n\nfunc (s *SSHCommandSuite) assertCommandArgs(c *gc.C, cmd *ssh.Cmd, expected string) {\n\tout, err := cmd.Output()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(strings.TrimSpace(string(out)), gc.Equals, expected)\n}\n\nfunc (s *SSHCommandSuite) TestDefaultClient(c *gc.C) {\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.OpenSSHClient{})\n\ts.PatchEnvironment(\"PATH\", \"\")\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.GoCryptoClient{})\n}\n\nfunc (s *SSHCommandSuite) TestCommandSSHPass(c *gc.C) {\n\t\/\/ First create a fake sshpass, but don't set $SSHPASS\n\tfakesshpass := filepath.Join(s.testbin, \"sshpass\")\n\terr := ioutil.WriteFile(fakesshpass, []byte(s.echoScript), 0755)\n\ts.assertCommandArgs(c, s.command(s.echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n\t\/\/ Now set $SSHPASS.\n\ts.PatchEnvironment(\"SSHPASS\", \"anyoldthing\")\n\ts.assertCommandArgs(c, s.command(s.echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -e ssh -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\tfakesshpass, s.echoCommand),\n\t)\n\t\/\/ Finally, remove sshpass from $PATH.\n\terr = os.Remove(fakesshpass)\n\tc.Assert(err, gc.IsNil)\n\ts.assertCommandArgs(c, s.command(s.echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 
123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommand(c *gc.C) {\n\ts.assertCommandArgs(c, s.command(s.echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandEnablePTY(c *gc.C) {\n\tvar opts ssh.Options\n\topts.EnablePTY()\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -t -t localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandAllowPasswordAuthentication(c *gc.C) {\n\tvar opts ssh.Options\n\topts.AllowPasswordAuthentication()\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandPort(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetPort(2022)\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -p 2022 localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCopy(c *gc.C) {\n\tvar opts ssh.Options\n\topts.EnablePTY()\n\topts.AllowPasswordAuthentication()\n\topts.SetIdentities(\"x\", \"y\")\n\topts.SetPort(2022)\n\terr := s.client.Copy([]string{\"\/tmp\/blah\", \"foo@bar.com:baz\"}, &opts)\n\tc.Assert(err, gc.IsNil)\n\tout, err := ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, gc.IsNil)\n\t\/\/ EnablePTY has no effect for Copy\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o StrictHostKeyChecking no -i x -i y -P 2022 \/tmp\/blah foo@bar.com:baz\\n\")\n\n\t\/\/ Try passing extra args\n\terr = s.client.Copy([]string{\"\/tmp\/blah\", \"foo@bar.com:baz\", \"-r\", \"-v\"}, &opts)\n\tc.Assert(err, gc.IsNil)\n\tout, err = ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o StrictHostKeyChecking no -i x -i y -P 2022 \/tmp\/blah foo@bar.com:baz -r -v\\n\")\n\n\t\/\/ Try interspersing extra args\n\terr = s.client.Copy([]string{\"-r\", \"\/tmp\/blah\", \"-v\", \"foo@bar.com:baz\"}, &opts)\n\tc.Assert(err, gc.IsNil)\n\tout, err = ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o StrictHostKeyChecking no -i x -i y -P 2022 -r \/tmp\/blah -v foo@bar.com:baz\\n\")\n}\n\nfunc (s *SSHCommandSuite) TestCommandClientKeys(c *gc.C) {\n\tdefer overrideGenerateKey(c).Restore()\n\tclientKeysDir := c.MkDir()\n\tdefer ssh.ClearClientKeys()\n\terr := ssh.LoadClientKeys(clientKeysDir)\n\tc.Assert(err, gc.IsNil)\n\tck := filepath.Join(clientKeysDir, \"juju_id_rsa\")\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, ck, 
s.echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandError(c *gc.C) {\n\tvar opts ssh.Options\n\terr := ioutil.WriteFile(s.fakessh, []byte(\"#!\/bin\/sh\\nexit 42\"), 0755)\n\tc.Assert(err, gc.IsNil)\n\tcommand := s.client.Command(\"ignored\", []string{s.echoCommand, \"foo\"}, &opts)\n\terr = command.Run()\n\tc.Assert(cmd.IsRcPassthroughError(err), gc.Equals, true)\n}\n\nfunc (s *SSHCommandSuite) TestCommandDefaultIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\ttempdir := c.MkDir()\n\tdef1 := filepath.Join(tempdir, \"def1\")\n\tdef2 := filepath.Join(tempdir, \"def2\")\n\ts.PatchValue(ssh.DefaultIdentities, []string{def1, def2})\n\t\/\/ If no identities are specified, then the defaults aren't added.\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, s.echoCommand),\n\t)\n\t\/\/ If identities are specified, then the defaults are must added.\n\t\/\/ Only the defaults that exist on disk will be added.\n\terr := ioutil.WriteFile(def2, nil, 0644)\n\tc.Assert(err, gc.IsNil)\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{s.echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, def2, s.echoCommand),\n\t)\n}\nConstant echo commands.\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/juju\/testing\"\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n)\n\nconst (\n\techoCommand = \"\/bin\/echo\"\n\techoScript = \"#!\/bin\/sh\\n\" + echoCommand + \" $0 \\\"$@\\\" | \/usr\/bin\/tee $0.args\"\n)\n\ntype SSHCommandSuite struct {\n\ttesting.IsolationSuite\n\toriginalPath string\n\ttestbin string\n\tfakessh string\n\tfakescp string\n\tclient ssh.Client\n}\n\nvar _ = gc.Suite(&SSHCommandSuite{})\n\nfunc (s *SSHCommandSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\ts.testbin = c.MkDir()\n\ts.fakessh = filepath.Join(s.testbin, \"ssh\")\n\ts.fakescp = filepath.Join(s.testbin, \"scp\")\n\terr := ioutil.WriteFile(s.fakessh, []byte(echoScript), 0755)\n\tc.Assert(err, gc.IsNil)\n\terr = ioutil.WriteFile(s.fakescp, []byte(echoScript), 0755)\n\tc.Assert(err, gc.IsNil)\n\ts.PatchEnvPathPrepend(s.testbin)\n\ts.client, err = ssh.NewOpenSSHClient()\n\tc.Assert(err, gc.IsNil)\n\ts.PatchValue(ssh.DefaultIdentities, nil)\n}\n\nfunc (s *SSHCommandSuite) command(args ...string) *ssh.Cmd {\n\treturn s.commandOptions(args, nil)\n}\n\nfunc (s *SSHCommandSuite) commandOptions(args []string, opts *ssh.Options) *ssh.Cmd {\n\treturn s.client.Command(\"localhost\", args, opts)\n}\n\nfunc (s *SSHCommandSuite) assertCommandArgs(c *gc.C, cmd *ssh.Cmd, expected string) {\n\tout, err := cmd.Output()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(strings.TrimSpace(string(out)), gc.Equals, expected)\n}\n\nfunc (s *SSHCommandSuite) TestDefaultClient(c *gc.C) {\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.OpenSSHClient{})\n\ts.PatchEnvironment(\"PATH\", \"\")\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.GoCryptoClient{})\n}\n\nfunc (s *SSHCommandSuite) TestCommandSSHPass(c *gc.C) {\n\t\/\/ First create a fake sshpass, but don't 
set $SSHPASS\n\tfakesshpass := filepath.Join(s.testbin, \"sshpass\")\n\terr := ioutil.WriteFile(fakesshpass, []byte(echoScript), 0755)\n\tc.Assert(err, gc.IsNil)\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n\t\/\/ Now set $SSHPASS.\n\ts.PatchEnvironment(\"SSHPASS\", \"anyoldthing\")\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -e ssh -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\tfakesshpass, echoCommand),\n\t)\n\t\/\/ Finally, remove sshpass from $PATH.\n\terr = os.Remove(fakesshpass)\n\tc.Assert(err, gc.IsNil)\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommand(c *gc.C) {\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandEnablePTY(c *gc.C) {\n\tvar opts ssh.Options\n\topts.EnablePTY()\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -t -t localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandAllowPasswordAuthentication(c *gc.C) {\n\tvar opts ssh.Options\n\topts.AllowPasswordAuthentication()\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandPort(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetPort(2022)\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -p 2022 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\n
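\/\/ Usage sketch (added illustration; API shapes taken from the tests above):\n\/\/ options compose on a single command, e.g.:\n\/\/\n\/\/\tvar opts ssh.Options\n\/\/\topts.SetIdentities(\"x\")\n\/\/\topts.SetPort(2022)\n\/\/\tcmd := client.Command(\"localhost\", []string{\"\/bin\/echo\", \"123\"}, &opts)\n\/\/\t\/\/ producing something like: ssh ... -i x -p 2022 localhost \/bin\/echo 123\n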
\"foo@bar.com:baz\"}, &opts)\n\tc.Assert(err, gc.IsNil)\n\tout, err = ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o StrictHostKeyChecking no -i x -i y -P 2022 -r \/tmp\/blah -v foo@bar.com:baz\\n\")\n}\n\nfunc (s *SSHCommandSuite) TestCommandClientKeys(c *gc.C) {\n\tdefer overrideGenerateKey(c).Restore()\n\tclientKeysDir := c.MkDir()\n\tdefer ssh.ClearClientKeys()\n\terr := ssh.LoadClientKeys(clientKeysDir)\n\tc.Assert(err, gc.IsNil)\n\tck := filepath.Join(clientKeysDir, \"juju_id_rsa\")\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, ck, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandError(c *gc.C) {\n\tvar opts ssh.Options\n\terr := ioutil.WriteFile(s.fakessh, []byte(\"#!\/bin\/sh\\nexit 42\"), 0755)\n\tc.Assert(err, gc.IsNil)\n\tcommand := s.client.Command(\"ignored\", []string{echoCommand, \"foo\"}, &opts)\n\terr = command.Run()\n\tc.Assert(cmd.IsRcPassthroughError(err), gc.Equals, true)\n}\n\nfunc (s *SSHCommandSuite) TestCommandDefaultIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\ttempdir := c.MkDir()\n\tdef1 := filepath.Join(tempdir, \"def1\")\n\tdef2 := filepath.Join(tempdir, \"def2\")\n\ts.PatchValue(ssh.DefaultIdentities, []string{def1, def2})\n\t\/\/ If no identities are specified, then the defaults aren't added.\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n\t\/\/ If identities are specified, then the defaults are must added.\n\t\/\/ Only the defaults that exist on disk will be added.\n\terr := ioutil.WriteFile(def2, nil, 0644)\n\tc.Assert(err, gc.IsNil)\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o StrictHostKeyChecking no -o PasswordAuthentication no -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, def2, echoCommand),\n\t)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. 
All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage fs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"bazil.org\/fuse\"\n\tfusefs \"bazil.org\/fuse\/fs\"\n)\n\n\/\/ A remote object's name and metadata, along with a local temporary file that\n\/\/ contains its contents (when initialized).\n\/\/\n\/\/ TODO(jacobsa): After becoming comfortable with the representation of dir and\n\/\/ its concurrency protection, audit this file and make sure it is up to par.\ntype file struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tlogger *log.Logger\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tobjectName string\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tmu syncutil.InvariantMutex\n\n\t\/\/ A local temporary file containing the current contents of the logical\n\t\/\/ file. Lazily created. When non-nil, this is authoritative.\n\ttempFile *os.File \/\/ GUARDED_BY(mu)\n\n\t\/\/ Set to true when we need to flush tempFile to GCS before allowing the user\n\t\/\/ to successfully close the file. false implies that the GCS object is up to\n\t\/\/ date (or has been modified only by a foreign machine).\n\t\/\/\n\t\/\/ INVARIANT: If true, then tempFile != nil\n\ttempFileDirty bool \/\/ GUARDED_BY(mu)\n\n\t\/\/ When tempFile == nil, the current size of the object named objectName on\n\t\/\/ GCS, as far as we are aware.\n\t\/\/\n\t\/\/ INVARIANT: If tempFile != nil, then remoteSize == 0\n\tremoteSize uint64 \/\/ GUARDED_BY(mu)\n}\n\n\/\/ Make sure file implements the interfaces we think it does.\nvar (\n\t_ fusefs.Node = &file{}\n\n\t_ fusefs.Handle = &file{}\n\t_ fusefs.HandleFlusher = &file{}\n\t_ fusefs.HandleReader = &file{}\n\t_ fusefs.HandleReleaser = &file{}\n\t_ fusefs.HandleWriter = &file{}\n)\n\nfunc newFile(\n\tlogger *log.Logger,\n\tbucket gcs.Bucket,\n\tobjectName string,\n\tremoteSize uint64) *file {\n\tf := &file{\n\t\tlogger: logger,\n\t\tbucket: bucket,\n\t\tobjectName: objectName,\n\t\tremoteSize: remoteSize,\n\t}\n\n\tf.mu = syncutil.NewInvariantMutex(func() { f.checkInvariants() })\n\n\treturn f\n}\n\nfunc (f *file) checkInvariants() {\n\tif f.tempFileDirty && f.tempFile == nil {\n\t\tpanic(\"Expected !tempFileDirty when tempFile == nil.\")\n\t}\n\n\tif f.tempFile != nil && f.remoteSize != 0 {\n\t\tpanic(\"Expected remoteSize == 0 when tempFile != nil.\")\n\t}\n}\n\nfunc (f *file) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\t\/\/ TODO(jacobsa): Expose ACLs from GCS?\n\t\tMode: 0400,\n\t\t\/\/ TODO(jacobsa): Catch the bug here (that this may be wrong when\n\t\t\/\/ f.tempFile != nil) with a test, then fix it.\n\t\tSize: f.remoteSize,\n\t}\n}\n\n\/\/ If the file contents have not yet been fetched to a temporary file, fetch\n\/\/ them.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(f.mu)\nfunc (f *file) ensureTempFile(ctx context.Context) error {\n\t\/\/ Do we already have a file?\n\tif f.tempFile != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Create a temporary file.\n\ttempFile, err := ioutil.TempFile(\"\", \"gcsfuse\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ioutil.TempFile: %v\", err)\n\t}\n\n\t\/\/ Create a reader for the 
object.\n\treadCloser, err := f.bucket.NewReader(ctx, f.objectName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bucket.NewReader: %v\", err)\n\t}\n\n\tdefer readCloser.Close()\n\n\t\/\/ Copy the object contents into the file.\n\tif _, err := io.Copy(tempFile, readCloser); err != nil {\n\t\treturn fmt.Errorf(\"io.Copy: %v\", err)\n\t}\n\n\t\/\/ Save the file for later.\n\tf.tempFile = tempFile\n\n\t\/\/ remoteSize is no longer authoritative.\n\tf.remoteSize = 0\n\n\treturn nil\n}\n\n\/\/ Throw away the local temporary file, if any.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Is there a file to close?\n\tif f.tempFile == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Close it, after grabbing its path.\n\tpath := f.tempFile.Name()\n\tif err := f.tempFile.Close(); err != nil {\n\t\tf.logger.Println(\"Error closing temp file:\", err)\n\t}\n\n\t\/\/ Attempt to delete it.\n\tif err := os.Remove(path); err != nil {\n\t\tf.logger.Println(\"Error deleting temp file:\", err)\n\t}\n\n\tf.tempFile = nil\n\treturn nil\n}\n\n\/\/ Ensure that the local temporary file is initialized, then read from it.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Read(\n\tctx context.Context,\n\treq *fuse.ReadRequest,\n\tresp *fuse.ReadResponse) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Ensure the temp file is present.\n\tif err := f.ensureTempFile(ctx); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Allocate a response buffer.\n\tresp.Data = make([]byte, req.Size)\n\n\t\/\/ Read the data.\n\tn, err := f.tempFile.ReadAt(resp.Data, req.Offset)\n\tresp.Data = resp.Data[:n]\n\n\t\/\/ Special case: read(2) doesn't return EOF errors.\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\n\treturn err\n}\n\n\/\/ Ensure that the local temporary file is initialized, then write to it.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Write(\n\tctx context.Context,\n\treq *fuse.WriteRequest,\n\tresp *fuse.WriteResponse) (err error) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Ensure the temp file is present. If it's not, grab the current contents\n\t\/\/ from GCS.\n\tif err = f.ensureTempFile(ctx); err != nil {\n\t\terr = fmt.Errorf(\"ensureTempFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Write to the temp file.\n\tresp.Size, err = f.tempFile.WriteAt(req.Data, req.Offset)\n\treturn\n}\n\n\/\/ Put the temporary file back in the bucket if it's dirty.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Flush(\n\tctx context.Context,\n\treq *fuse.FlushRequest) (err error) {\n\t\/\/ Is there anything interesting for us to do?\n\tif !f.tempFileDirty {\n\t\treturn\n\t}\n\n\terr = errors.New(\"TODO(jacobsa): file.Flush.\")\n\treturn\n}\nFixed handling of tempFileDirty.\/\/ Copyright 2015 Google Inc. 
All Rights Reserved.\n\/\/ Author: jacobsa@google.com (Aaron Jacobs)\n\npackage fs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"bazil.org\/fuse\"\n\tfusefs \"bazil.org\/fuse\/fs\"\n)\n\n\/\/ A remote object's name and metadata, along with a local temporary file that\n\/\/ contains its contents (when initialized).\n\/\/\n\/\/ TODO(jacobsa): After becoming comfortable with the representation of dir and\n\/\/ its concurrency protection, audit this file and make sure it is up to par.\ntype file struct {\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tlogger *log.Logger\n\tbucket gcs.Bucket\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tobjectName string\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tmu syncutil.InvariantMutex\n\n\t\/\/ A local temporary file containing the current contents of the logical\n\t\/\/ file. Lazily created. When non-nil, this is authoritative.\n\ttempFile *os.File \/\/ GUARDED_BY(mu)\n\n\t\/\/ Set to true when we need to flush tempFile to GCS before allowing the user\n\t\/\/ to successfully close the file. false implies that the GCS object is up to\n\t\/\/ date (or has been modified only by a foreign machine).\n\t\/\/\n\t\/\/ INVARIANT: If true, then tempFile != nil\n\ttempFileDirty bool \/\/ GUARDED_BY(mu)\n\n\t\/\/ When tempFile == nil, the current size of the object named objectName on\n\t\/\/ GCS, as far as we are aware.\n\t\/\/\n\t\/\/ INVARIANT: If tempFile != nil, then remoteSize == 0\n\tremoteSize uint64 \/\/ GUARDED_BY(mu)\n}\n\n\/\/ Make sure file implements the interfaces we think it does.\nvar (\n\t_ fusefs.Node = &file{}\n\n\t_ fusefs.Handle = &file{}\n\t_ fusefs.HandleFlusher = &file{}\n\t_ fusefs.HandleReader = &file{}\n\t_ fusefs.HandleReleaser = &file{}\n\t_ fusefs.HandleWriter = &file{}\n)\n\nfunc newFile(\n\tlogger *log.Logger,\n\tbucket gcs.Bucket,\n\tobjectName string,\n\tremoteSize uint64) *file {\n\tf := &file{\n\t\tlogger: logger,\n\t\tbucket: bucket,\n\t\tobjectName: objectName,\n\t\tremoteSize: remoteSize,\n\t}\n\n\tf.mu = syncutil.NewInvariantMutex(func() { f.checkInvariants() })\n\n\treturn f\n}\n\nfunc (f *file) checkInvariants() {\n\tif f.tempFileDirty && f.tempFile == nil {\n\t\tpanic(\"Expected !tempFileDirty when tempFile == nil.\")\n\t}\n\n\tif f.tempFile != nil && f.remoteSize != 0 {\n\t\tpanic(\"Expected remoteSize == 0 when tempFile != nil.\")\n\t}\n}\n\nfunc (f *file) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\t\/\/ TODO(jacobsa): Expose ACLs from GCS?\n\t\tMode: 0400,\n\t\t\/\/ TODO(jacobsa): Catch the bug here (that this may be wrong when\n\t\t\/\/ f.tempFile != nil) with a test, then fix it.\n\t\tSize: f.remoteSize,\n\t}\n}\n\n\/\/ If the file contents have not yet been fetched to a temporary file, fetch\n\/\/ them.\n\/\/\n\/\/ EXCLUSIVE_LOCKS_REQUIRED(f.mu)\nfunc (f *file) ensureTempFile(ctx context.Context) error {\n\t\/\/ Do we already have a file?\n\tif f.tempFile != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Create a temporary file.\n\ttempFile, err := ioutil.TempFile(\"\", \"gcsfuse\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ioutil.TempFile: %v\", err)\n\t}\n\n\t\/\/ Create a reader for the 
object.\n\treadCloser, err := f.bucket.NewReader(ctx, f.objectName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bucket.NewReader: %v\", err)\n\t}\n\n\tdefer readCloser.Close()\n\n\t\/\/ Copy the object contents into the file.\n\tif _, err := io.Copy(tempFile, readCloser); err != nil {\n\t\treturn fmt.Errorf(\"io.Copy: %v\", err)\n\t}\n\n\t\/\/ Save the file for later.\n\tf.tempFile = tempFile\n\n\t\/\/ remoteSize is no longer authoritative.\n\tf.remoteSize = 0\n\n\treturn nil\n}\n\n\/\/ Throw away the local temporary file, if any.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Is there a file to close?\n\tif f.tempFile == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Close it, after grabbing its path.\n\tpath := f.tempFile.Name()\n\tif err := f.tempFile.Close(); err != nil {\n\t\tf.logger.Println(\"Error closing temp file:\", err)\n\t}\n\n\t\/\/ Attempt to delete it.\n\tif err := os.Remove(path); err != nil {\n\t\tf.logger.Println(\"Error deleting temp file:\", err)\n\t}\n\n\tf.tempFile = nil\n\tf.tempFileDirty = false\n\n\treturn nil\n}\n\n\/\/ Ensure that the local temporary file is initialized, then read from it.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Read(\n\tctx context.Context,\n\treq *fuse.ReadRequest,\n\tresp *fuse.ReadResponse) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Ensure the temp file is present.\n\tif err := f.ensureTempFile(ctx); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Allocate a response buffer.\n\tresp.Data = make([]byte, req.Size)\n\n\t\/\/ Read the data.\n\tn, err := f.tempFile.ReadAt(resp.Data, req.Offset)\n\tresp.Data = resp.Data[:n]\n\n\t\/\/ Special case: read(2) doesn't return EOF errors.\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\n\treturn err\n}\n\n\/\/ Ensure that the local temporary file is initialized, then write to it.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Write(\n\tctx context.Context,\n\treq *fuse.WriteRequest,\n\tresp *fuse.WriteResponse) (err error) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t\/\/ Ensure the temp file is present. If it's not, grab the current contents\n\t\/\/ from GCS.\n\tif err = f.ensureTempFile(ctx); err != nil {\n\t\terr = fmt.Errorf(\"ensureTempFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Mark us dirty.\n\tf.tempFileDirty = true\n\n\t\/\/ Write to the temp file.\n\tresp.Size, err = f.tempFile.WriteAt(req.Data, req.Offset)\n\n\treturn\n}\n\n\/\/ Put the temporary file back in the bucket if it's dirty.\n\/\/\n\/\/ LOCKS_EXCLUDED(f.mu)\nfunc (f *file) Flush(\n\tctx context.Context,\n\treq *fuse.FlushRequest) (err error) {\n\t\/\/ Is there anything interesting for us to do?\n\tif !f.tempFileDirty {\n\t\treturn\n\t}\n\n\terr = errors.New(\"TODO(jacobsa): file.Flush.\")\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/testing\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/tsuru-base\"\n\t\"launchpad.net\/gocheck\"\n)\n\nfunc (s *S) TestDeployInfo(c *gocheck.C) {\n\tdesc := `Deploys set of files and\/or directories to tsuru server. 
Some examples of calls are:\n\ntsuru deploy .\ntsuru deploy myfile.jar Procfile\n`\n\texpected := &cmd.Info{\n\t\tName: \"deploy\",\n\t\tUsage: \"deploy [-a\/--app ] [file-or-dir-2] ... [file-or-dir-n]\",\n\t\tDesc: desc,\n\t\tMinArgs: 1,\n\t}\n\tcmd := deploy{}\n\tc.Assert(cmd.Info(), gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestDeployRun(c *gocheck.C) {\n\tvar called bool\n\tvar buf bytes.Buffer\n\terr := targz(nil, &buf, \"testdata\")\n\tc.Assert(err, gocheck.IsNil)\n\ttrans := testing.ConditionalTransport{\n\t\tTransport: testing.Transport{Message: \"deploy worked\\nOK\\n\", Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\tdefer req.Body.Close()\n\t\t\tcalled = true\n\t\t\tfile, _, err := req.FormFile(\"file\")\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tc.Assert(content, gocheck.DeepEquals, buf.Bytes())\n\t\t\treturn req.Method == \"POST\" && req.URL.Path == \"\/apps\/secret\/deploy\"\n\t\t},\n\t}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcmd := deploy{GuessingCommand: guessCommand}\n\terr = cmd.Run(&context, client)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(called, gocheck.Equals, true)\n}\n\nfunc (s *S) TestDeployRunNotOK(c *gocheck.C) {\n\ttrans := testing.Transport{Message: \"deploy worked\\n\", Status: http.StatusOK}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, client)\n\tc.Assert(err, gocheck.Equals, cmd.ErrAbortCommand)\n}\n\nfunc (s *S) TestDeployRunFileNotFound(c *gocheck.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"\/tmp\/something\/that\/doesnt\/really\/exist\/im\/sure\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, nil)\n\tc.Assert(err, gocheck.NotNil)\n}\n\nfunc (s *S) TestDeployRunRequestFailure(c *gocheck.C) {\n\ttrans := testing.Transport{Message: \"app not found\\n\", Status: http.StatusNotFound}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, client)\n\tc.Assert(err, gocheck.NotNil)\n\tc.Assert(err.Error(), gocheck.Equals, \"app not found\\n\")\n}\n\nfunc (s *S) TestTargz(c *gocheck.C) {\n\tvar buf bytes.Buffer\n\tctx := cmd.Context{Stderr: &buf}\n\tvar gzipBuf, tarBuf bytes.Buffer\n\terr := targz(&ctx, &gzipBuf, \"testdata\", \"..\")\n\tc.Assert(err, gocheck.IsNil)\n\tgzipReader, err := gzip.NewReader(&gzipBuf)\n\tc.Assert(err, gocheck.IsNil)\n\t_, err = 
io.Copy(&tarBuf, gzipReader)\n\tc.Assert(err, gocheck.IsNil)\n\ttarReader := tar.NewReader(&tarBuf)\n\tvar headers []string\n\tvar contents []string\n\tfor header, err := tarReader.Next(); err == nil; header, err = tarReader.Next() {\n\t\theaders = append(headers, header.Name)\n\t\tif !header.FileInfo().IsDir() {\n\t\t\tcontent, err := ioutil.ReadAll(tarReader)\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tcontents = append(contents, string(content))\n\t\t}\n\t}\n\texpected := []string{\n\t\t\"testdata\", \"testdata\/directory\", \"testdata\/directory\/file.txt\",\n\t\t\"testdata\/file1.txt\", \"testdata\/file2.txt\",\n\t}\n\tc.Assert(headers, gocheck.DeepEquals, expected)\n\texpectedContents := []string{\"wat\\n\", \"something happened\\n\", \"twice\\n\"}\n\tc.Assert(contents, gocheck.DeepEquals, expectedContents)\n\tc.Assert(buf.String(), gocheck.Equals, `Warning: skipping \"..\"`)\n}\n\nfunc (s *S) TestTargzFailure(c *gocheck.C) {\n\tvar stderr bytes.Buffer\n\tctx := cmd.Context{Stderr: &stderr}\n\tvar buf bytes.Buffer\n\terr := targz(&ctx, &buf, \"\/tmp\/something\/that\/definitely\/doesnt\/exist\/right\", \"testdata\")\n\tc.Assert(err, gocheck.NotNil)\n\tc.Assert(err.Error(), gocheck.Equals, \"stat \/tmp\/something\/that\/definitely\/doesnt\/exist\/right: no such file or directory\")\n}\ntsuru\/deploy_test: sort strings before comparing\/\/ Copyright 2014 tsuru-client authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/testing\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/tsuru-base\"\n\t\"launchpad.net\/gocheck\"\n)\n\nfunc (s *S) TestDeployInfo(c *gocheck.C) {\n\tdesc := `Deploys set of files and\/or directories to tsuru server. Some examples of calls are:\n\ntsuru deploy .\ntsuru deploy myfile.jar Procfile\n`\n\texpected := &cmd.Info{\n\t\tName: \"deploy\",\n\t\tUsage: \"deploy [-a\/--app ] [file-or-dir-2] ... 
[file-or-dir-n]\",\n\t\tDesc: desc,\n\t\tMinArgs: 1,\n\t}\n\tcmd := deploy{}\n\tc.Assert(cmd.Info(), gocheck.DeepEquals, expected)\n}\n\nfunc (s *S) TestDeployRun(c *gocheck.C) {\n\tvar called bool\n\tvar buf bytes.Buffer\n\terr := targz(nil, &buf, \"testdata\")\n\tc.Assert(err, gocheck.IsNil)\n\ttrans := testing.ConditionalTransport{\n\t\tTransport: testing.Transport{Message: \"deploy worked\\nOK\\n\", Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\tdefer req.Body.Close()\n\t\t\tcalled = true\n\t\t\tfile, _, err := req.FormFile(\"file\")\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tc.Assert(content, gocheck.DeepEquals, buf.Bytes())\n\t\t\treturn req.Method == \"POST\" && req.URL.Path == \"\/apps\/secret\/deploy\"\n\t\t},\n\t}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcmd := deploy{GuessingCommand: guessCommand}\n\terr = cmd.Run(&context, client)\n\tc.Assert(err, gocheck.IsNil)\n\tc.Assert(called, gocheck.Equals, true)\n}\n\nfunc (s *S) TestDeployRunNotOK(c *gocheck.C) {\n\ttrans := testing.Transport{Message: \"deploy worked\\n\", Status: http.StatusOK}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, client)\n\tc.Assert(err, gocheck.Equals, cmd.ErrAbortCommand)\n}\n\nfunc (s *S) TestDeployRunFileNotFound(c *gocheck.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"\/tmp\/something\/that\/doesnt\/really\/exist\/im\/sure\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, nil)\n\tc.Assert(err, gocheck.NotNil)\n}\n\nfunc (s *S) TestDeployRunRequestFailure(c *gocheck.C) {\n\ttrans := testing.Transport{Message: \"app not found\\n\", Status: http.StatusNotFound}\n\tclient := cmd.NewClient(&http.Client{Transport: &trans}, nil, manager)\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"testdata\", \"..\"},\n\t}\n\tfake := FakeGuesser{name: \"secret\"}\n\tguessCommand := tsuru.GuessingCommand{G: &fake}\n\tcommand := deploy{GuessingCommand: guessCommand}\n\terr := command.Run(&context, client)\n\tc.Assert(err, gocheck.NotNil)\n\tc.Assert(err.Error(), gocheck.Equals, \"app not found\\n\")\n}\n\nfunc (s *S) TestTargz(c *gocheck.C) {\n\tvar buf bytes.Buffer\n\tctx := cmd.Context{Stderr: &buf}\n\tvar gzipBuf, tarBuf bytes.Buffer\n\terr := targz(&ctx, &gzipBuf, \"testdata\", \"..\")\n\tc.Assert(err, gocheck.IsNil)\n\tgzipReader, err := gzip.NewReader(&gzipBuf)\n\tc.Assert(err, gocheck.IsNil)\n\t_, err = io.Copy(&tarBuf, gzipReader)\n\tc.Assert(err, gocheck.IsNil)\n\ttarReader := tar.NewReader(&tarBuf)\n\tvar headers []string\n\tvar contents []string\n\tfor header, err := 
tarReader.Next(); err == nil; header, err = tarReader.Next() {\n\t\theaders = append(headers, header.Name)\n\t\tif !header.FileInfo().IsDir() {\n\t\t\tcontent, err := ioutil.ReadAll(tarReader)\n\t\t\tc.Assert(err, gocheck.IsNil)\n\t\t\tcontents = append(contents, string(content))\n\t\t}\n\t}\n\texpected := []string{\n\t\t\"testdata\", \"testdata\/directory\", \"testdata\/directory\/file.txt\",\n\t\t\"testdata\/file1.txt\", \"testdata\/file2.txt\",\n\t}\n\tsort.Strings(expected)\n\tsort.Strings(headers)\n\tc.Assert(headers, gocheck.DeepEquals, expected)\n\texpectedContents := []string{\"wat\\n\", \"something happened\\n\", \"twice\\n\"}\n\tsort.Strings(expectedContents)\n\tsort.Strings(contents)\n\tc.Assert(contents, gocheck.DeepEquals, expectedContents)\n\tc.Assert(buf.String(), gocheck.Equals, `Warning: skipping \"..\"`)\n}\n\nfunc (s *S) TestTargzFailure(c *gocheck.C) {\n\tvar stderr bytes.Buffer\n\tctx := cmd.Context{Stderr: &stderr}\n\tvar buf bytes.Buffer\n\terr := targz(&ctx, &buf, \"\/tmp\/something\/that\/definitely\/doesnt\/exist\/right\", \"testdata\")\n\tc.Assert(err, gocheck.NotNil)\n\tc.Assert(err.Error(), gocheck.Equals, \"stat \/tmp\/something\/that\/definitely\/doesnt\/exist\/right: no such file or directory\")\n}\n<|endoftext|>"} {"text":"package govuk_crawler_worker_test\n\nimport (\n\t. \"github.com\/alphagov\/govuk_crawler_worker\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/fzzy\/radix\/redis\"\n)\n\nvar _ = Describe(\"TTLHashSet\", func() {\n\tprefix := \"govuk_mirror_crawler_test\"\n\n\tIt(\"returns an error when asking for a TTLHashSet object that can't connect to redis\", func() {\n\t\tttlHashSet, err := NewTTLHashSet(prefix, \"127.0.0.1:20000\")\n\n\t\tExpect(err).ToNot(BeNil())\n\t\tExpect(ttlHashSet).To(BeNil())\n\t})\n\n\tDescribe(\"Working with a redis service\", func() {\n\t\tvar (\n\t\t\tttlHashSet *TTLHashSet\n\t\t\tttlHashSetErr error\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tttlHashSet, ttlHashSetErr = NewTTLHashSet(prefix, \"127.0.0.1:6379\")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(ttlHashSet.Close()).To(BeNil())\n\t\t\tExpect(purgeAllKeys(prefix, \"127.0.0.1:6379\"))\n\t\t})\n\n\t\tIt(\"should connect successfully with no errors\", func() {\n\t\t\tExpect(ttlHashSetErr).To(BeNil())\n\t\t\tExpect(ttlHashSet).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"should return false when a key doesn't exist\", func() {\n\t\t\texists, err := ttlHashSet.Exists(\"foobar\")\n\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(exists).To(Equal(false))\n\t\t})\n\n\t\tIt(\"exposes a way of adding a key to redis\", func() {\n\t\t\tkey := \"foo.bar.baz\"\n\t\t\tadded, addedErr := ttlHashSet.Add(key)\n\n\t\t\tExpect(addedErr).To(BeNil())\n\t\t\tExpect(added).To(Equal(true))\n\n\t\t\texists, existsErr := ttlHashSet.Exists(key)\n\n\t\t\tExpect(existsErr).To(BeNil())\n\t\t\tExpect(exists).To(Equal(true))\n\t\t})\n\n\t\tDescribe(\"TTL()\", func() {\n\t\t\tIt(\"should return a negative TTL on a non-existent key\", func() {\n\t\t\t\tttl, err := ttlHashSet.TTL(\"this.key.does.not.exist\")\n\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\tExpect(ttl).To(Equal(-2))\n\t\t\t})\n\n\t\t\tIt(\"should expose a positive TTL on key that exists\", func() {\n\t\t\t\tkey := \"some.ttl.key\"\n\t\t\t\tadded, addErr := ttlHashSet.Add(key)\n\n\t\t\t\tExpect(addErr).To(BeNil())\n\t\t\t\tExpect(added).To(Equal(true))\n\n\t\t\t\tttl, err := ttlHashSet.TTL(key)\n\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\tExpect(ttl).To(BeNumerically(\">\", 
1000))\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc purgeAllKeys(prefix string, address string) error {\n\tclient, err := redis.Dial(\"tcp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := client.Cmd(\"KEYS\", prefix + \"*\").List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treply := client.Cmd(\"DEL\", keys)\n\tif reply.Err != nil {\n\t\treturn reply.Err\n\t}\n\n\treturn nil\n}\nRun code through `go fmt`package govuk_crawler_worker_test\n\nimport (\n\t. \"github.com\/alphagov\/govuk_crawler_worker\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/fzzy\/radix\/redis\"\n)\n\nvar _ = Describe(\"TTLHashSet\", func() {\n\tprefix := \"govuk_mirror_crawler_test\"\n\n\tIt(\"returns an error when asking for a TTLHashSet object that can't connect to redis\", func() {\n\t\tttlHashSet, err := NewTTLHashSet(prefix, \"127.0.0.1:20000\")\n\n\t\tExpect(err).ToNot(BeNil())\n\t\tExpect(ttlHashSet).To(BeNil())\n\t})\n\n\tDescribe(\"Working with a redis service\", func() {\n\t\tvar (\n\t\t\tttlHashSet *TTLHashSet\n\t\t\tttlHashSetErr error\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tttlHashSet, ttlHashSetErr = NewTTLHashSet(prefix, \"127.0.0.1:6379\")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(ttlHashSet.Close()).To(BeNil())\n\t\t\tExpect(purgeAllKeys(prefix, \"127.0.0.1:6379\"))\n\t\t})\n\n\t\tIt(\"should connect successfully with no errors\", func() {\n\t\t\tExpect(ttlHashSetErr).To(BeNil())\n\t\t\tExpect(ttlHashSet).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"should return false when a key doesn't exist\", func() {\n\t\t\texists, err := ttlHashSet.Exists(\"foobar\")\n\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(exists).To(Equal(false))\n\t\t})\n\n\t\tIt(\"exposes a way of adding a key to redis\", func() {\n\t\t\tkey := \"foo.bar.baz\"\n\t\t\tadded, addedErr := ttlHashSet.Add(key)\n\n\t\t\tExpect(addedErr).To(BeNil())\n\t\t\tExpect(added).To(Equal(true))\n\n\t\t\texists, existsErr := ttlHashSet.Exists(key)\n\n\t\t\tExpect(existsErr).To(BeNil())\n\t\t\tExpect(exists).To(Equal(true))\n\t\t})\n\n\t\tDescribe(\"TTL()\", func() {\n\t\t\tIt(\"should return a negative TTL on a non-existent key\", func() {\n\t\t\t\tttl, err := ttlHashSet.TTL(\"this.key.does.not.exist\")\n\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\tExpect(ttl).To(Equal(-2))\n\t\t\t})\n\n\t\t\tIt(\"should expose a positive TTL on key that exists\", func() {\n\t\t\t\tkey := \"some.ttl.key\"\n\t\t\t\tadded, addErr := ttlHashSet.Add(key)\n\n\t\t\t\tExpect(addErr).To(BeNil())\n\t\t\t\tExpect(added).To(Equal(true))\n\n\t\t\t\tttl, err := ttlHashSet.TTL(key)\n\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\tExpect(ttl).To(BeNumerically(\">\", 1000))\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc purgeAllKeys(prefix string, address string) error {\n\tclient, err := redis.Dial(\"tcp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := client.Cmd(\"KEYS\", prefix+\"*\").List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treply := client.Cmd(\"DEL\", keys)\n\tif reply.Err != nil {\n\t\treturn reply.Err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package stats\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/projecteru2\/core\/types\"\n\n\tstatsdlib \"github.com\/CMGS\/statsd\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tmemStats = \"eru-core.%s.mem\"\n\tdeployCount = \"eru-core.deploy.count\"\n)\n\ntype statsdClient struct {\n\tAddr string\n\tHostname string\n}\n\nfunc (s *statsdClient) gauge(keyPattern string, data map[string]float64) error {\n\tremote, err := 
statsdlib.New(s.Addr)\n\tif err != nil {\n\t\tlog.Errorf(\"[gauge] Connect statsd failed: %v\", err)\n\t\treturn err\n\t}\n\tdefer remote.Close()\n\tdefer remote.Flush()\n\tfor k, v := range data {\n\t\tkey := fmt.Sprintf(keyPattern, k)\n\t\tremote.Gauge(key, v)\n\t}\n\treturn nil\n}\n\nfunc (s *statsdClient) count(key string, n int, rate float32) error {\n\tremote, err := statsdlib.New(s.Addr)\n\tif err != nil {\n\t\tlog.Errorf(\"[count] Connect statsd failed: %v\", err)\n\t\treturn err\n\t}\n\tdefer remote.Close()\n\tdefer remote.Flush()\n\tremote.Count(key, n, rate)\n\treturn nil\n}\n\nfunc (s *statsdClient) isNotSet() bool {\n\treturn s.Addr == \"\"\n}\n\nfunc (s *statsdClient) SendMemCap(cpumemmap map[string]types.CPUAndMem) {\n\tif s.isNotSet() {\n\t\treturn\n\t}\n\tdata := map[string]float64{}\n\tfor node, cpuandmem := range cpumemmap {\n\t\tdata[node] = float64(cpuandmem.MemCap)\n\t}\n\n\tkeyPattern := fmt.Sprintf(memStats, s.Hostname)\n\tif err := s.gauge(keyPattern, data); err != nil {\n\t\tlog.Errorf(\"[SendMemCap] Error occurred while sending data to statsd: %v\", err)\n\t}\n}\n\nfunc (s *statsdClient) SendDeployCount(n int) {\n\tif s.isNotSet() {\n\t\treturn\n\t}\n\tif err := s.count(deployCount, n, 1.0); err != nil {\n\t\tlog.Errorf(\"[SendDeployCount] Error occurred while counting: %v\", err)\n\t}\n}\n\n\/\/ Client ref to statsd client\nvar Client = statsdClient{}\n\n\/\/ NewStatsdClient makes a client\nfunc NewStatsdClient(addr string) {\n\thostname, _ := os.Hostname()\n\tcleanHost := strings.Replace(hostname, \".\", \"-\", -1)\n\tClient = statsdClient{addr, cleanHost}\n}\nfix gauge format bugpackage stats\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/projecteru2\/core\/types\"\n\n\tstatsdlib \"github.com\/CMGS\/statsd\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\tmemStats = \"eru-core.%s.mem\"\n\tdeployCount = \"eru-core.deploy.count\"\n)\n\ntype statsdClient struct {\n\tAddr string\n\tHostname string\n}\n\nfunc (s *statsdClient) gauge(keyPattern string, data map[string]float64) error {\n\tremote, err := statsdlib.New(s.Addr)\n\tif err != nil {\n\t\tlog.Errorf(\"[gauge] Connect statsd failed: %v\", err)\n\t\treturn err\n\t}\n\tdefer remote.Close()\n\tdefer remote.Flush()\n\tfor k, v := range data {\n\t\tkey := fmt.Sprintf(\"%s.%s\", keyPattern, k)\n\t\tremote.Gauge(key, v)\n\t}\n\treturn nil\n}\n\nfunc (s *statsdClient) count(key string, n int, rate float32) error {\n\tremote, err := statsdlib.New(s.Addr)\n\tif err != nil {\n\t\tlog.Errorf(\"[count] Connect statsd failed: %v\", err)\n\t\treturn err\n\t}\n\tdefer remote.Close()\n\tdefer remote.Flush()\n\tremote.Count(key, n, rate)\n\treturn nil\n}\n\nfunc (s *statsdClient) isNotSet() bool {\n\treturn s.Addr == \"\"\n}\n\nfunc (s *statsdClient) SendMemCap(cpumemmap map[string]types.CPUAndMem) {\n\tif s.isNotSet() {\n\t\treturn\n\t}\n\tdata := map[string]float64{}\n\tfor node, cpuandmem := range cpumemmap {\n\t\tdata[node] = float64(cpuandmem.MemCap)\n\t}\n\n\tkeyPattern := fmt.Sprintf(memStats, s.Hostname)\n\tif err := s.gauge(keyPattern, data); err != nil {\n\t\tlog.Errorf(\"[SendMemCap] Error occurred while sending data to statsd: %v\", err)\n\t}\n}\n\nfunc (s *statsdClient) SendDeployCount(n int) {\n\tif s.isNotSet() {\n\t\treturn\n\t}\n\tif err := s.count(deployCount, n, 1.0); err != nil {\n\t\tlog.Errorf(\"[SendDeployCount] Error occurred while counting: %v\", err)\n\t}\n}\n\n\/\/ Client ref to statsd client\nvar Client = statsdClient{}\n\n\/\/ NewStatsdClient makes a client\nfunc 
NewStatsdClient(addr string) {\n\thostname, _ := os.Hostname()\n\tcleanHost := strings.Replace(hostname, \".\", \"-\", -1)\n\tClient = statsdClient{addr, cleanHost}\n}\n<|endoftext|>"} {"text":"package graval\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\twelcomeMessage = \"Welcome to the Go FTP Server\"\n)\n\ntype FTPConn struct {\n\tconn *net.TCPConn\n\tcontrolReader *bufio.Reader\n\tcontrolWriter *bufio.Writer\n\tdata *net.TCPConn\n\tdriver FTPDriver\n\tnamePrefix string\n\treqUser string\n\tuser string\n\trenameFrom string\n}\n\n\/\/ NewFTPConn constructs a new object that will handle the FTP protocol over\n\/\/ an active net.TCPConn. The TCP connection should already be open before\n\/\/ it is handed to this function. driver is an instance of FTPDriver that\n\/\/ will handle all auth and persistence details.\nfunc NewFTPConn(tcpConn *net.TCPConn, driver FTPDriver) *FTPConn {\n\tc := new(FTPConn)\n\tc.namePrefix = \"\/\"\n\tc.conn = tcpConn\n\tc.controlReader = bufio.NewReader(tcpConn)\n\tc.controlWriter = bufio.NewWriter(tcpConn)\n\tc.driver = driver\n\treturn c\n}\n\n\/\/ Serve starts an endless loop that reads FTP commands from the client and\n\/\/ responds appropriately. This loop will usually be running inside a\n\/\/ goroutine; it returns once the control connection closes, at which point\n\/\/ the connection can be cleaned up.\nfunc (ftpConn *FTPConn) Serve() {\n\tlog.Print(\"Connection Established\")\n\t\/\/ send welcome\n\tftpConn.writeMessage(220, welcomeMessage)\n\t\/\/ read commands\n\tfor {\n\t\tline, err := ftpConn.controlReader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tftpConn.receiveLine(line)\n\t}\n\tlog.Print(\"Connection Terminated\")\n}\n\n\/\/ Close will manually close this connection, even if the client isn't ready.\nfunc (ftpConn *FTPConn) Close() {\n\tftpConn.conn.Close()\n\tif ftpConn.data != nil {\n\t\tftpConn.data.Close()\n\t}\n}\n\n\/\/ receiveLine accepts a single line FTP command and co-ordinates an\n\/\/ appropriate response.\nfunc (ftpConn *FTPConn) receiveLine(line string) {\n\tlog.Print(line)\n\tcommand, param := ftpConn.parseLine(line)\n\tswitch command {\n\tcase \"ALLO\":\n\t\tftpConn.cmdAllo()\n\t\tbreak\n\tcase \"CDUP\", \"XCUP\":\n\t\tftpConn.cmdCdup()\n\t\tbreak\n\tcase \"CWD\", \"XCWD\":\n\t\tftpConn.cmdCwd(param)\n\t\tbreak\n\tcase \"DELE\":\n\t\tftpConn.cmdDele(param)\n\t\tbreak\n\tcase \"MKD\":\n\t\tftpConn.cmdMkd(param)\n\t\tbreak\n\tcase \"MODE\":\n\t\tftpConn.cmdMode(param)\n\t\tbreak\n\tcase \"NOOP\":\n\t\tftpConn.cmdNoop()\n\t\tbreak\n\tcase \"PASS\":\n\t\tftpConn.cmdPass(param)\n\t\tbreak\n\tcase \"PWD\", \"XPWD\":\n\t\tftpConn.cmdPwd()\n\t\tbreak\n\tcase \"QUIT\":\n\t\tftpConn.Close()\n\t\tbreak\n\tcase \"RMD\", \"XRMD\":\n\t\tftpConn.cmdRmd(param)\n\t\tbreak\n\tcase \"RNFR\":\n\t\tftpConn.cmdRnfr(param)\n\t\tbreak\n\tcase \"RNTO\":\n\t\tftpConn.cmdRnto(param)\n\t\tbreak\n\tcase \"SIZE\":\n\t\tftpConn.cmdSize(param)\n\t\tbreak\n\tcase \"STRU\":\n\t\tftpConn.cmdStru(param)\n\t\tbreak\n\tcase \"SYST\":\n\t\tftpConn.cmdSyst()\n\t\tbreak\n\tcase \"TYPE\":\n\t\tftpConn.cmdType(param)\n\t\tbreak\n\tcase \"USER\":\n\t\tftpConn.cmdUser(param)\n\t\tbreak\n\tdefault:\n\t\tftpConn.writeMessage(500, \"Command not found\")\n\t}\n}\n\n\/\/ cmdAllo responds to the ALLO FTP command.\n\/\/\n\/\/ This is essentially a ping from the client so we just respond with a\n\/\/ basic OK 
message.\nfunc (ftpConn *FTPConn) cmdAllo() {\n\tftpConn.writeMessage(202, \"Obsolete\")\n}\n\n\/\/ cmdCdup responds to the CDUP FTP command.\n\/\/\n\/\/ Allows the client to change their current directory to the parent.\nfunc (ftpConn *FTPConn) cmdCdup() {\n\tftpConn.cmdCwd(\"..\")\n}\n\n\/\/ cmdCwd responds to the CWD FTP command. It allows the client to change the\n\/\/ current working directory.\nfunc (ftpConn *FTPConn) cmdCwd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.ChangeDir(path) {\n\t\tftpConn.namePrefix = path\n\t\tftpConn.writeMessage(250, \"Directory changed to \" + path)\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdDele responds to the DELE FTP command. It allows the client to delete\n\/\/ a file\nfunc (ftpConn *FTPConn) cmdDele(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.DeleteFile(path) {\n\t\tftpConn.writeMessage(250, \"File deleted\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdMkd responds to the MKD FTP command. It allows the client to create\n\/\/ a new directory\nfunc (ftpConn *FTPConn) cmdMkd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.MakeDir(path) {\n\t\tftpConn.writeMessage(257, \"Directory created\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdMode responds to the MODE FTP command.\n\/\/\n\/\/ the original FTP spec had various options for hosts to negotiate how data\n\/\/ would be sent over the data socket. In reality these days (S)tream mode\n\/\/ is all that is used for the mode - data is just streamed down the data\n\/\/ socket unchanged.\nfunc (ftpConn *FTPConn) cmdMode(param string) {\n\tif strings.ToUpper(param) == \"S\" {\n\t\tftpConn.writeMessage(200, \"OK\")\n\t} else {\n\t\tftpConn.writeMessage(504, \"MODE is an obsolete command\")\n\t}\n}\n\n\/\/ cmdNoop responds to the NOOP FTP command.\n\/\/\n\/\/ This is essentially a ping from the client so we just respond with a\n\/\/ basic 200 message.\nfunc (ftpConn *FTPConn) cmdNoop() {\n\tftpConn.writeMessage(200, \"OK\")\n}\n\n\/\/ cmdPass responds to the PASS FTP command by asking the driver if the supplied\n\/\/ username and password are valid\nfunc (ftpConn *FTPConn) cmdPass(param string) {\n\tif ftpConn.driver.Authenticate(ftpConn.reqUser, param) {\n\t\tftpConn.user = ftpConn.reqUser\n\t\tftpConn.reqUser = \"\"\n\t\tftpConn.writeMessage(230, \"Password ok, continue\")\n\t} else {\n\t\tftpConn.writeMessage(530, \"Incorrect password, not logged in\")\n\t}\n}\n\n\/\/ cmdPwd responds to the PWD FTP command.\n\/\/\n\/\/ Tells the client what the current working directory is.\nfunc (ftpConn *FTPConn) cmdPwd() {\n\tftpConn.writeMessage(257, \"\\\"\" + ftpConn.namePrefix + \"\\\" is the current directory\")\n}\n\n\/\/ cmdRmd responds to the RMD FTP command. It allows the client to delete a\n\/\/ directory.\nfunc (ftpConn *FTPConn) cmdRmd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.DeleteDir(path) {\n\t\tftpConn.writeMessage(250, \"Directory deleted\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdRnfr responds to the RNFR FTP command. It's the first of two commands\n\/\/ required for a client to rename a file.\nfunc (ftpConn *FTPConn) cmdRnfr(param string) {\n\tftpConn.renameFrom = ftpConn.buildPath(param)\n\tftpConn.writeMessage(350, \"Requested file action pending further information.\")\n}\n\n\/\/ cmdRnto responds to the RNTO FTP command. 
It's the second of two commands\n\/\/ required for a client to rename a file.\nfunc (ftpConn *FTPConn) cmdRnto(param string) {\n\ttoPath := ftpConn.buildPath(param)\n\tif ftpConn.driver.Rename(ftpConn.renameFrom, toPath) {\n\t\tftpConn.writeMessage(250, \"File renamed\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdSize responds to the SIZE FTP command. It returns the size of the\n\/\/ requested path in bytes.\nfunc (ftpConn *FTPConn) cmdSize(param string) {\n\tpath := ftpConn.buildPath(param)\n\tbytes := ftpConn.driver.Bytes(path)\n\tif bytes >= 0 {\n\t\tftpConn.writeMessage(213, strconv.Itoa(bytes))\n\t} else {\n\t\tftpConn.writeMessage(450, \"file not available\")\n\t}\n}\n\n\/\/ cmdStru responds to the STRU FTP command.\n\/\/\n\/\/ like the MODE and TYPE commands, stru[cture] dates back to a time when the\n\/\/ FTP protocol was more aware of the content of the files it was transferring,\n\/\/ and would sometimes be expected to translate things like EOL markers on the\n\/\/ fly.\n\/\/\n\/\/ These days files are sent unmodified, and F(ile) mode is the only one we\n\/\/ really need to support.\nfunc (ftpConn *FTPConn) cmdStru(param string) {\n\tif strings.ToUpper(param) == \"F\" {\n\t\tftpConn.writeMessage(200, \"OK\")\n\t} else {\n\t\tftpConn.writeMessage(504, \"STRU is an obsolete command\")\n\t}\n}\n\n\/\/ cmdSyst responds to the SYST FTP command by providing a canned response.\nfunc (ftpConn *FTPConn) cmdSyst() {\n\tftpConn.writeMessage(215, \"UNIX Type: L8\")\n}\n\n\/\/ cmdType responds to the TYPE FTP command.\n\/\/\n\/\/ like the MODE and STRU commands, TYPE dates back to a time when the FTP\n\/\/ protocol was more aware of the content of the files it was transferring, and\n\/\/ would sometimes be expected to translate things like EOL markers on the fly.\n\/\/\n\/\/ Valid options were A(SCII), I(mage), E(BCDIC) or LN (for local type). Since\n\/\/ we plan to just accept bytes from the client unchanged, I think Image mode is\n\/\/ adequate. 
The RFC requires we accept ASCII mode however, so accept it, but\n\/\/ ignore it.\nfunc (ftpConn *FTPConn) cmdType(param string) {\n\tif strings.ToUpper(param) == \"A\" {\n\t\tftpConn.writeMessage(200, \"Type set to ASCII\")\n\t} else if strings.ToUpper(param) == \"I\" {\n\t\tftpConn.writeMessage(200, \"Type set to binary\")\n\t} else {\n\t\tftpConn.writeMessage(500, \"Invalid type\")\n\t}\n}\n\n\/\/ cmdUser responds to the USER FTP command by asking for the password\nfunc (ftpConn *FTPConn) cmdUser(param string) {\n\tftpConn.reqUser = param\n\tftpConn.writeMessage(331, \"User name ok, password required\")\n}\n\nfunc (ftpConn *FTPConn) parseLine(line string) (string, string) {\n\tparams := strings.SplitN(strings.Trim(line, \"\\r\\n\"), \" \", 2)\n\tif len(params) == 1 {\n\t\treturn params[0], \"\"\n\t}\n\treturn params[0], params[1]\n}\n\n\/\/ writeMessage will send a standard FTP response back to the client.\nfunc (ftpConn *FTPConn) writeMessage(code int, message string) (wrote int, err error) {\n\tline := fmt.Sprintf(\"%d %s\\r\\n\", code, message)\n\tlog.Print(line)\n\twrote, err = ftpConn.controlWriter.WriteString(line)\n\tftpConn.controlWriter.Flush()\n\treturn\n}\n\n\/\/ buildPath takes a client supplied path or filename and generates a safe\n\/\/ absolute path withing their account sandbox.\n\/\/\n\/\/ buildpath(\"\/\")\n\/\/ => \"\/\"\n\/\/ buildpath(\"one.txt\")\n\/\/ => \"\/one.txt\"\n\/\/ buildpath(\"\/files\/two.txt\")\n\/\/ => \"\/files\/two.txt\"\n\/\/ buildpath(\"files\/two.txt\")\n\/\/ => \"files\/two.txt\"\n\/\/ buildpath(\"\/..\/..\/..\/..\/etc\/passwd\")\n\/\/ => \"\/etc\/passwd\"\n\/\/\n\/\/ The driver implementation is responsible for deciding how to treat this path.\n\/\/ Obviously they MUST NOT just read the path off disk. The probably want to\n\/\/ prefix the path with something to scope the users access to a sandbox.\nfunc (ftpConn *FTPConn) buildPath(filename string) (fullPath string){\n\tif filename[0:1] == \"\/\" {\n\t\tfullPath = filepath.Clean(filename)\n\t} else if filename != \"\" && filename != \"-a\" {\n\t\tfullPath = filepath.Clean(ftpConn.namePrefix + \"\/\" + filename)\n\t} else {\n\t\tfullPath = filepath.Clean(ftpConn.namePrefix)\n\t}\n\tfullPath = strings.Replace(fullPath, \"\/\/\", \"\/\", -1)\n\treturn\n}\ngo switch statements don't have fall throughpackage graval\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\twelcomeMessage = \"Welcome to the Go FTP Server\"\n)\n\ntype FTPConn struct {\n\tconn *net.TCPConn\n\tcontrolReader *bufio.Reader\n\tcontrolWriter *bufio.Writer\n\tdata *net.TCPConn\n\tdriver FTPDriver\n\tnamePrefix string\n\treqUser string\n\tuser string\n\trenameFrom string\n}\n\n\/\/ NewFTPConn constructs a new object that will handle the FTP protocol over\n\/\/ an active net.TCPConn. The TCP connection should already be open before\n\/\/ it is handed to this functions. driver is an instance of FTPDrive that\n\/\/ will handle all auth and persistence details.\nfunc NewFTPConn(tcpConn *net.TCPConn, driver FTPDriver) *FTPConn {\n\tc := new(FTPConn)\n\tc.namePrefix = \"\/\"\n\tc.conn = tcpConn\n\tc.controlReader = bufio.NewReader(tcpConn)\n\tc.controlWriter = bufio.NewWriter(tcpConn)\n\tc.driver = driver\n\treturn c\n}\n\n\/\/ Serve starts an endless loop that reads FTP commands from the client and\n\/\/ responds appropriately. terminated is a channel that will receive a true\n\/\/ message when the connection closes. 
\nfunc (ftpConn *FTPConn) Serve() {\n\tlog.Print(\"Connection Established\")\n\t\/\/ send welcome\n\tftpConn.writeMessage(220, welcomeMessage)\n\t\/\/ read commands\n\tfor {\n\t\tline, err := ftpConn.controlReader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tftpConn.receiveLine(line)\n\t}\n\tlog.Print(\"Connection Terminated\")\n}\n\n\/\/ Close will manually close this connection, even if the client isn't ready.\nfunc (ftpConn *FTPConn) Close() {\n\tftpConn.conn.Close()\n\tif ftpConn.data != nil {\n\t\tftpConn.data.Close()\n\t}\n}\n\n\/\/ receiveLine accepts a single line FTP command and co-ordinates an\n\/\/ appropriate response.\nfunc (ftpConn *FTPConn) receiveLine(line string) {\n\tlog.Print(line)\n\tcommand, param := ftpConn.parseLine(line)\n\tswitch command {\n\tcase \"ALLO\":\n\t\tftpConn.cmdAllo()\n\tcase \"CDUP\", \"XCUP\":\n\t\tftpConn.cmdCdup()\n\tcase \"CWD\", \"XCWD\":\n\t\tftpConn.cmdCwd(param)\n\tcase \"DELE\":\n\t\tftpConn.cmdDele(param)\n\tcase \"MKD\":\n\t\tftpConn.cmdMkd(param)\n\tcase \"MODE\":\n\t\tftpConn.cmdMode(param)\n\tcase \"NOOP\":\n\t\tftpConn.cmdNoop()\n\tcase \"PASS\":\n\t\tftpConn.cmdPass(param)\n\tcase \"PWD\", \"XPWD\":\n\t\tftpConn.cmdPwd()\n\tcase \"QUIT\":\n\t\tftpConn.Close()\n\tcase \"RMD\", \"XRMD\":\n\t\tftpConn.cmdRmd(param)\n\tcase \"RNFR\":\n\t\tftpConn.cmdRnfr(param)\n\tcase \"RNTO\":\n\t\tftpConn.cmdRnto(param)\n\tcase \"SIZE\":\n\t\tftpConn.cmdSize(param)\n\tcase \"STRU\":\n\t\tftpConn.cmdStru(param)\n\tcase \"SYST\":\n\t\tftpConn.cmdSyst()\n\tcase \"TYPE\":\n\t\tftpConn.cmdType(param)\n\tcase \"USER\":\n\t\tftpConn.cmdUser(param)\n\tdefault:\n\t\tftpConn.writeMessage(500, \"Command not found\")\n\t}\n}\n\n\/\/ cmdAllo responds to the ALLO FTP command.\n\/\/\n\/\/ This is essentially a ping from the client so we just respond with a\n\/\/ basic OK message.\nfunc (ftpConn *FTPConn) cmdAllo() {\n\tftpConn.writeMessage(202, \"Obsolete\")\n}\n\n\/\/ cmdCdup responds to the CDUP FTP command.\n\/\/\n\/\/ Allows the client to change their current directory to the parent.\nfunc (ftpConn *FTPConn) cmdCdup() {\n\tftpConn.cmdCwd(\"..\")\n}\n\n\/\/ cmdCwd responds to the CWD FTP command. It allows the client to change the\n\/\/ current working directory.\nfunc (ftpConn *FTPConn) cmdCwd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.ChangeDir(path) {\n\t\tftpConn.namePrefix = path\n\t\tftpConn.writeMessage(250, \"Directory changed to \" + path)\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdDele responds to the DELE FTP command. It allows the client to delete\n\/\/ a file\nfunc (ftpConn *FTPConn) cmdDele(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.DeleteFile(path) {\n\t\tftpConn.writeMessage(250, \"File deleted\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdMkd responds to the MKD FTP command. 
It allows the client to create\n\/\/ a new directory\nfunc (ftpConn *FTPConn) cmdMkd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.MakeDir(path) {\n\t\tftpConn.writeMessage(257, \"Directory created\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdMode responds to the MODE FTP command.\n\/\/\n\/\/ the original FTP spec had various options for hosts to negotiate how data\n\/\/ would be sent over the data socket. In reality these days (S)tream mode\n\/\/ is all that is used for the mode - data is just streamed down the data\n\/\/ socket unchanged.\nfunc (ftpConn *FTPConn) cmdMode(param string) {\n\tif strings.ToUpper(param) == \"S\" {\n\t\tftpConn.writeMessage(200, \"OK\")\n\t} else {\n\t\tftpConn.writeMessage(504, \"MODE is an obsolete command\")\n\t}\n}\n\n\/\/ cmdNoop responds to the NOOP FTP command.\n\/\/\n\/\/ This is essentially a ping from the client so we just respond with a\n\/\/ basic 200 message.\nfunc (ftpConn *FTPConn) cmdNoop() {\n\tftpConn.writeMessage(200, \"OK\")\n}\n\n\/\/ cmdPass responds to the PASS FTP command by asking the driver if the supplied\n\/\/ username and password are valid\nfunc (ftpConn *FTPConn) cmdPass(param string) {\n\tif ftpConn.driver.Authenticate(ftpConn.reqUser, param) {\n\t\tftpConn.user = ftpConn.reqUser\n\t\tftpConn.reqUser = \"\"\n\t\tftpConn.writeMessage(230, \"Password ok, continue\")\n\t} else {\n\t\tftpConn.writeMessage(530, \"Incorrect password, not logged in\")\n\t}\n}\n\n\/\/ cmdPwd responds to the PWD FTP command.\n\/\/\n\/\/ Tells the client what the current working directory is.\nfunc (ftpConn *FTPConn) cmdPwd() {\n\tftpConn.writeMessage(257, \"\\\"\" + ftpConn.namePrefix + \"\\\" is the current directory\")\n}\n\n\/\/ cmdRmd responds to the RMD FTP command. It allows the client to delete a\n\/\/ directory.\nfunc (ftpConn *FTPConn) cmdRmd(param string) {\n\tpath := ftpConn.buildPath(param)\n\tif ftpConn.driver.DeleteDir(path) {\n\t\tftpConn.writeMessage(250, \"Directory deleted\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdRnfr responds to the RNFR FTP command. It's the first of two commands\n\/\/ required for a client to rename a file.\nfunc (ftpConn *FTPConn) cmdRnfr(param string) {\n\tftpConn.renameFrom = ftpConn.buildPath(param)\n\tftpConn.writeMessage(350, \"Requested file action pending further information.\")\n}\n\n\/\/ cmdRnto responds to the RNTO FTP command. It's the second of two commands\n\/\/ required for a client to rename a file.\nfunc (ftpConn *FTPConn) cmdRnto(param string) {\n\ttoPath := ftpConn.buildPath(param)\n\tif ftpConn.driver.Rename(ftpConn.renameFrom, toPath) {\n\t\tftpConn.writeMessage(250, \"File renamed\")\n\t} else {\n\t\tftpConn.writeMessage(550, \"Action not taken\")\n\t}\n}\n\n\/\/ cmdSize responds to the SIZE FTP command. 
It returns the size of the\n\/\/ requested path in bytes.\nfunc (ftpConn *FTPConn) cmdSize(param string) {\n\tpath := ftpConn.buildPath(param)\n\tbytes := ftpConn.driver.Bytes(path)\n\tif bytes >= 0 {\n\t\tftpConn.writeMessage(213, strconv.Itoa(bytes))\n\t} else {\n\t\tftpConn.writeMessage(450, \"file not available\")\n\t}\n}\n\n\/\/ cmdStru responds to the STRU FTP command.\n\/\/\n\/\/ like the MODE and TYPE commands, stru[cture] dates back to a time when the\n\/\/ FTP protocol was more aware of the content of the files it was transferring,\n\/\/ and would sometimes be expected to translate things like EOL markers on the\n\/\/ fly.\n\/\/\n\/\/ These days files are sent unmodified, and F(ile) mode is the only one we\n\/\/ really need to support.\nfunc (ftpConn *FTPConn) cmdStru(param string) {\n\tif strings.ToUpper(param) == \"F\" {\n\t\tftpConn.writeMessage(200, \"OK\")\n\t} else {\n\t\tftpConn.writeMessage(504, \"STRU is an obsolete command\")\n\t}\n}\n\n\/\/ cmdSyst responds to the SYST FTP command by providing a canned response.\nfunc (ftpConn *FTPConn) cmdSyst() {\n\tftpConn.writeMessage(215, \"UNIX Type: L8\")\n}\n\n\/\/ cmdType responds to the TYPE FTP command.\n\/\/\n\/\/ like the MODE and STRU commands, TYPE dates back to a time when the FTP\n\/\/ protocol was more aware of the content of the files it was transferring, and\n\/\/ would sometimes be expected to translate things like EOL markers on the fly.\n\/\/\n\/\/ Valid options were A(SCII), I(mage), E(BCDIC) or LN (for local type). Since\n\/\/ we plan to just accept bytes from the client unchanged, I think Image mode is\n\/\/ adequate. The RFC requires we accept ASCII mode however, so accept it, but\n\/\/ ignore it.\nfunc (ftpConn *FTPConn) cmdType(param string) {\n\tif strings.ToUpper(param) == \"A\" {\n\t\tftpConn.writeMessage(200, \"Type set to ASCII\")\n\t} else if strings.ToUpper(param) == \"I\" {\n\t\tftpConn.writeMessage(200, \"Type set to binary\")\n\t} else {\n\t\tftpConn.writeMessage(500, \"Invalid type\")\n\t}\n}\n\n\/\/ cmdUser responds to the USER FTP command by asking for the password\nfunc (ftpConn *FTPConn) cmdUser(param string) {\n\tftpConn.reqUser = param\n\tftpConn.writeMessage(331, \"User name ok, password required\")\n}\n\nfunc (ftpConn *FTPConn) parseLine(line string) (string, string) {\n\tparams := strings.SplitN(strings.Trim(line, \"\\r\\n\"), \" \", 2)\n\tif len(params) == 1 {\n\t\treturn params[0], \"\"\n\t}\n\treturn params[0], params[1]\n}\n\n\/\/ writeMessage will send a standard FTP response back to the client.\nfunc (ftpConn *FTPConn) writeMessage(code int, message string) (wrote int, err error) {\n\tline := fmt.Sprintf(\"%d %s\\r\\n\", code, message)\n\tlog.Print(line)\n\twrote, err = ftpConn.controlWriter.WriteString(line)\n\tftpConn.controlWriter.Flush()\n\treturn\n}\n\n\/\/ buildPath takes a client supplied path or filename and generates a safe\n\/\/ absolute path within their account sandbox.\n\/\/\n\/\/ buildpath(\"\/\")\n\/\/ => \"\/\"\n\/\/ buildpath(\"one.txt\")\n\/\/ => \"\/one.txt\"\n\/\/ buildpath(\"\/files\/two.txt\")\n\/\/ => \"\/files\/two.txt\"\n\/\/ buildpath(\"files\/two.txt\")\n\/\/ => \"files\/two.txt\"\n\/\/ buildpath(\"\/..\/..\/..\/..\/etc\/passwd\")\n\/\/ => \"\/etc\/passwd\"\n\/\/\n\/\/ The driver implementation is responsible for deciding how to treat this path.\n\/\/ Obviously they MUST NOT just read the path off disk. 
They probably want to\n\/\/ prefix the path with something like that to scope the user's access to a\n\/\/ sandbox.\nfunc (ftpConn *FTPConn) buildPath(filename string) (fullPath string) {\n\tif filename[0:1] == \"\/\" {\n\t\tfullPath = filepath.Clean(filename)\n\t} else if filename != \"\" && filename != \"-a\" {\n\t\tfullPath = filepath.Clean(ftpConn.namePrefix + \"\/\" + filename)\n\t} else {\n\t\tfullPath = filepath.Clean(ftpConn.namePrefix)\n\t}\n\tfullPath = strings.Replace(fullPath, \"\/\/\", \"\/\", -1)\n\treturn\n}\n<|endoftext|>"} {"text":"package image\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\tsimage \"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/nfnt\/resize\"\n)\n\n\/\/ Type for supported profile picture types\ntype Type string\n\n\/\/ Size in pixels\ntype Size struct {\n\tWidth int\n\tHeight int\n}\n\n\/\/ Image content and type\ntype Image struct {\n\tContent []byte\n\tType Type\n}\n\nconst (\n\t\/\/ Invalid is an invalid image type\n\tInvalid Type = \"\"\n\t\/\/ GIF image\/gif\n\tGIF Type = \"image\/gif\"\n\t\/\/ JPEG image\/jpeg\n\tJPEG Type = \"image\/jpeg\"\n\t\/\/ PNG image\/png\n\tPNG Type = \"image\/png\"\n)\n\nvar validImageTypes = [...]Type{\n\tGIF,\n\tJPEG,\n\tPNG,\n}\n\n\/\/ ErrInvalidData when the image data is invalid\nvar ErrInvalidData = errors.New(\"Invalid image data\")\n\n\/\/ ErrInvalidType when an image type is invalid\nvar ErrInvalidType = errors.New(\"Invalid image type\")\n\n\/\/ ProcessError image processing failed (resizing)\ntype ProcessError struct {\n\terr error\n}\n\nfunc (v ProcessError) Error() string {\n\treturn fmt.Sprintf(\"Failed to process image: %+v\", v.err)\n}\n\nfunc (v Type) String() string {\n\treturn string(v)\n}\n\n\/\/ NewImage creates a new profile picture\nfunc NewImage(r io.Reader, typ Type, maxSize Size) (pic Image, err error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\timg, err := createProfileImage(typ, data)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Resize the image\n\timg = resizeProfileImage(img, maxSize)\n\n\tbytes, err := func(img simage.Image, typ Type) (b []byte, err error) {\n\t\tvar buf bytes.Buffer\n\t\tswitch typ {\n\t\tcase GIF:\n\t\t\terr = gif.Encode(&buf, img, nil)\n\t\t\tbreak\n\t\tcase PNG:\n\t\t\terr = png.Encode(&buf, img)\n\t\t\tbreak\n\t\tcase JPEG:\n\t\t\terr = jpeg.Encode(&buf, img, nil)\n\t\t\tbreak\n\t\t}\n\t\tb = buf.Bytes()\n\t\treturn\n\t}(img, typ)\n\n\tif err != nil {\n\t\terr = ProcessError{err}\n\t\treturn\n\t}\n\n\tpic = Image{bytes, typ}\n\treturn\n}\n\n\/\/ Base64 returns base64 encoded data\nfunc (v Image) Base64() string {\n\treturn base64.StdEncoding.EncodeToString(v.Content)\n}\n\n\/\/ Image returns an image from the Content bytes\nfunc (v Image) Image() (img simage.Image, err error) {\n\timg, err = createProfileImage(v.Type, v.Content)\n\treturn\n}\n\n\/\/ IsValid checks if the profile pic is valid\nfunc (v Image) IsValid() bool {\n\tif len(v.Content) < 10 {\n\t\treturn false\n\t}\n\n\tif v.Type == Invalid {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc createProfileImage(typ Type, data []byte) (img simage.Image, err error) {\n\tswitch typ {\n\tcase GIF:\n\t\timg, err = gif.Decode(bytes.NewReader(data))\n\t\tbreak\n\tcase PNG:\n\t\timg, err = png.Decode(bytes.NewReader(data))\n\t\tbreak\n\tcase JPEG:\n\t\timg, err = jpeg.Decode(bytes.NewReader(data))\n\t\tbreak\n\tdefault:\n\t\terr = ErrInvalidType\n\t\tbreak\n\t}\n\tif err != nil {\n\t\terr = 
ProcessError{err}\n\t\treturn\n\t}\n\treturn\n}\n\nfunc resizeProfileImage(img simage.Image, maxSize Size) simage.Image {\n\ts := img.Bounds().Size()\n\tif s.X > maxSize.Width || s.Y > maxSize.Height {\n\t\treturn resize.Thumbnail(uint(maxSize.Width), uint(maxSize.Height), img, resize.Lanczos3)\n\t}\n\treturn img\n}\nimage type IsValidpackage image\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\tsimage \"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/nfnt\/resize\"\n)\n\n\/\/ Type for supported profile picture types\ntype Type string\n\n\/\/ Size in pixels\ntype Size struct {\n\tWidth int\n\tHeight int\n}\n\n\/\/ Image content and type\ntype Image struct {\n\tContent []byte\n\tType Type\n}\n\nconst (\n\t\/\/ Invalid is an invalid image type\n\tInvalid Type = \"\"\n\t\/\/ GIF image\/gif\n\tGIF Type = \"image\/gif\"\n\t\/\/ JPEG image\/jpeg\n\tJPEG Type = \"image\/jpeg\"\n\t\/\/ PNG image\/png\n\tPNG Type = \"image\/png\"\n)\n\nvar validImageTypes = [...]Type{\n\tGIF,\n\tJPEG,\n\tPNG,\n}\n\n\/\/ ErrInvalidData when the image data is invalid\nvar ErrInvalidData = errors.New(\"Invalid image data\")\n\n\/\/ ErrInvalidType when an image type is invalid\nvar ErrInvalidType = errors.New(\"Invalid image type\")\n\n\/\/ ProcessError image processing failed (resizing)\ntype ProcessError struct {\n\terr error\n}\n\nfunc (v ProcessError) Error() string {\n\treturn fmt.Sprintf(\"Failed to process image: %+v\", v.err)\n}\n\nfunc (v Type) String() string {\n\treturn string(v)\n}\n\n\/\/ IsValid returns true if the image type is either gif\/png\/jpeg\nfunc (v Type) IsValid() bool {\n\tswitch v {\n\tcase GIF:\n\t\treturn true\n\tcase PNG:\n\t\treturn true\n\tcase JPEG:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ NewImage creates a new profile picture\nfunc NewImage(r io.Reader, typ Type, maxSize Size) (pic Image, err error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\timg, err := createProfileImage(typ, data)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Resize the image\n\timg = resizeProfileImage(img, maxSize)\n\n\tbytes, err := func(img simage.Image, typ Type) (b []byte, err error) {\n\t\tvar buf bytes.Buffer\n\t\tswitch typ {\n\t\tcase GIF:\n\t\t\terr = gif.Encode(&buf, img, nil)\n\t\t\tbreak\n\t\tcase PNG:\n\t\t\terr = png.Encode(&buf, img)\n\t\t\tbreak\n\t\tcase JPEG:\n\t\t\terr = jpeg.Encode(&buf, img, nil)\n\t\t\tbreak\n\t\t}\n\t\tb = buf.Bytes()\n\t\treturn\n\t}(img, typ)\n\n\tif err != nil {\n\t\terr = ProcessError{err}\n\t\treturn\n\t}\n\n\tpic = Image{bytes, typ}\n\treturn\n}\n\n\/\/ Base64 returns base64 encoded data\nfunc (v Image) Base64() string {\n\treturn base64.StdEncoding.EncodeToString(v.Content)\n}\n\n\/\/ Image returns an image from the Content bytes\nfunc (v Image) Image() (img simage.Image, err error) {\n\timg, err = createProfileImage(v.Type, v.Content)\n\treturn\n}\n\n\/\/ IsValid checks if the profile pic is valid\nfunc (v Image) IsValid() bool {\n\tif len(v.Content) < 10 {\n\t\treturn false\n\t}\n\n\tif v.Type == Invalid {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc createProfileImage(typ Type, data []byte) (img simage.Image, err error) {\n\tswitch typ {\n\tcase GIF:\n\t\timg, err = gif.Decode(bytes.NewReader(data))\n\t\tbreak\n\tcase PNG:\n\t\timg, err = png.Decode(bytes.NewReader(data))\n\t\tbreak\n\tcase JPEG:\n\t\timg, err = jpeg.Decode(bytes.NewReader(data))\n\t\tbreak\n\tdefault:\n\t\terr = ErrInvalidType\n\t\tbreak\n\t}\n\tif 
err != nil {\n\t\terr = ProcessError{err}\n\t\treturn\n\t}\n\treturn\n}\n\nfunc resizeProfileImage(img simage.Image, maxSize Size) simage.Image {\n\ts := img.Bounds().Size()\n\tif s.X > maxSize.Width || s.Y > maxSize.Height {\n\t\treturn resize.Thumbnail(uint(maxSize.Width), uint(maxSize.Height), img, resize.Lanczos3)\n\t}\n\treturn img\n}\n<|endoftext|>"} {"text":"package storage\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n)\n\n\/\/ File-based storage for torrents, that isn't yet bound to a particular\n\/\/ torrent.\ntype fileClientImpl struct {\n\tbaseDir string\n\tpathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string\n\tpc PieceCompletion\n}\n\n\/\/ The Default path maker just returns the current path\nfunc defaultPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {\n\treturn baseDir\n}\n\nfunc infoHashPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {\n\treturn filepath.Join(baseDir, infoHash.HexString())\n}\n\n\/\/ All Torrent data stored in this baseDir\nfunc NewFile(baseDir string) ClientImpl {\n\treturn NewFileWithCompletion(baseDir, pieceCompletionForDir(baseDir))\n}\n\nfunc NewFileWithCompletion(baseDir string, completion PieceCompletion) ClientImpl {\n\treturn newFileWithCustomPathMakerAndCompletion(baseDir, nil, completion)\n}\n\n\/\/ All Torrent data stored in subdirectorys by infohash\nfunc NewFileByInfoHash(baseDir string) ClientImpl {\n\treturn NewFileWithCustomPathMaker(baseDir, infoHashPathMaker)\n}\n\n\/\/ Allows passing a function to determine the path for storing torrent data\nfunc NewFileWithCustomPathMaker(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string) ClientImpl {\n\treturn newFileWithCustomPathMakerAndCompletion(baseDir, pathMaker, pieceCompletionForDir(baseDir))\n}\n\nfunc newFileWithCustomPathMakerAndCompletion(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string, completion PieceCompletion) ClientImpl {\n\tif pathMaker == nil {\n\t\tpathMaker = defaultPathMaker\n\t}\n\treturn &fileClientImpl{\n\t\tbaseDir: baseDir,\n\t\tpathMaker: pathMaker,\n\t\tpc: completion,\n\t}\n}\n\nfunc (me *fileClientImpl) Close() error {\n\treturn me.pc.Close()\n}\n\nfunc (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {\n\tdir := fs.pathMaker(fs.baseDir, info, infoHash)\n\terr := CreateNativeZeroLengthFiles(info, dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &fileTorrentImpl{\n\t\tdir,\n\t\tinfo,\n\t\tinfoHash,\n\t\tfs.pc,\n\t}, nil\n}\n\n\/\/ File-based torrent storage, not yet bound to a Torrent.\ntype fileTorrentImpl struct {\n\tdir string\n\tinfo *metainfo.Info\n\tinfoHash metainfo.Hash\n\tcompletion PieceCompletion\n}\n\nfunc (fts *fileTorrentImpl) Piece(p metainfo.Piece) PieceImpl {\n\t\/\/ Create a view onto the file-based torrent storage.\n\t_io := fileTorrentImplIO{fts}\n\t\/\/ Return the appropriate segments of this.\n\treturn &fileStoragePiece{\n\t\tfts,\n\t\tp,\n\t\tmissinggo.NewSectionWriter(_io, p.Offset(), p.Length()),\n\t\tio.NewSectionReader(_io, p.Offset(), p.Length()),\n\t}\n}\n\nfunc (fs *fileTorrentImpl) Close() error {\n\treturn nil\n}\n\n\/\/ Creates natives files for any zero-length file entries in the info. 
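For\n\/\/ example, given an illustrative layout [{Path: \"a\", Length: 0}, {Path: \"b\",\n\/\/ Length: 16384}], piece writes only ever touch b, so a has to be created\n\/\/ explicitly here.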
This is\n\/\/ a helper for file-based storages, which don't address or write to zero-\n\/\/ length files because they have no corresponding pieces.\nfunc CreateNativeZeroLengthFiles(info *metainfo.Info, dir string) (err error) {\n\tfor _, fi := range info.UpvertedFiles() {\n\t\tif fi.Length != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tname := filepath.Join(append([]string{dir, info.Name}, fi.Path...)...)\n\t\tos.MkdirAll(filepath.Dir(name), 0750)\n\t\tvar f io.Closer\n\t\tf, err = os.Create(name)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tf.Close()\n\t}\n\treturn\n}\n\n\/\/ Exposes file-based storage of a torrent, as one big ReadWriterAt.\ntype fileTorrentImplIO struct {\n\tfts *fileTorrentImpl\n}\n\n\/\/ Returns EOF on short or missing file.\nfunc (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {\n\tf, err := os.Open(fst.fts.fileInfoName(fi))\n\tif os.IsNotExist(err) {\n\t\t\/\/ File missing is treated the same as a short file.\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\t\/\/ Limit the read to within the expected bounds of this file.\n\tif int64(len(b)) > fi.Length-off {\n\t\tb = b[:fi.Length-off]\n\t}\n\tfor off < fi.Length && len(b) != 0 {\n\t\tn1, err1 := f.ReadAt(b, off)\n\t\tb = b[n1:]\n\t\tn += n1\n\t\toff += int64(n1)\n\t\tif n1 == 0 {\n\t\t\terr = err1\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.\nfunc (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {\n\tfor _, fi := range fst.fts.info.UpvertedFiles() {\n\t\tfor off < fi.Length {\n\t\t\tn1, err1 := fst.readFileAt(fi, b, off)\n\t\t\tn += n1\n\t\t\toff += int64(n1)\n\t\t\tb = b[n1:]\n\t\t\tif len(b) == 0 {\n\t\t\t\t\/\/ Got what we need.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif n1 != 0 {\n\t\t\t\t\/\/ Made progress.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = err1\n\t\t\tif err == io.EOF {\n\t\t\t\t\/\/ Lies.\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\toff -= fi.Length\n\t}\n\terr = io.EOF\n\treturn\n}\n\nfunc (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {\n\tfor _, fi := range fst.fts.info.UpvertedFiles() {\n\t\tif off >= fi.Length {\n\t\t\toff -= fi.Length\n\t\t\tcontinue\n\t\t}\n\t\tn1 := len(p)\n\t\tif int64(n1) > fi.Length-off {\n\t\t\tn1 = int(fi.Length - off)\n\t\t}\n\t\tname := fst.fts.fileInfoName(fi)\n\t\tos.MkdirAll(filepath.Dir(name), 0770)\n\t\tvar f *os.File\n\t\tf, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0660)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn1, err = f.WriteAt(p[:n1], off)\n\t\t\/\/ TODO: On some systems, write errors can be delayed until the Close.\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn += n1\n\t\toff = 0\n\t\tp = p[n1:]\n\t\tif len(p) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (fts *fileTorrentImpl) fileInfoName(fi metainfo.FileInfo) string {\n\treturn filepath.Join(append([]string{fts.dir, fts.info.Name}, fi.Path...)...)\n}\nstorage: Remove incorrect commentpackage storage\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n)\n\n\/\/ File-based storage for torrents, that isn't yet bound to a particular\n\/\/ torrent.\ntype fileClientImpl struct {\n\tbaseDir string\n\tpathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string\n\tpc PieceCompletion\n}\n\n\/\/ The Default path maker just returns the current 
path\nfunc defaultPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {\n\treturn baseDir\n}\n\nfunc infoHashPathMaker(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {\n\treturn filepath.Join(baseDir, infoHash.HexString())\n}\n\n\/\/ All Torrent data stored in this baseDir\nfunc NewFile(baseDir string) ClientImpl {\n\treturn NewFileWithCompletion(baseDir, pieceCompletionForDir(baseDir))\n}\n\nfunc NewFileWithCompletion(baseDir string, completion PieceCompletion) ClientImpl {\n\treturn newFileWithCustomPathMakerAndCompletion(baseDir, nil, completion)\n}\n\n\/\/ All Torrent data stored in subdirectorys by infohash\nfunc NewFileByInfoHash(baseDir string) ClientImpl {\n\treturn NewFileWithCustomPathMaker(baseDir, infoHashPathMaker)\n}\n\n\/\/ Allows passing a function to determine the path for storing torrent data\nfunc NewFileWithCustomPathMaker(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string) ClientImpl {\n\treturn newFileWithCustomPathMakerAndCompletion(baseDir, pathMaker, pieceCompletionForDir(baseDir))\n}\n\nfunc newFileWithCustomPathMakerAndCompletion(baseDir string, pathMaker func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string, completion PieceCompletion) ClientImpl {\n\tif pathMaker == nil {\n\t\tpathMaker = defaultPathMaker\n\t}\n\treturn &fileClientImpl{\n\t\tbaseDir: baseDir,\n\t\tpathMaker: pathMaker,\n\t\tpc: completion,\n\t}\n}\n\nfunc (me *fileClientImpl) Close() error {\n\treturn me.pc.Close()\n}\n\nfunc (fs *fileClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {\n\tdir := fs.pathMaker(fs.baseDir, info, infoHash)\n\terr := CreateNativeZeroLengthFiles(info, dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &fileTorrentImpl{\n\t\tdir,\n\t\tinfo,\n\t\tinfoHash,\n\t\tfs.pc,\n\t}, nil\n}\n\ntype fileTorrentImpl struct {\n\tdir string\n\tinfo *metainfo.Info\n\tinfoHash metainfo.Hash\n\tcompletion PieceCompletion\n}\n\nfunc (fts *fileTorrentImpl) Piece(p metainfo.Piece) PieceImpl {\n\t\/\/ Create a view onto the file-based torrent storage.\n\t_io := fileTorrentImplIO{fts}\n\t\/\/ Return the appropriate segments of this.\n\treturn &fileStoragePiece{\n\t\tfts,\n\t\tp,\n\t\tmissinggo.NewSectionWriter(_io, p.Offset(), p.Length()),\n\t\tio.NewSectionReader(_io, p.Offset(), p.Length()),\n\t}\n}\n\nfunc (fs *fileTorrentImpl) Close() error {\n\treturn nil\n}\n\n\/\/ Creates natives files for any zero-length file entries in the info. 
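It is exported\n\/\/ so that custom storages can reuse it; a usage sketch (error handling as in\n\/\/ OpenTorrent above):\n\/\/\n\/\/   if err := CreateNativeZeroLengthFiles(info, dir); err != nil {\n\/\/   \treturn nil, err\n\/\/   }\n\/\/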
This is\n\/\/ a helper for file-based storages, which don't address or write to zero-\n\/\/ length files because they have no corresponding pieces.\nfunc CreateNativeZeroLengthFiles(info *metainfo.Info, dir string) (err error) {\n\tfor _, fi := range info.UpvertedFiles() {\n\t\tif fi.Length != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tname := filepath.Join(append([]string{dir, info.Name}, fi.Path...)...)\n\t\tos.MkdirAll(filepath.Dir(name), 0750)\n\t\tvar f io.Closer\n\t\tf, err = os.Create(name)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tf.Close()\n\t}\n\treturn\n}\n\n\/\/ Exposes file-based storage of a torrent, as one big ReadWriterAt.\ntype fileTorrentImplIO struct {\n\tfts *fileTorrentImpl\n}\n\n\/\/ Returns EOF on short or missing file.\nfunc (fst *fileTorrentImplIO) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {\n\tf, err := os.Open(fst.fts.fileInfoName(fi))\n\tif os.IsNotExist(err) {\n\t\t\/\/ File missing is treated the same as a short file.\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\t\/\/ Limit the read to within the expected bounds of this file.\n\tif int64(len(b)) > fi.Length-off {\n\t\tb = b[:fi.Length-off]\n\t}\n\tfor off < fi.Length && len(b) != 0 {\n\t\tn1, err1 := f.ReadAt(b, off)\n\t\tb = b[n1:]\n\t\tn += n1\n\t\toff += int64(n1)\n\t\tif n1 == 0 {\n\t\t\terr = err1\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.\nfunc (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {\n\tfor _, fi := range fst.fts.info.UpvertedFiles() {\n\t\tfor off < fi.Length {\n\t\t\tn1, err1 := fst.readFileAt(fi, b, off)\n\t\t\tn += n1\n\t\t\toff += int64(n1)\n\t\t\tb = b[n1:]\n\t\t\tif len(b) == 0 {\n\t\t\t\t\/\/ Got what we need.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif n1 != 0 {\n\t\t\t\t\/\/ Made progress.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = err1\n\t\t\tif err == io.EOF {\n\t\t\t\t\/\/ Lies.\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\toff -= fi.Length\n\t}\n\terr = io.EOF\n\treturn\n}\n\nfunc (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {\n\tfor _, fi := range fst.fts.info.UpvertedFiles() {\n\t\tif off >= fi.Length {\n\t\t\toff -= fi.Length\n\t\t\tcontinue\n\t\t}\n\t\tn1 := len(p)\n\t\tif int64(n1) > fi.Length-off {\n\t\t\tn1 = int(fi.Length - off)\n\t\t}\n\t\tname := fst.fts.fileInfoName(fi)\n\t\tos.MkdirAll(filepath.Dir(name), 0770)\n\t\tvar f *os.File\n\t\tf, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0660)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn1, err = f.WriteAt(p[:n1], off)\n\t\t\/\/ TODO: On some systems, write errors can be delayed until the Close.\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn += n1\n\t\toff = 0\n\t\tp = p[n1:]\n\t\tif len(p) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (fts *fileTorrentImpl) fileInfoName(fi metainfo.FileInfo) string {\n\treturn filepath.Join(append([]string{fts.dir, fts.info.Name}, fi.Path...)...)\n}\n<|endoftext|>"} {"text":"package storage\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\t\"github.com\/edsrzf\/mmap-go\"\n\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/mmap_span\"\n)\n\ntype mmapStorage struct {\n\tbaseDir string\n}\n\nfunc NewMMap(baseDir string) Client {\n\treturn &mmapStorage{\n\t\tbaseDir: baseDir,\n\t}\n}\n\nfunc (s *mmapStorage) OpenTorrent(info *metainfo.InfoEx) (t Torrent, err 
error) {\n\tspan, err := mMapTorrent(&info.Info, s.baseDir)\n\tt = &mmapTorrentStorage{\n\t\tspan: span,\n\t}\n\treturn\n}\n\ntype mmapTorrentStorage struct {\n\tspan mmap_span.MMapSpan\n\tcompleted map[metainfo.Hash]bool\n}\n\nfunc (ts *mmapTorrentStorage) Piece(p metainfo.Piece) Piece {\n\treturn mmapStoragePiece{\n\t\tstorage: ts,\n\t\tp: p,\n\t\tReaderAt: io.NewSectionReader(ts.span, p.Offset(), p.Length()),\n\t\tWriterAt: missinggo.NewSectionWriter(ts.span, p.Offset(), p.Length()),\n\t}\n}\n\nfunc (ts *mmapTorrentStorage) Close() error {\n\tts.span.Close()\n\treturn nil\n}\n\ntype mmapStoragePiece struct {\n\tstorage *mmapTorrentStorage\n\tp metainfo.Piece\n\tio.ReaderAt\n\tio.WriterAt\n}\n\nfunc (sp mmapStoragePiece) GetIsComplete() bool {\n\treturn sp.storage.completed[sp.p.Hash()]\n}\n\nfunc (sp mmapStoragePiece) MarkComplete() error {\n\tif sp.storage.completed == nil {\n\t\tsp.storage.completed = make(map[metainfo.Hash]bool)\n\t}\n\tsp.storage.completed[sp.p.Hash()] = true\n\treturn nil\n}\n\nfunc mMapTorrent(md *metainfo.Info, location string) (mms mmap_span.MMapSpan, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tmms.Close()\n\t\t}\n\t}()\n\tfor _, miFile := range md.UpvertedFiles() {\n\t\tfileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)\n\t\terr = os.MkdirAll(filepath.Dir(fileName), 0777)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error creating data directory %q: %s\", filepath.Dir(fileName), err)\n\t\t\treturn\n\t\t}\n\t\tvar file *os.File\n\t\tfile, err = os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfunc() {\n\t\t\tdefer file.Close()\n\t\t\tvar fi os.FileInfo\n\t\t\tfi, err = file.Stat()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif fi.Size() < miFile.Length {\n\t\t\t\terr = file.Truncate(miFile.Length)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif miFile.Length == 0 {\n\t\t\t\t\/\/ Can't mmap() regions with length 0.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar mMap mmap.MMap\n\t\t\tmMap, err = mmap.MapRegion(file,\n\t\t\t\tint(miFile.Length), \/\/ Probably not great on <64 bit systems.\n\t\t\t\tmmap.RDWR, 0, 0)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"error mapping file %q, length %d: %s\", file.Name(), miFile.Length, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif int64(len(mMap)) != miFile.Length {\n\t\t\t\tpanic(\"mmap has wrong length\")\n\t\t\t}\n\t\t\tmms.Append(mMap)\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\nstorage: Also use completion DB in mmap implementationpackage storage\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anacrolix\/missinggo\"\n\t\"github.com\/edsrzf\/mmap-go\"\n\n\t\"github.com\/anacrolix\/torrent\/metainfo\"\n\t\"github.com\/anacrolix\/torrent\/mmap_span\"\n)\n\ntype mmapStorage struct {\n\tbaseDir string\n\tcompletion pieceCompletion\n}\n\nfunc NewMMap(baseDir string) Client {\n\treturn &mmapStorage{\n\t\tbaseDir: baseDir,\n\t\tcompletion: pieceCompletionForDir(baseDir),\n\t}\n}\n\nfunc (s *mmapStorage) OpenTorrent(info *metainfo.InfoEx) (t Torrent, err error) {\n\tspan, err := mMapTorrent(&info.Info, s.baseDir)\n\tt = &mmapTorrentStorage{\n\t\tspan: span,\n\t\tpc: s.completion,\n\t}\n\treturn\n}\n\ntype mmapTorrentStorage struct {\n\tspan mmap_span.MMapSpan\n\tpc pieceCompletion\n}\n\nfunc (ts *mmapTorrentStorage) Piece(p metainfo.Piece) Piece {\n\treturn mmapStoragePiece{\n\t\tpc: ts.pc,\n\t\tp: p,\n\t\tReaderAt: io.NewSectionReader(ts.span, 
p.Offset(), p.Length()),\n\t\tWriterAt: missinggo.NewSectionWriter(ts.span, p.Offset(), p.Length()),\n\t}\n}\n\nfunc (ts *mmapTorrentStorage) Close() error {\n\tts.span.Close()\n\treturn nil\n}\n\ntype mmapStoragePiece struct {\n\tpc pieceCompletion\n\tp metainfo.Piece\n\tio.ReaderAt\n\tio.WriterAt\n}\n\nfunc (sp mmapStoragePiece) GetIsComplete() bool {\n\treturn sp.pc.Get(sp.p)\n}\n\nfunc (sp mmapStoragePiece) MarkComplete() error {\n\tsp.pc.Set(sp.p, true)\n\treturn nil\n}\n\nfunc mMapTorrent(md *metainfo.Info, location string) (mms mmap_span.MMapSpan, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tmms.Close()\n\t\t}\n\t}()\n\tfor _, miFile := range md.UpvertedFiles() {\n\t\tfileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)\n\t\terr = os.MkdirAll(filepath.Dir(fileName), 0777)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error creating data directory %q: %s\", filepath.Dir(fileName), err)\n\t\t\treturn\n\t\t}\n\t\tvar file *os.File\n\t\tfile, err = os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfunc() {\n\t\t\tdefer file.Close()\n\t\t\tvar fi os.FileInfo\n\t\t\tfi, err = file.Stat()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif fi.Size() < miFile.Length {\n\t\t\t\terr = file.Truncate(miFile.Length)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif miFile.Length == 0 {\n\t\t\t\t\/\/ Can't mmap() regions with length 0.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar mMap mmap.MMap\n\t\t\tmMap, err = mmap.MapRegion(file,\n\t\t\t\tint(miFile.Length), \/\/ Probably not great on <64 bit systems.\n\t\t\t\tmmap.RDWR, 0, 0)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"error mapping file %q, length %d: %s\", file.Name(), miFile.Length, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif int64(len(mMap)) != miFile.Length {\n\t\t\t\tpanic(\"mmap has wrong length\")\n\t\t\t}\n\t\t\tmms.Append(mMap)\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/scenario\"\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/session\"\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/sse\"\n)\n\ntype Log struct {\n\tTime time.Time `json:\"time\"`\n\tRoomID int `json:\"room_id\"`\n\tStrokeID int64 `json:\"stroke_id\"`\n\tStrokeTime time.Time `json:\"stroke_time\"`\n}\n\ntype RoomWatcher struct {\n\tEndCh chan struct{}\n\tLogs []Log\n\tErrors []string\n\n\tes *sse.EventSource\n\tisLeft bool\n}\n\nfunc NewRoomWatcher(target string, roomID int) *RoomWatcher {\n\tw := &RoomWatcher{\n\t\tEndCh: make(chan struct{}, 1),\n\t\tLogs: make([]Log, 0),\n\t\tErrors: make([]string, 0),\n\t\tisLeft: false,\n\t}\n\n\tgo w.watch(target, roomID)\n\n\treturn w\n}\n\n\/\/ 描いたstrokeがこの時間以上経ってから届いたら、ユーザーがストレスに感じてタブを閉じる、という設定にした。\nconst thresholdResponseTime = 5 * time.Second\n\nfunc (w *RoomWatcher) watch(target string, roomID int) {\n\n\ts := session.New(target)\n\ts.Client.Timeout = thresholdResponseTime\n\n\tpath := fmt.Sprintf(\"\/rooms\/%d\", roomID)\n\ttoken, err := scenario.GetCSRFToken(s, target+path)\n\tif err != nil {\n\t\tw.addError(fmt.Sprintf(\"GET %s リクエストに失敗しました\", path))\n\t\tfmt.Println(err)\n\t\tw.EndCh <- struct{}{}\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\tpath = \"\/api\/strokes\" + path\n\n\tif w.isLeft {\n\t\tw.EndCh <- struct{}{}\n\t\treturn\n\t}\n\tw.es = sse.NewEventSource(s.Client, 
target+path+\"?csrf_token=\"+token)\n\n\tw.es.On(\"stroke\", func(data string) {\n\t\tvar stroke scenario.Stroke\n\t\terr := json.Unmarshal([]byte(data), &stroke)\n\t\tif err != nil {\n\t\t\tw.Errors = append(w.Errors, err.Error())\n\t\t\tfmt.Println(err)\n\t\t\tw.es.Close()\n\t\t}\n\t\tnow := time.Now()\n\t\t\/\/ strokes APIには最初はLast-Event-IDをつけずに送るので、これまでに描かれたstrokeが全部降ってくるが、それは無視する。\n\t\tif stroke.CreatedAt.After(startTime) && now.Sub(stroke.CreatedAt) > thresholdResponseTime {\n\t\t\tfmt.Println(\"response too late\")\n\t\t\tw.es.Close()\n\t\t}\n\t\tw.Logs = append(w.Logs, Log{\n\t\t\tTime: now,\n\t\t\tRoomID: roomID,\n\t\t\tStrokeID: stroke.ID,\n\t\t\tStrokeTime: stroke.CreatedAt,\n\t\t})\n\t})\n\tw.es.On(\"bad_request\", func(data string) {\n\t\tw.addError(path + \" bad_request: \" + data)\n\t\tw.es.Close()\n\t})\n\t\/\/w.es.On(\"watcher_count\", func(data string) {\n\t\/\/\tfmt.Println(\"watcher_count\")\n\t\/\/\tfmt.Println(data)\n\t\/\/})\n\tw.es.OnError(func(err error) {\n\t\tif e, ok := err.(*sse.BadContentType); ok {\n\t\t\tw.addError(path + \" Content-Typeが正しくありません: \" + e.ContentType)\n\t\t\treturn\n\t\t}\n\t\tif e, ok := err.(*sse.BadStatusCode); ok {\n\t\t\tw.addError(fmt.Sprintf(\"%s ステータスコードが正しくありません: %d\\n\", path, e.StatusCode))\n\t\t\tw.es.Close()\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(err)\n\t\tw.addError(path + \" 予期せぬエラー\")\n\t})\n\tw.es.OnEnd(func() {\n\t\tw.EndCh <- struct{}{}\n\t})\n\n\tw.es.Start()\n}\n\nfunc (w *RoomWatcher) addError(msg string) {\n\tw.Errors = append(w.Errors, fmt.Sprintf(\"%s\", msg))\n}\n\nfunc (w *RoomWatcher) Leave() {\n\tw.isLeft = true\n\tif w.es != nil {\n\t\tw.es.Close()\n\t}\n}\nAdd commentpackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/scenario\"\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/session\"\n\t\"github.com\/catatsuy\/isucon6-final\/bench\/sse\"\n)\n\ntype Log struct {\n\tTime time.Time `json:\"time\"`\n\tRoomID int `json:\"room_id\"`\n\tStrokeID int64 `json:\"stroke_id\"`\n\tStrokeTime time.Time `json:\"stroke_time\"`\n}\n\ntype RoomWatcher struct {\n\tEndCh chan struct{}\n\tLogs []Log\n\tErrors []string\n\n\tes *sse.EventSource\n\tisLeft bool\n}\n\nfunc NewRoomWatcher(target string, roomID int) *RoomWatcher {\n\tw := &RoomWatcher{\n\t\tEndCh: make(chan struct{}, 1),\n\t\tLogs: make([]Log, 0),\n\t\tErrors: make([]string, 0),\n\t\tisLeft: false,\n\t}\n\n\tgo w.watch(target, roomID)\n\n\treturn w\n}\n\n\/\/ 描いたstrokeがこの時間以上経ってから届いたら、ユーザーがストレスに感じてタブを閉じる、という設定にした。\nconst thresholdResponseTime = 5 * time.Second\n\nfunc (w *RoomWatcher) watch(target string, roomID int) {\n\n\t\/\/ TODO:用途がだいぶ特殊なので普通のベンチマークと同じsessionを使うべきか悩ましい\n\ts := session.New(target)\n\ts.Client.Timeout = thresholdResponseTime\n\n\tpath := fmt.Sprintf(\"\/rooms\/%d\", roomID)\n\ttoken, err := scenario.GetCSRFToken(s, target+path)\n\tif err != nil {\n\t\tw.addError(fmt.Sprintf(\"GET %s リクエストに失敗しました\", path))\n\t\tfmt.Println(err)\n\t\tw.EndCh <- struct{}{}\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\tpath = \"\/api\/strokes\" + path\n\n\tif w.isLeft {\n\t\tw.EndCh <- struct{}{}\n\t\treturn\n\t}\n\tw.es = sse.NewEventSource(s.Client, target+path+\"?csrf_token=\"+token)\n\n\tw.es.On(\"stroke\", func(data string) {\n\t\tvar stroke scenario.Stroke\n\t\terr := json.Unmarshal([]byte(data), &stroke)\n\t\tif err != nil {\n\t\t\tw.Errors = append(w.Errors, err.Error())\n\t\t\tfmt.Println(err)\n\t\t\tw.es.Close()\n\t\t}\n\t\tnow := time.Now()\n\t\t\/\/ strokes 
APIには最初はLast-Event-IDをつけずに送るので、これまでに描かれたstrokeが全部降ってくるが、それは無視する。\n\t\tif stroke.CreatedAt.After(startTime) && now.Sub(stroke.CreatedAt) > thresholdResponseTime {\n\t\t\tfmt.Println(\"response too late\")\n\t\t\tw.es.Close()\n\t\t}\n\t\tw.Logs = append(w.Logs, Log{\n\t\t\tTime: now,\n\t\t\tRoomID: roomID,\n\t\t\tStrokeID: stroke.ID,\n\t\t\tStrokeTime: stroke.CreatedAt,\n\t\t})\n\t})\n\tw.es.On(\"bad_request\", func(data string) {\n\t\tw.addError(path + \" bad_request: \" + data)\n\t\tw.es.Close()\n\t})\n\t\/\/w.es.On(\"watcher_count\", func(data string) {\n\t\/\/\tfmt.Println(\"watcher_count\")\n\t\/\/\tfmt.Println(data)\n\t\/\/})\n\tw.es.OnError(func(err error) {\n\t\tif e, ok := err.(*sse.BadContentType); ok {\n\t\t\tw.addError(path + \" Content-Typeが正しくありません: \" + e.ContentType)\n\t\t\treturn\n\t\t}\n\t\tif e, ok := err.(*sse.BadStatusCode); ok {\n\t\t\tw.addError(fmt.Sprintf(\"%s ステータスコードが正しくありません: %d\\n\", path, e.StatusCode))\n\t\t\tw.es.Close()\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(err)\n\t\tw.addError(path + \" 予期せぬエラー\")\n\t})\n\tw.es.OnEnd(func() {\n\t\tw.EndCh <- struct{}{}\n\t})\n\n\tw.es.Start()\n}\n\nfunc (w *RoomWatcher) addError(msg string) {\n\tw.Errors = append(w.Errors, fmt.Sprintf(\"%s\", msg))\n}\n\nfunc (w *RoomWatcher) Leave() {\n\tw.isLeft = true\n\tif w.es != nil {\n\t\tw.es.Close()\n\t}\n}\n<|endoftext|>"} {"text":"package ui\n\nimport (\n\t\"github.com\/jroimartin\/gocui\"\n)\n\n\/\/ nextView is shared between Playlists and Queue and they all go to Tracks\nfunc nextView(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableTracksView()\n}\n\nfunc mainNextViewLeft(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableSideView()\n}\n\nfunc mainNextViewRight(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableQueueView()\n}\n\nfunc cursorEnd(g *gocui.Gui, v *gocui.View) error {\n\tif newIndex := getCurrentViewSize(v); newIndex > -1 {\n\t\tox, _ := v.Origin()\n\t\tcx, _ := v.Cursor()\n\t\t_, sizeY := v.Size()\n\t\tsizeY--\n\n\t\tif newIndex > sizeY {\n\t\t\tv.SetOrigin(ox, newIndex-sizeY)\n\t\t\tv.SetCursor(cx, sizeY)\n\t\t} else {\n\t\t\tv.SetCursor(cx, newIndex)\n\t\t}\n\n\t\tupdateTracksView(g, v)\n\t}\n\treturn nil\n}\n\nfunc cursorHome(g *gocui.Gui, v *gocui.View) error {\n\tox, _ := v.Origin()\n\tcx, _ := v.Cursor()\n\tv.SetCursor(cx, 0)\n\tv.SetOrigin(ox, 0)\n\n\tupdateTracksView(g, v)\n\treturn nil\n}\n\nfunc cursorPgup(g *gocui.Gui, v *gocui.View) error {\n\tox, oy := v.Origin()\n\tcx, cy := v.Cursor()\n\t_, pageSizeY := v.Size()\n\tpageSizeY--\n\n\tif newOriginY := oy - pageSizeY; newOriginY > 0 {\n\t\tv.SetOrigin(ox, newOriginY)\n\t\tv.SetCursor(cx, cy)\n\t} else {\n\t\tv.SetOrigin(ox, 0)\n\t\tv.SetCursor(cx, cy)\n\t}\n\tupdateTracksView(g, v)\n\treturn nil\n}\n\nfunc cursorPgdn(g *gocui.Gui, v *gocui.View) error {\n\tif maxSize := getCurrentViewSize(v); maxSize > -1 {\n\t\tox, oy := v.Origin()\n\t\tcx, cy := v.Cursor()\n\t\t_, pageSizeY := v.Size()\n\t\tpageSizeY--\n\n\t\tnewOriginY := oy + pageSizeY\n\n\t\tif hasMorePages(newOriginY, cy, maxSize) {\n\t\t\tv.SetOrigin(ox, newOriginY)\n\t\t\tv.SetCursor(cx, cy)\n\t\t} else if isNotInLastPage(oy, pageSizeY, maxSize) {\n\t\t\tv.SetOrigin(ox, maxSize-pageSizeY)\n\t\t\tv.SetCursor(cx, pageSizeY)\n\t\t}\n\t\tupdateTracksView(g, v)\n\t}\n\treturn nil\n}\n\nfunc updateTracksView(g *gocui.Gui, v *gocui.View) {\n\tif v == gui.playlistsView {\n\t\tgui.updateTracksView()\n\t}\n}\n\nfunc getCurrentViewSize(v *gocui.View) int {\n\tif v == gui.tracksView {\n\t\tif selectedPlaylist := gui.getSelectedPlaylist(); 
selectedPlaylist != nil {\n\t\t\treturn selectedPlaylist.Tracks() - 1\n\t\t}\n\t} else if v == gui.playlistsView {\n\t\treturn playlists.Playlists() - 1\n\t}\n\treturn -1\n}\n\nfunc hasMorePages(newOriginY int, cursorY int, maxSize int) bool {\n\treturn newOriginY+cursorY <= maxSize\n}\n\nfunc isNotInLastPage(originY int, pageSizeY int, maxSize int) bool {\n\treturn originY+pageSizeY <= maxSize\n}\n\nfunc cursorDown(g *gocui.Gui, v *gocui.View) error {\n\toffset := getOffsetFromTypedNumbers()\n\tif cx, cy := v.Cursor(); canGoToNewPosition(cy + offset) {\n\t\tif err := v.SetCursor(cx, cy+offset); err != nil {\n\t\t\tox, oy := v.Origin()\n\t\t\tif err := v.SetOrigin(ox, oy+offset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif v == gui.playlistsView {\n\t\t\tgui.updateTracksView()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cursorUp(g *gocui.Gui, v *gocui.View) error {\n\toffset := getOffsetFromTypedNumbers()\n\tox, oy := v.Origin()\n\tcx, cy := v.Cursor()\n\tif err := v.SetCursor(cx, cy-offset); err != nil && oy > 0 {\n\t\tif err := v.SetOrigin(ox, oy-offset); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif v == gui.playlistsView {\n\t\tgui.updateTracksView()\n\t}\n\treturn nil\n}\n\nfunc getOffsetFromTypedNumbers() int {\n\tif multipleKeysNumber > 1 {\n\t\treturn multipleKeysNumber\n\t}\n\treturn 1\n}\n\nfunc canGoToNewPosition(newPosition int) bool {\n\tcurrentView := gui.g.CurrentView()\n\tline, err := currentView.Line(newPosition)\n\tif err != nil || len(line) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc canGoToAbsolutNewPosition(v *gocui.View, newPosition int) bool {\n\tswitch v {\n\tcase gui.playlistsView:\n\t\treturn newPosition <= playlists.Playlists()\n\tcase gui.tracksView:\n\t\tif currentPlaylist := gui.getSelectedPlaylist(); currentPlaylist != nil {\n\t\t\treturn newPosition <= currentPlaylist.Tracks()\n\t\t}\n\tcase gui.queueView:\n\t}\n\treturn true\n}\n\nfunc goTo(g *gocui.Gui, v *gocui.View, position int) error {\n\tif canGoToAbsolutNewPosition(v, position) {\n\t\tposition--\n\t\tox, _ := v.Origin()\n\t\tcx, _ := v.Cursor()\n\t\tv.SetCursor(cx, 0)\n\t\tv.SetOrigin(ox, 0)\n\t\tif err := v.SetCursor(cx, position); err != nil {\n\t\t\tif err := v.SetOrigin(ox, position); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif v == gui.playlistsView && gui.tracksView != nil {\n\t\t\tgui.updateTracksView()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc goToFirstLineCommand(g *gocui.Gui, v *gocui.View) error {\n\tif multipleKeysNumber <= 0 {\n\t\treturn cursorHome(g, v)\n\t}\n\n\treturn goTo(g, v, multipleKeysNumber)\n}\n\nfunc goToLastLineCommand(g *gocui.Gui, v *gocui.View) error {\n\tif multipleKeysNumber <= 0 {\n\t\treturn cursorEnd(g, v)\n\t}\n\n\treturn goTo(g, v, multipleKeysNumber)\n}\nCount opened subplaylist when counting playlists sizepackage ui\n\nimport (\n\t\"github.com\/jroimartin\/gocui\"\n)\n\n\/\/ nextView is shared between Playlists and Queue and they all go to Tracks\nfunc nextView(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableTracksView()\n}\n\nfunc mainNextViewLeft(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableSideView()\n}\n\nfunc mainNextViewRight(g *gocui.Gui, v *gocui.View) error {\n\treturn gui.enableQueueView()\n}\n\nfunc cursorEnd(g *gocui.Gui, v *gocui.View) error {\n\tif newIndex := getCurrentViewSize(v); newIndex > -1 {\n\t\tox, _ := v.Origin()\n\t\tcx, _ := v.Cursor()\n\t\t_, sizeY := v.Size()\n\t\tsizeY--\n\n\t\tif newIndex > sizeY {\n\t\t\tv.SetOrigin(ox, newIndex-sizeY)\n\t\t\tv.SetCursor(cx, sizeY)\n\t\t} else 
{\n\t\t\tv.SetCursor(cx, newIndex)\n\t\t}\n\n\t\tupdateTracksView(g, v)\n\t}\n\treturn nil\n}\n\nfunc cursorHome(g *gocui.Gui, v *gocui.View) error {\n\tox, _ := v.Origin()\n\tcx, _ := v.Cursor()\n\tv.SetCursor(cx, 0)\n\tv.SetOrigin(ox, 0)\n\n\tupdateTracksView(g, v)\n\treturn nil\n}\n\nfunc cursorPgup(g *gocui.Gui, v *gocui.View) error {\n\tox, oy := v.Origin()\n\tcx, cy := v.Cursor()\n\t_, pageSizeY := v.Size()\n\tpageSizeY--\n\n\tif newOriginY := oy - pageSizeY; newOriginY > 0 {\n\t\tv.SetOrigin(ox, newOriginY)\n\t\tv.SetCursor(cx, cy)\n\t} else {\n\t\tv.SetOrigin(ox, 0)\n\t\tv.SetCursor(cx, cy)\n\t}\n\tupdateTracksView(g, v)\n\treturn nil\n}\n\nfunc cursorPgdn(g *gocui.Gui, v *gocui.View) error {\n\tif maxSize := getCurrentViewSize(v); maxSize > -1 {\n\t\tox, oy := v.Origin()\n\t\tcx, cy := v.Cursor()\n\t\t_, pageSizeY := v.Size()\n\t\tpageSizeY--\n\n\t\tnewOriginY := oy + pageSizeY\n\n\t\tif hasMorePages(newOriginY, cy, maxSize) {\n\t\t\tv.SetOrigin(ox, newOriginY)\n\t\t\tv.SetCursor(cx, cy)\n\t\t} else if isNotInLastPage(oy, pageSizeY, maxSize) {\n\t\t\tv.SetOrigin(ox, maxSize-pageSizeY)\n\t\t\tv.SetCursor(cx, pageSizeY)\n\t\t}\n\t\tupdateTracksView(g, v)\n\t}\n\treturn nil\n}\n\nfunc updateTracksView(g *gocui.Gui, v *gocui.View) {\n\tif v == gui.playlistsView {\n\t\tgui.updateTracksView()\n\t}\n}\n\nfunc getCurrentViewSize(v *gocui.View) int {\n\tif v == gui.tracksView {\n\t\treturn getTracksViewSize(v)\n\t} else if v == gui.playlistsView {\n\t\treturn getPlaylistsViewSize(v)\n\t}\n\treturn -1\n}\n\nfunc getTracksViewSize(v *gocui.View) int {\n\tif selectedPlaylist := gui.getSelectedPlaylist(); selectedPlaylist != nil {\n\t\treturn selectedPlaylist.Tracks() - 1\n\t}\n\treturn -1\n}\n\nfunc getPlaylistsViewSize(v *gocui.View) int {\n\tsubPlaylists := 0\n\tfor _, key := range playlists.Names() {\n\t\tplaylist := playlists.Get(key)\n\t\tif playlist.IsFolder() && playlist.IsFolderOpen() {\n\t\t\tsubPlaylists += playlist.Playlists()\n\t\t}\n\t}\n\treturn playlists.Playlists() + subPlaylists - 1\n}\n\nfunc hasMorePages(newOriginY int, cursorY int, maxSize int) bool {\n\treturn newOriginY+cursorY <= maxSize\n}\n\nfunc isNotInLastPage(originY int, pageSizeY int, maxSize int) bool {\n\treturn originY+pageSizeY <= maxSize\n}\n\nfunc cursorDown(g *gocui.Gui, v *gocui.View) error {\n\toffset := getOffsetFromTypedNumbers()\n\tif cx, cy := v.Cursor(); canGoToNewPosition(cy + offset) {\n\t\tif err := v.SetCursor(cx, cy+offset); err != nil {\n\t\t\tox, oy := v.Origin()\n\t\t\tif err := v.SetOrigin(ox, oy+offset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif v == gui.playlistsView {\n\t\t\tgui.updateTracksView()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc cursorUp(g *gocui.Gui, v *gocui.View) error {\n\toffset := getOffsetFromTypedNumbers()\n\tox, oy := v.Origin()\n\tcx, cy := v.Cursor()\n\tif err := v.SetCursor(cx, cy-offset); err != nil && oy > 0 {\n\t\tif err := v.SetOrigin(ox, oy-offset); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif v == gui.playlistsView {\n\t\tgui.updateTracksView()\n\t}\n\treturn nil\n}\n\nfunc getOffsetFromTypedNumbers() int {\n\tif multipleKeysNumber > 1 {\n\t\treturn multipleKeysNumber\n\t}\n\treturn 1\n}\n\nfunc canGoToNewPosition(newPosition int) bool {\n\tcurrentView := gui.g.CurrentView()\n\tline, err := currentView.Line(newPosition)\n\tif err != nil || len(line) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc canGoToAbsolutNewPosition(v *gocui.View, newPosition int) bool {\n\tswitch v {\n\tcase gui.playlistsView:\n\t\treturn newPosition <= 
playlists.Playlists()\n\tcase gui.tracksView:\n\t\tif currentPlaylist := gui.getSelectedPlaylist(); currentPlaylist != nil {\n\t\t\treturn newPosition <= currentPlaylist.Tracks()\n\t\t}\n\tcase gui.queueView:\n\t}\n\treturn true\n}\n\nfunc goTo(g *gocui.Gui, v *gocui.View, position int) error {\n\tif canGoToAbsolutNewPosition(v, position) {\n\t\tposition--\n\t\tox, _ := v.Origin()\n\t\tcx, _ := v.Cursor()\n\t\tv.SetCursor(cx, 0)\n\t\tv.SetOrigin(ox, 0)\n\t\tif err := v.SetCursor(cx, position); err != nil {\n\t\t\tif err := v.SetOrigin(ox, position); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif v == gui.playlistsView && gui.tracksView != nil {\n\t\t\tgui.updateTracksView()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc goToFirstLineCommand(g *gocui.Gui, v *gocui.View) error {\n\tif multipleKeysNumber <= 0 {\n\t\treturn cursorHome(g, v)\n\t}\n\n\treturn goTo(g, v, multipleKeysNumber)\n}\n\nfunc goToLastLineCommand(g *gocui.Gui, v *gocui.View) error {\n\tif multipleKeysNumber <= 0 {\n\t\treturn cursorEnd(g, v)\n\t}\n\n\treturn goTo(g, v, multipleKeysNumber)\n}\n<|endoftext|>"} {"text":"package structs\n\nimport \"fmt\"\n\n\/\/ Bitmap is a simple uncompressed bitmap\ntype Bitmap []byte\n\n\/\/ NewBitmap returns a bitmap with up to size indexes\nfunc NewBitmap(size uint) (Bitmap, error) {\n\tif size == 0 {\n\t\treturn nil, fmt.Errorf(\"bitmap must be positive size\")\n\t}\n\tif size&7 != 0 {\n\t\treturn nil, fmt.Errorf(\"bitmap must be byte aligned\")\n\t}\n\tb := make([]byte, size>>3)\n\treturn Bitmap(b), nil\n}\n\n\/\/ Copy returns a copy of the Bitmap\nfunc (b Bitmap) Copy() (Bitmap, error) {\n\tif b == nil {\n\t\treturn nil, fmt.Errorf(\"can't copy nil Bitmap\")\n\t}\n\n\traw := make([]byte, len(b))\n\tcopy(raw, b)\n\treturn Bitmap(raw), nil\n}\n\n\/\/ Size returns the size of the bitmap\nfunc (b Bitmap) Size() uint {\n\treturn uint(len(b) << 3)\n}\n\n\/\/ Set is used to set the given index of the bitmap\nfunc (b Bitmap) Set(idx uint) {\n\tbucket := idx >> 3\n\tmask := byte(1 << (idx & 7))\n\tb[bucket] |= mask\n}\n\n\/\/ Check is used to check the given index of the bitmap\nfunc (b Bitmap) Check(idx uint) bool {\n\tbucket := idx >> 3\n\tmask := byte(1 << (idx & 7))\n\treturn (b[bucket] & mask) != 0\n}\n\n\/\/ Clear is used to efficiently clear the bitmap\nfunc (b Bitmap) Clear() {\n\tfor i := range b {\n\t\tb[i] = 0\n\t}\n}\n\n\/\/ IndexesInRange returns the indexes in which the values are either set or unset based\n\/\/ on the passed parameter in the passed range\nfunc (b Bitmap) IndexesInRange(set bool, from, to uint) []int {\n\tvar indexes []int\n\tfor i := from; i < to; i++ {\n\t\tc := b.Check(i)\n\t\tif c && set || !c && !set {\n\t\t\tindexes = append(indexes, int(i))\n\t\t}\n\t}\n\n\treturn indexes\n}\ninclusive rangepackage structs\n\nimport \"fmt\"\n\n\/\/ Bitmap is a simple uncompressed bitmap\ntype Bitmap []byte\n\n\/\/ NewBitmap returns a bitmap with up to size indexes\nfunc NewBitmap(size uint) (Bitmap, error) {\n\tif size == 0 {\n\t\treturn nil, fmt.Errorf(\"bitmap must be positive size\")\n\t}\n\tif size&7 != 0 {\n\t\treturn nil, fmt.Errorf(\"bitmap must be byte aligned\")\n\t}\n\tb := make([]byte, size>>3)\n\treturn Bitmap(b), nil\n}\n\n\/\/ Copy returns a copy of the Bitmap\nfunc (b Bitmap) Copy() (Bitmap, error) {\n\tif b == nil {\n\t\treturn nil, fmt.Errorf(\"can't copy nil Bitmap\")\n\t}\n\n\traw := make([]byte, len(b))\n\tcopy(raw, b)\n\treturn Bitmap(raw), nil\n}\n\n\/\/ Size returns the size of the bitmap\nfunc (b Bitmap) Size() uint {\n\treturn uint(len(b) << 
3)\n}\n\n\/\/ Set is used to set the given index of the bitmap\nfunc (b Bitmap) Set(idx uint) {\n\tbucket := idx >> 3\n\tmask := byte(1 << (idx & 7))\n\tb[bucket] |= mask\n}\n\n\/\/ Check is used to check the given index of the bitmap\nfunc (b Bitmap) Check(idx uint) bool {\n\tbucket := idx >> 3\n\tmask := byte(1 << (idx & 7))\n\treturn (b[bucket] & mask) != 0\n}\n\n\/\/ Clear is used to efficiently clear the bitmap\nfunc (b Bitmap) Clear() {\n\tfor i := range b {\n\t\tb[i] = 0\n\t}\n}\n\n\/\/ IndexesInRange returns the indexes in which the values are either set or unset based\n\/\/ on the passed parameter in the passed range\nfunc (b Bitmap) IndexesInRange(set bool, from, to uint) []int {\n\tvar indexes []int\n\tfor i := from; i <= to; i++ {\n\t\tc := b.Check(i)\n\t\tif c && set || !c && !set {\n\t\t\tindexes = append(indexes, int(i))\n\t\t}\n\t}\n\n\treturn indexes\n}\n<|endoftext|>"} {"text":"package dependency\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tHealthAny = \"any\"\n\tHealthPassing = \"passing\"\n\tHealthWarning = \"warning\"\n\tHealthCritical = \"critical\"\n\tHealthMaint = \"maintenance\"\n\n\tNodeMaint = \"_node_maintenance\"\n\tServiceMaint = \"_service_maintenance:\"\n)\n\nvar (\n\t\/\/ Ensure implements\n\t_ Dependency = (*HealthServiceQuery)(nil)\n\n\t\/\/ HealthServiceQueryRe is the regular expression to use.\n\tHealthServiceQueryRe = regexp.MustCompile(`\\A` + tagRe + nameRe + dcRe + nearRe + filterRe + `\\z`)\n)\n\nfunc init() {\n\tgob.Register([]*HealthService{})\n}\n\n\/\/ HealthService is a service entry in Consul.\ntype HealthService struct {\n\tNode string\n\tNodeID string\n\tNodeAddress string\n\tNodeTaggedAddresses map[string]string\n\tNodeMeta map[string]string\n\tAddress string\n\tID string\n\tName string\n\tTags ServiceTags\n\tChecks []*api.HealthCheck\n\tStatus string\n\tPort int\n}\n\n\/\/ HealthServiceQuery is the representation of all a service query in Consul.\ntype HealthServiceQuery struct {\n\tstopCh chan struct{}\n\n\tdc string\n\tfilters []string\n\tname string\n\tnear string\n\ttag string\n}\n\n\/\/ NewHealthServiceQuery processes the strings to build a service dependency.\nfunc NewHealthServiceQuery(s string) (*HealthServiceQuery, error) {\n\tif !HealthServiceQueryRe.MatchString(s) {\n\t\treturn nil, fmt.Errorf(\"health.service: invalid format: %q\", s)\n\t}\n\n\tm := regexpMatch(HealthServiceQueryRe, s)\n\n\tvar filters []string\n\tif filter := m[\"filter\"]; filter != \"\" {\n\t\tsplit := strings.Split(filter, \",\")\n\t\tfor _, f := range split {\n\t\t\tf = strings.TrimSpace(f)\n\t\t\tswitch f {\n\t\t\tcase HealthAny,\n\t\t\t\tHealthPassing,\n\t\t\t\tHealthWarning,\n\t\t\t\tHealthCritical,\n\t\t\t\tHealthMaint:\n\t\t\t\tfilters = append(filters, f)\n\t\t\tcase \"\":\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"health.service: invalid filter: %q in %q\", f, s)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(filters)\n\t} else {\n\t\tfilters = []string{HealthPassing}\n\t}\n\n\treturn &HealthServiceQuery{\n\t\tstopCh: make(chan struct{}, 1),\n\t\tdc: m[\"dc\"],\n\t\tfilters: filters,\n\t\tname: m[\"name\"],\n\t\tnear: m[\"near\"],\n\t\ttag: m[\"tag\"],\n\t}, nil\n}\n\n\/\/ Fetch queries the Consul API defined by the given client and returns a slice\n\/\/ of HealthService objects.\nfunc (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) 
{\n\tselect {\n\tcase <-d.stopCh:\n\t\treturn nil, nil, ErrStopped\n\tdefault:\n\t}\n\n\topts = opts.Merge(&QueryOptions{\n\t\tDatacenter: d.dc,\n\t\tNear: d.near,\n\t})\n\n\tu := &url.URL{\n\t\tPath: \"\/v1\/health\/service\/\" + d.name,\n\t\tRawQuery: opts.String(),\n\t}\n\tif d.tag != \"\" {\n\t\tq := u.Query()\n\t\tq.Set(\"tag\", d.tag)\n\t\tu.RawQuery = q.Encode()\n\t}\n\tlog.Printf(\"[TRACE] %s: GET %s\", d, u)\n\n\t\/\/ Check if a user-supplied filter was given. If so, we may be querying for\n\t\/\/ more than healthy services, so we need to implement client-side filtering.\n\tpassingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing\n\n\tentries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts())\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, d.String())\n\t}\n\n\tlog.Printf(\"[TRACE] %s: returned %d results\", d, len(entries))\n\n\tlist := make([]*HealthService, 0, len(entries))\n\tfor _, entry := range entries {\n\t\t\/\/ Get the status of this service from its checks.\n\t\tstatus := entry.Checks.AggregatedStatus()\n\n\t\t\/\/ If we are not checking only healthy services, filter out services that do\n\t\t\/\/ not match the given filter.\n\t\tif !acceptStatus(d.filters, status) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the address of the service, falling back to the address of the node.\n\t\taddress := entry.Service.Address\n\t\tif address == \"\" {\n\t\t\taddress = entry.Node.Address\n\t\t}\n\n\t\tlist = append(list, &HealthService{\n\t\t\tNode: entry.Node.Node,\n\t\t\tNodeID: entry.Node.ID,\n\t\t\tNodeAddress: entry.Node.Address,\n\t\t\tNodeTaggedAddresses: entry.Node.TaggedAddresses,\n\t\t\tNodeMeta: entry.Node.Meta,\n\t\t\tAddress: address,\n\t\t\tID: entry.Service.ID,\n\t\t\tName: entry.Service.Service,\n\t\t\tTags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)),\n\t\t\tStatus: status,\n\t\t\tChecks: entry.Checks,\n\t\t\tPort: entry.Service.Port,\n\t\t})\n\t}\n\n\tlog.Printf(\"[TRACE] %s: returned %d results after filtering\", d, len(list))\n\n\tsort.Stable(ByNodeThenID(list))\n\n\trm := &ResponseMetadata{\n\t\tLastIndex: qm.LastIndex,\n\t\tLastContact: qm.LastContact,\n\t}\n\n\treturn list, rm, nil\n}\n\n\/\/ CanShare returns a boolean if this dependency is shareable.\nfunc (d *HealthServiceQuery) CanShare() bool {\n\treturn true\n}\n\n\/\/ Stop halts the dependency's fetch function.\nfunc (d *HealthServiceQuery) Stop() {\n\tclose(d.stopCh)\n}\n\n\/\/ String returns the human-friendly version of this dependency.\nfunc (d *HealthServiceQuery) String() string {\n\tname := d.name\n\tif d.tag != \"\" {\n\t\tname = d.tag + \".\" + name\n\t}\n\tif d.dc != \"\" {\n\t\tname = name + \"@\" + d.dc\n\t}\n\tif d.near != \"\" {\n\t\tname = name + \"~\" + d.near\n\t}\n\tif len(d.filters) > 0 {\n\t\tname = name + \"|\" + strings.Join(d.filters, \",\")\n\t}\n\treturn fmt.Sprintf(\"health.service(%s)\", name)\n}\n\n\/\/ Type returns the type of this dependency.\nfunc (d *HealthServiceQuery) Type() Type {\n\treturn TypeConsul\n}\n\n\/\/ acceptStatus allows us to check if a slice of health checks pass this filter.\nfunc acceptStatus(list []string, s string) bool {\n\tfor _, status := range list {\n\t\tif status == s || status == HealthAny {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ByNodeThenID is a sortable slice of Service\ntype ByNodeThenID []*HealthService\n\n\/\/ Len, Swap, and Less are used to implement the sort.Sort interface.\nfunc (s ByNodeThenID) Len() int { return len(s) }\nfunc (s ByNodeThenID) 
Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s ByNodeThenID) Less(i, j int) bool {\n\tif s[i].Node < s[j].Node {\n\t\treturn true\n\t} else if s[i].Node == s[j].Node {\n\t\treturn s[i].ID <= s[j].ID\n\t}\n\treturn false\n}\nUse internal type insteadpackage dependency\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tHealthAny = \"any\"\n\tHealthPassing = \"passing\"\n\tHealthWarning = \"warning\"\n\tHealthCritical = \"critical\"\n\tHealthMaint = \"maintenance\"\n\n\tNodeMaint = \"_node_maintenance\"\n\tServiceMaint = \"_service_maintenance:\"\n)\n\nvar (\n\t\/\/ Ensure implements\n\t_ Dependency = (*HealthServiceQuery)(nil)\n\n\t\/\/ HealthServiceQueryRe is the regular expression to use.\n\tHealthServiceQueryRe = regexp.MustCompile(`\\A` + tagRe + nameRe + dcRe + nearRe + filterRe + `\\z`)\n)\n\nfunc init() {\n\tgob.Register([]*HealthService{})\n}\n\n\/\/ HealthService is a service entry in Consul.\ntype HealthService struct {\n\tNode string\n\tNodeID string\n\tNodeAddress string\n\tNodeTaggedAddresses map[string]string\n\tNodeMeta map[string]string\n\tAddress string\n\tID string\n\tName string\n\tTags ServiceTags\n\tChecks api.HealthChecks\n\tStatus string\n\tPort int\n}\n\n\/\/ HealthServiceQuery is the representation of all a service query in Consul.\ntype HealthServiceQuery struct {\n\tstopCh chan struct{}\n\n\tdc string\n\tfilters []string\n\tname string\n\tnear string\n\ttag string\n}\n\n\/\/ NewHealthServiceQuery processes the strings to build a service dependency.\nfunc NewHealthServiceQuery(s string) (*HealthServiceQuery, error) {\n\tif !HealthServiceQueryRe.MatchString(s) {\n\t\treturn nil, fmt.Errorf(\"health.service: invalid format: %q\", s)\n\t}\n\n\tm := regexpMatch(HealthServiceQueryRe, s)\n\n\tvar filters []string\n\tif filter := m[\"filter\"]; filter != \"\" {\n\t\tsplit := strings.Split(filter, \",\")\n\t\tfor _, f := range split {\n\t\t\tf = strings.TrimSpace(f)\n\t\t\tswitch f {\n\t\t\tcase HealthAny,\n\t\t\t\tHealthPassing,\n\t\t\t\tHealthWarning,\n\t\t\t\tHealthCritical,\n\t\t\t\tHealthMaint:\n\t\t\t\tfilters = append(filters, f)\n\t\t\tcase \"\":\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"health.service: invalid filter: %q in %q\", f, s)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(filters)\n\t} else {\n\t\tfilters = []string{HealthPassing}\n\t}\n\n\treturn &HealthServiceQuery{\n\t\tstopCh: make(chan struct{}, 1),\n\t\tdc: m[\"dc\"],\n\t\tfilters: filters,\n\t\tname: m[\"name\"],\n\t\tnear: m[\"near\"],\n\t\ttag: m[\"tag\"],\n\t}, nil\n}\n\n\/\/ Fetch queries the Consul API defined by the given client and returns a slice\n\/\/ of HealthService objects.\nfunc (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {\n\tselect {\n\tcase <-d.stopCh:\n\t\treturn nil, nil, ErrStopped\n\tdefault:\n\t}\n\n\topts = opts.Merge(&QueryOptions{\n\t\tDatacenter: d.dc,\n\t\tNear: d.near,\n\t})\n\n\tu := &url.URL{\n\t\tPath: \"\/v1\/health\/service\/\" + d.name,\n\t\tRawQuery: opts.String(),\n\t}\n\tif d.tag != \"\" {\n\t\tq := u.Query()\n\t\tq.Set(\"tag\", d.tag)\n\t\tu.RawQuery = q.Encode()\n\t}\n\tlog.Printf(\"[TRACE] %s: GET %s\", d, u)\n\n\t\/\/ Check if a user-supplied filter was given. 
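(For example, the query\n\t\/\/ \"web|passing,warning\" is parsed by NewHealthServiceQuery above into two\n\t\/\/ filters.)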
If so, we may be querying for\n\t\/\/ more than healthy services, so we need to implement client-side filtering.\n\tpassingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing\n\n\tentries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts())\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, d.String())\n\t}\n\n\tlog.Printf(\"[TRACE] %s: returned %d results\", d, len(entries))\n\n\tlist := make([]*HealthService, 0, len(entries))\n\tfor _, entry := range entries {\n\t\t\/\/ Get the status of this service from its checks.\n\t\tstatus := entry.Checks.AggregatedStatus()\n\n\t\t\/\/ If we are not checking only healthy services, filter out services that do\n\t\t\/\/ not match the given filter.\n\t\tif !acceptStatus(d.filters, status) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the address of the service, falling back to the address of the node.\n\t\taddress := entry.Service.Address\n\t\tif address == \"\" {\n\t\t\taddress = entry.Node.Address\n\t\t}\n\n\t\tlist = append(list, &HealthService{\n\t\t\tNode: entry.Node.Node,\n\t\t\tNodeID: entry.Node.ID,\n\t\t\tNodeAddress: entry.Node.Address,\n\t\t\tNodeTaggedAddresses: entry.Node.TaggedAddresses,\n\t\t\tNodeMeta: entry.Node.Meta,\n\t\t\tAddress: address,\n\t\t\tID: entry.Service.ID,\n\t\t\tName: entry.Service.Service,\n\t\t\tTags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)),\n\t\t\tStatus: status,\n\t\t\tChecks: entry.Checks,\n\t\t\tPort: entry.Service.Port,\n\t\t})\n\t}\n\n\tlog.Printf(\"[TRACE] %s: returned %d results after filtering\", d, len(list))\n\n\tsort.Stable(ByNodeThenID(list))\n\n\trm := &ResponseMetadata{\n\t\tLastIndex: qm.LastIndex,\n\t\tLastContact: qm.LastContact,\n\t}\n\n\treturn list, rm, nil\n}\n\n\/\/ CanShare returns a boolean if this dependency is shareable.\nfunc (d *HealthServiceQuery) CanShare() bool {\n\treturn true\n}\n\n\/\/ Stop halts the dependency's fetch function.\nfunc (d *HealthServiceQuery) Stop() {\n\tclose(d.stopCh)\n}\n\n\/\/ String returns the human-friendly version of this dependency.\nfunc (d *HealthServiceQuery) String() string {\n\tname := d.name\n\tif d.tag != \"\" {\n\t\tname = d.tag + \".\" + name\n\t}\n\tif d.dc != \"\" {\n\t\tname = name + \"@\" + d.dc\n\t}\n\tif d.near != \"\" {\n\t\tname = name + \"~\" + d.near\n\t}\n\tif len(d.filters) > 0 {\n\t\tname = name + \"|\" + strings.Join(d.filters, \",\")\n\t}\n\treturn fmt.Sprintf(\"health.service(%s)\", name)\n}\n\n\/\/ Type returns the type of this dependency.\nfunc (d *HealthServiceQuery) Type() Type {\n\treturn TypeConsul\n}\n\n\/\/ acceptStatus allows us to check if a slice of health checks pass this filter.\nfunc acceptStatus(list []string, s string) bool {\n\tfor _, status := range list {\n\t\tif status == s || status == HealthAny {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ByNodeThenID is a sortable slice of Service\ntype ByNodeThenID []*HealthService\n\n\/\/ Len, Swap, and Less are used to implement the sort.Sort interface.\nfunc (s ByNodeThenID) Len() int { return len(s) }\nfunc (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s ByNodeThenID) Less(i, j int) bool {\n\tif s[i].Node < s[j].Node {\n\t\treturn true\n\t} else if s[i].Node == s[j].Node {\n\t\treturn s[i].ID <= s[j].ID\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage snc\n\nimport (\n\t\"bytes\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"math\/big\"\n\t\"net\"\n\t\"time\"\n)\n\nfunc generateCert(host string) (*bytes.Buffer, *bytes.Buffer, error) {\n\tcert := bytes.NewBuffer(nil)\n\tkey := bytes.NewBuffer(nil)\n\n\tpriv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tnotBefore := time.Now()\n\n\tnotAfter := notBefore.Add(24 * time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t} else {\n\t\ttemplate.DNSNames = append(template.DNSNames, host)\n\t}\n\n\ttemplate.IsCA = true\n\ttemplate.KeyUsage |= x509.KeyUsageCertSign\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tpem.Encode(cert, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: derBytes,\n\t})\n\n\tb, err := x509.MarshalECPrivateKey(priv)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tpem.Encode(key, &pem.Block{Type: \"EC PRIVATE KEY\", Bytes: b})\n\treturn cert, key, nil\n}\nminor, better readability\/\/ Copyright 2009 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage snc\n\nimport (\n\t\"bytes\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"math\/big\"\n\t\"net\"\n\t\"time\"\n)\n\nfunc generateCert(host string) (*bytes.Buffer, *bytes.Buffer, error) {\n\tcert := bytes.NewBuffer(nil)\n\tkey := bytes.NewBuffer(nil)\n\n\tpriv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tnotBefore := time.Now()\n\n\tnotAfter := notBefore.Add(24 * time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tIsCA: true,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t} else {\n\t\ttemplate.DNSNames = append(template.DNSNames, host)\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tpem.Encode(cert, &pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: derBytes,\n\t})\n\n\tb, err := x509.MarshalECPrivateKey(priv)\n\tif err != nil {\n\t\treturn cert, key, err\n\t}\n\n\tpem.Encode(key, &pem.Block{\n\t\tType: \"EC PRIVATE KEY\",\n\t\tBytes: b,\n\t})\n\treturn cert, key, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Jonathan J Lawlor. 
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage matrixexp\n\nimport (\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ General is a typical matrix literal.\ntype General struct {\n\tblas64.General\n}\n\n\/\/ Dims returns the matrix dimensions.\nfunc (m1 *General) Dims() (r, c int) {\n\tr, c = m1.Rows, m1.Cols\n\treturn\n}\n\n\/\/ At returns the value at a given row, column index.\nfunc (m1 *General) At(r, c int) float64 {\n\treturn m1.Data[r*m1.Stride+c]\n}\n\n\/\/ Set changes the value at a given row, column index.\nfunc (m1 *General) Set(r, c int, v float64) {\n\tm1.Data[r*m1.Stride+c] = v\n}\n\n\/\/ Eval returns a matrix literal.\nfunc (m1 *General) Eval() MatrixLiteral {\n\treturn m1\n}\n\n\/\/ Copy creates a (deep) copy of the Matrix Expression.\nfunc (m1 *General) Copy() MatrixExp {\n\tv := make([]float64, len(m1.Data))\n\tcopy(v, m1.Data)\n\treturn &General{\n\t\tblas64.General{\n\t\t\tRows: m1.Rows,\n\t\t\tCols: m1.Cols,\n\t\t\tStride: m1.Stride,\n\t\t\tData: v,\n\t\t},\n\t}\n}\n\n\/\/ Err returns the first error encountered while constructing the matrix expression.\nfunc (m1 *General) Err() error {\n\tif m1.Rows < 0 {\n\t\treturn ErrInvalidRows(m1.Rows)\n\t}\n\tif m1.Cols < 0 {\n\t\treturn ErrInvalidCols(m1.Cols)\n\t}\n\tif m1.Stride < 1 {\n\t\treturn ErrInvalidStride(m1.Stride)\n\t}\n\tif m1.Stride < m1.Cols {\n\t\treturn ErrStrideLessThanCols{m1.Stride, m1.Cols}\n\t}\n\tif maxLen := (m1.Rows-1)*m1.Stride + m1.Cols; maxLen > len(m1.Data) {\n\t\treturn ErrInvalidDataLen{len(m1.Data), maxLen}\n\t}\n\treturn nil\n}\n\n\/\/ T transposes a matrix.\nfunc (m1 *General) T() MatrixExp {\n\treturn &T{m1}\n}\n\n\/\/ Add two matrices together.\nfunc (m1 *General) Add(m2 MatrixExp) MatrixExp {\n\treturn &Add{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ Sub subtracts the right matrix from the left matrix.\nfunc (m1 *General) Sub(m2 MatrixExp) MatrixExp {\n\treturn &Sub{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ Scale performs scalar multiplication.\nfunc (m1 *General) Scale(c float64) MatrixExp {\n\treturn &Scale{\n\t\tC: c,\n\t\tM: m1,\n\t}\n}\n\n\/\/ Mul performs matrix multiplication.\nfunc (m1 *General) Mul(m2 MatrixExp) MatrixExp {\n\treturn &Mul{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ MulElem performs element-wise multiplication.\nfunc (m1 *General) MulElem(m2 MatrixExp) MatrixExp {\n\treturn &MulElem{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ DivElem performs element-wise division.\nfunc (m1 *General) DivElem(m2 MatrixExp) MatrixExp {\n\treturn &DivElem{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ AsVector returns a copy of the values in the matrix as a []float64, in row order.\nfunc (m1 *General) AsVector() []float64 {\n\t\/\/ TODO(jonlawlor): make use of a pool.\n\tv := make([]float64, len(m1.Data))\n\tcopy(v, m1.Data)\n\treturn v\n}\n\n\/\/ AsGeneral returns the matrix as a blas64.General (not a copy!)\nfunc (m1 *General) AsGeneral() blas64.General {\n\treturn m1.General\n}\nFix AsVector for General\/\/ Copyright 2015 Jonathan J Lawlor. 
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage matrixexp\n\nimport (\n\t\"github.com\/gonum\/blas\/blas64\"\n)\n\n\/\/ General is a typical matrix literal.\ntype General struct {\n\tblas64.General\n}\n\n\/\/ Dims returns the matrix dimensions.\nfunc (m1 *General) Dims() (r, c int) {\n\tr, c = m1.Rows, m1.Cols\n\treturn\n}\n\n\/\/ At returns the value at a given row, column index.\nfunc (m1 *General) At(r, c int) float64 {\n\treturn m1.Data[r*m1.Stride+c]\n}\n\n\/\/ Set changes the value at a given row, column index.\nfunc (m1 *General) Set(r, c int, v float64) {\n\tm1.Data[r*m1.Stride+c] = v\n}\n\n\/\/ Eval returns a matrix literal.\nfunc (m1 *General) Eval() MatrixLiteral {\n\treturn m1\n}\n\n\/\/ Copy creates a (deep) copy of the Matrix Expression.\nfunc (m1 *General) Copy() MatrixExp {\n\tv := make([]float64, len(m1.Data))\n\tcopy(v, m1.Data)\n\treturn &General{\n\t\tblas64.General{\n\t\t\tRows: m1.Rows,\n\t\t\tCols: m1.Cols,\n\t\t\tStride: m1.Stride,\n\t\t\tData: v,\n\t\t},\n\t}\n}\n\n\/\/ Err returns the first error encountered while constructing the matrix expression.\nfunc (m1 *General) Err() error {\n\tif m1.Rows < 0 {\n\t\treturn ErrInvalidRows(m1.Rows)\n\t}\n\tif m1.Cols < 0 {\n\t\treturn ErrInvalidCols(m1.Cols)\n\t}\n\tif m1.Stride < 1 {\n\t\treturn ErrInvalidStride(m1.Stride)\n\t}\n\tif m1.Stride < m1.Cols {\n\t\treturn ErrStrideLessThanCols{m1.Stride, m1.Cols}\n\t}\n\tif maxLen := (m1.Rows-1)*m1.Stride + m1.Cols; maxLen > len(m1.Data) {\n\t\treturn ErrInvalidDataLen{len(m1.Data), maxLen}\n\t}\n\treturn nil\n}\n\n\/\/ T transposes a matrix.\nfunc (m1 *General) T() MatrixExp {\n\treturn &T{m1}\n}\n\n\/\/ Add two matrices together.\nfunc (m1 *General) Add(m2 MatrixExp) MatrixExp {\n\treturn &Add{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ Sub subtracts the right matrix from the left matrix.\nfunc (m1 *General) Sub(m2 MatrixExp) MatrixExp {\n\treturn &Sub{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ Scale performs scalar multiplication.\nfunc (m1 *General) Scale(c float64) MatrixExp {\n\treturn &Scale{\n\t\tC: c,\n\t\tM: m1,\n\t}\n}\n\n\/\/ Mul performs matrix multiplication.\nfunc (m1 *General) Mul(m2 MatrixExp) MatrixExp {\n\treturn &Mul{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ MulElem performs element-wise multiplication.\nfunc (m1 *General) MulElem(m2 MatrixExp) MatrixExp {\n\treturn &MulElem{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ DivElem performs element-wise division.\nfunc (m1 *General) DivElem(m2 MatrixExp) MatrixExp {\n\treturn &DivElem{\n\t\tLeft: m1,\n\t\tRight: m2,\n\t}\n}\n\n\/\/ AsVector returns a copy of the values in the matrix as a []float64, in row order.\nfunc (m1 *General) AsVector() []float64 {\n\t\/\/ TODO(jonlawlor): make use of a pool.\n\tv := make([]float64, m1.Rows*m1.Cols)\n\tfor i := 0; i < m1.Rows; i++ {\n\t\tcopy(v[i*m1.Cols:(i+1)*m1.Cols], m1.Data[i*m1.Stride:i*m1.Stride+m1.Cols])\n\t}\n\treturn v\n}\n\n\/\/ AsGeneral returns the matrix as a blas64.General (not a copy!)\nfunc (m1 *General) AsGeneral() blas64.General {\n\treturn m1.General\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"gesture\/gis\"\n\t\"gesture\/rewrite\"\n\t\"gesture\/twitter\"\n\tirc \"github.com\/fluffle\/goirc\/client\"\n\t\"log\"\n\t\"strings\"\n)\n\nvar (\n\tchannels = []string{\"#collinjester\"}\n)\n\n\/\/ when an error occurs, calling this method will send the error back to the irc channel\nfunc 
sendError(conn *irc.Conn, channel string, nick string, err error) {\n\tlog.Print(err)\n\tconn.Privmsg(channel, fmt.Sprintf(\"%s: oops: %v\", nick, err))\n}\n\n\/\/ When a message comes in on a channel gesture has joined, this method will be called.\nfunc messageReceived(conn *irc.Conn, line *irc.Line) {\n\tif len(line.Args) > 1 {\n\t\tchannel := line.Args[0]\n\t\tmessage := line.Args[1]\n\t\tmessageSliced := strings.Split(message, \" \")\n\t\tcommand := messageSliced[0]\n\t\tcommandArgs := messageSliced[1:]\n\n\t\tlog.Printf(\">> %s (%s): %s\\n\", line.Nick, channel, message)\n\n\t\tswitch {\n\t\tcase command == \"gis\":\n\t\t\tif len(commandArgs) > 0 {\n\t\t\t\tlink, err := gis.Search(strings.Join(commandArgs, \" \"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tsendError(conn, channel, line.Nick, err)\n\t\t\t\t} else {\n\t\t\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, link))\n\t\t\t\t}\n\t\t\t}\n\t\tcase command == \"echo\":\n\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, rewrite.Rewrite(message)))\n\t\tcase command == \"describe\":\n\t\t\tif len(commandArgs) > 0 {\n\t\t\t\tdescribed, err := twitter.Describe(commandArgs[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tsendError(conn, channel, line.Nick, err)\n\t\t\t\t} else {\n\t\t\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, rewrite.Rewrite(described)))\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ find any shortened links and output the expanded versions\n\t\t\tfor _, link := range rewrite.GetRewrittenLinks(message) {\n\t\t\t\tresponse := line.Nick + \": \" + link\n\t\t\t\tconn.Privmsg(channel, response)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tc := irc.SimpleClient(\"gesturebot\")\n\tc.SSL = true\n\tc.AddHandler(irc.CONNECTED,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tfor _, channel := range channels {\n\t\t\t\tconn.Join(channel)\n\t\t\t}\n\t\t})\n\tquit := make(chan bool)\n\tc.AddHandler(irc.DISCONNECTED, func(conn *irc.Conn, line *irc.Line) { quit <- true })\n\tc.AddHandler(\"PRIVMSG\", func(conn *irc.Conn, line *irc.Line) {\n\t\tmessageReceived(conn, line)\n\t})\n\tif err := c.Connect(\"irc.freenode.net\"); err != nil {\n\t\tfmt.Printf(\"Connection error: %s\\n\", err)\n\t}\n\t\/\/ Wait for disconnect\n\t<-quit\n}\nMinor code rearrangingpackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"gesture\/gis\"\n\t\"gesture\/rewrite\"\n\t\"gesture\/twitter\"\n\tirc \"github.com\/fluffle\/goirc\/client\"\n\t\"log\"\n\t\"strings\"\n)\n\nvar (\n\tchannels = []string{\"#collinjester\"}\n)\n\nfunc main() {\n\tflag.Parse()\n\tc := irc.SimpleClient(\"gesturebot\")\n\tc.SSL = true\n\tc.AddHandler(irc.CONNECTED,\n\t\tfunc(conn *irc.Conn, line *irc.Line) {\n\t\t\tfor _, channel := range channels {\n\t\t\t\tconn.Join(channel)\n\t\t\t}\n\t\t})\n\tquit := make(chan bool)\n\tc.AddHandler(irc.DISCONNECTED, func(conn *irc.Conn, line *irc.Line) { quit <- true })\n\tc.AddHandler(\"PRIVMSG\", func(conn *irc.Conn, line *irc.Line) {\n\t\tmessageReceived(conn, line)\n\t})\n\tif err := c.Connect(\"irc.freenode.net\"); err != nil {\n\t\tfmt.Printf(\"Connection error: %s\\n\", err)\n\t}\n\t\/\/ Wait for disconnect\n\t<-quit\n}\n\n\/\/ When a message comes in on a channel gesture has joined, this method will be called.\nfunc messageReceived(conn *irc.Conn, line *irc.Line) {\n\tif len(line.Args) > 1 {\n\t\tchannel := line.Args[0]\n\t\tmessage := line.Args[1]\n\t\tmessageSliced := strings.Split(message, \" \")\n\t\tcommand := messageSliced[0]\n\t\tcommandArgs := messageSliced[1:]\n\n\t\tlog.Printf(\">> %s 
(%s): %s\\n\", line.Nick, channel, message)\n\n\t\tswitch {\n\t\tcase command == \"gis\":\n\t\t\tif len(commandArgs) > 0 {\n\t\t\t\tlink, err := gis.Search(strings.Join(commandArgs, \" \"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tsendError(conn, channel, line.Nick, err)\n\t\t\t\t} else {\n\t\t\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, link))\n\t\t\t\t}\n\t\t\t}\n\t\tcase command == \"echo\":\n\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, rewrite.Rewrite(message)))\n\t\tcase command == \"describe\":\n\t\t\tif len(commandArgs) > 0 {\n\t\t\t\tdescribed, err := twitter.Describe(commandArgs[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tsendError(conn, channel, line.Nick, err)\n\t\t\t\t} else {\n\t\t\t\t\tconn.Privmsg(channel, fmt.Sprintf(\"%s: %s\", line.Nick, rewrite.Rewrite(described)))\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ find any shortened links and output the expanded versions\n\t\t\tfor _, link := range rewrite.GetRewrittenLinks(message) {\n\t\t\t\tresponse := line.Nick + \": \" + link\n\t\t\t\tconn.Privmsg(channel, response)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ when an error occurs, calling this method will send the error back to the irc channel\nfunc sendError(conn *irc.Conn, channel string, nick string, err error) {\n\tlog.Print(err)\n\tconn.Privmsg(channel, fmt.Sprintf(\"%s: oops: %v\", nick, err))\n}\n\n<|endoftext|>"} {"text":"package main\n\nimport (\n \"net\/http\"\n)\n\nfunc main() {\n \/\/ your http.Handle calls here\n http.ListenAndServe(\"localhost:4000\", nil)\n}\nImplement `ServeHTTP`.package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n)\n\ntype String string\n\ntype Struct struct {\n\tGreeting string\n\tPunct string\n\tWho string\n}\n\nfunc (h String) ServeHTTP(\n\tw http.ResponseWriter,\n\tr *http.Request) {\n\tfmt.Fprint(w, h)\n}\n\nfunc (h Struct) ServeHTTP(\n\tw http.ResponseWriter,\n\tr *http.Request) {\n\tfmt.Fprint(w, fmt.Sprintf(\"%s%s%s\", h.Greeting, h.Punct, h.Who))\n}\n\nfunc main() {\n\thttp.Handle(\"\/string\", String(\"I'm a frayed knot.\"))\n\thttp.Handle(\"\/struct\", &Struct{\"Hello\", \":\", \"Gophers!\"})\n\thttp.ListenAndServe(\"localhost:4000\", nil)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"os\"\n \"github.com\/PuerkitoBio\/goquery\"\n)\n\ntype RepoInf struct {\n RepositoryName string\n Description string\n}\n\nconst (\n TREND_MAX_NUM = 25\n)\n\nvar repoInf []RepoInf\nvar baseUrl string = \"https:\/\/github.com\/trending\"\n\n\nvar (\n lang = flag.String(\"l\", \"all\", \"Select language\")\n desc = flag.Bool(\"d\", false, \"Show description\")\n num = flag.Int(\"n\", 10, \"Limit numbers\")\n help = flag.Bool(\"h\", false, \"Show help message\")\n)\n\nfunc main() {\n flag.Usage = func() {\n fmt.Fprint(os.Stderr, `\nusage: ghtrend [options] \n\noptional arguments:\n -l Select language.\n -d Show description.\n -n Limit numbers.\n -h Show help message.\n`)\n }\n flag.Parse()\n\n if *help {\n flag.Usage()\n os.Exit(0)\n }\n n := getNum(*num)\n url := getUrl(*lang)\n\n repoInf = getMemory(n)\n\n getPage(url, n)\n\n showResult()\n}\n\nfunc getUrl(lang string) string {\n if lang == \"\" {\n return baseUrl\n } else {\n return baseUrl + \"?l=\" + lang\n }\n}\n\nfunc getNum(num int) int {\n if num > TREND_MAX_NUM {\n num = TREND_MAX_NUM\n }\n return num\n}\n\nfunc getMemory(num int) []RepoInf {\n return make([]RepoInf, num)\n}\n\nfunc getPage(url string, num int) {\n doc, _ := goquery.NewDocument(url)\n doc.Find(\".leaderboard-list-content\").Each(func(i int, s *goquery.Selection) {\n \/\/ 
fmt.Println(s.Find(\".owner-name\").Text())\n \/\/ fmt.Println(s.Find(\"span[class='owner-name']\").Text())\n \/\/ fmt.Println(s.Find(\"strong\").Text())\n if i < num {\n \/\/ fmt.Println(s.Find(\"a[class='repository-name']\").Text())\n repoInf[i].RepositoryName = s.Find(\"a[class='repository-name']\").Text()\n repoInf[i].Description = s.Find(\"p[class='repo-leaderboard-description']\").Text()\n }\n\n })\n}\n\nfunc showResult() {\n fmt.Println(\"Trending \" + *lang + \" repositories on GitHub today\")\n line := \"\"\n for i := 0; i < 56; i++ {\n line += \"-\"\n }\n fmt.Println(line)\n\n spaces := \"\"\n for i, rp := range repoInf {\n fmt.Println(fmt.Sprint(i + 1) + \": \" + rp.RepositoryName)\n\n if (i + 1) >= 10 {\n spaces = \" \"\n } else {\n spaces = \" \"\n }\n if *desc {\n fmt.Println(spaces + rp.Description)\n }\n }\n}\nAdded command option -b and -vpackage main\n\nimport (\n \"flag\"\n \"fmt\"\n \"os\"\n \"github.com\/PuerkitoBio\/goquery\"\n \"os\/exec\"\n)\n\ntype RepoInf struct {\n RepositoryName string\n Description string\n RepoUrl string\n}\n\nconst (\n TREND_MAX_NUM = 25\n VERSION = \"0.0.1\"\n)\n\nvar repoInf []RepoInf\nvar baseUrl string = \"https:\/\/github.com\/trending\"\n\n\nvar (\n lang = flag.String(\"l\", \"all\", \"Select language\")\n desc = flag.Bool(\"d\", false, \"Show description\")\n num = flag.Int(\"n\", 10, \"Limit numbers\")\n brows = flag.Int(\"b\", 0, \"Show repository on browser\")\n help = flag.Bool(\"h\", false, \"Show help message\")\n version = flag.Bool(\"v\", false, \"Show version\")\n)\n\nfunc main() {\n flag.Usage = func() {\n fmt.Fprint(os.Stderr, `\nusage: ghtrend [options] \n\noptional arguments:\n -l Select language.\n -d Show description.\n -n Limit numbers.\n -b Show repository on browser.\n -h Show help message.\n -v Show version.\n`)\n }\n flag.Parse()\n\n if *version {\n showVersion()\n os.Exit(0)\n }\n if *help {\n flag.Usage()\n os.Exit(0)\n }\n \n n := getNum(*num)\n url := getGithubUrl(*lang)\n\n repoInf = getMemory(n)\n\n getPage(url, n)\n\n if *brows > 0 && *brows <= n {\n browsUrl := getBrowsUrl(*brows)\n openBrowser(browsUrl)\n os.Exit(0)\n }\n showResult()\n}\n\nfunc getGithubUrl(lang string) string {\n if lang == \"\" {\n return baseUrl\n } else {\n return baseUrl + \"?l=\" + lang\n }\n}\n\nfunc getNum(num int) int {\n if num > TREND_MAX_NUM {\n num = TREND_MAX_NUM\n }\n return num\n}\n\nfunc getMemory(num int) []RepoInf {\n return make([]RepoInf, num)\n}\n\nfunc getPage(url string, num int) {\n doc, _ := goquery.NewDocument(url)\n doc.Find(\".leaderboard-list-content\").Each(func(i int, s *goquery.Selection) {\n if i < num {\n repoInf[i].RepositoryName = s.Find(\"a[class='repository-name']\").Text()\n repoInf[i].Description = s.Find(\"p[class='repo-leaderboard-description']\").Text()\n repoInf[i].RepoUrl = s.Find(\"a[class='repository-name']\").Text()\n }\n\n })\n}\n\nfunc showResult() {\n fmt.Println(\"Trending \" + *lang + \" repositories on GitHub today\")\n line := \"\"\n for i := 0; i < 56; i++ {\n line += \"-\"\n }\n fmt.Println(line)\n\n spaces := \"\"\n for i, rp := range repoInf {\n fmt.Println(fmt.Sprint(i + 1) + \": \" + rp.RepositoryName)\n\n if (i + 1) >= 10 {\n spaces = \" \"\n } else {\n spaces = \" \"\n }\n if *desc {\n fmt.Println(spaces + rp.Description)\n }\n }\n}\n\nfunc getBrowsUrl(idx int) string {\n return \"https:\/\/github.com\/\" + repoInf[idx - 1].RepoUrl\n}\n\nfunc openBrowser(url string) {\n exec.Command(\"open\", url).Run()\n}\n\nfunc showVersion() {\n fmt.Printf(\"ghtrend Ver %s\\n\", 
VERSION)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package git provides types and utilities for dealing with Git repositories.\n\/\/ It's very limited, and provide some access to git config file, being focused\n\/\/ on tsuru needs.\npackage git\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ DiscoverRepositoryPath finds the path of the repository from a given\n\/\/ directory. It returns the path to the repository, or an an empty string and\n\/\/ a non-nil error if it can't find the repository.\nfunc DiscoverRepositoryPath(dir string) (string, error) {\n\t_, err := os.Stat(dir)\n\tif os.IsNotExist(err) {\n\t\treturn \"\", errors.New(\"Repository not found.\")\n\t}\n\tdir = path.Join(dir, \".git\")\n\tfor dir != \"\/.git\" {\n\t\tfi, err := os.Stat(dir)\n\t\tif err == nil && fi.IsDir() {\n\t\t\treturn dir, nil\n\t\t}\n\t\tdir = path.Join(dir, \"..\", \"..\", \".git\")\n\t}\n\treturn \"\", errors.New(\"Repository not found.\")\n}\n\n\/\/ Repository represents a git repository.\ntype Repository struct {\n\tpath string\n}\n\n\/\/ OpenRepository opens a repository by its path. You can use\n\/\/ DiscoverRepositoryPath to discover the repository from any directory, and\n\/\/ use the result of this call as parameter for OpenRepository.\n\/\/\n\/\/ OpenRepository will return an error if the given path does not appear to be\n\/\/ a git repository.\nfunc OpenRepository(p string) (*Repository, error) {\n\tif !strings.HasSuffix(p, \".git\") && !strings.HasSuffix(p, \".git\/\") {\n\t\tp = path.Join(p, \".git\")\n\t}\n\tp = strings.TrimRight(p, \"\/\")\n\tfi, err := os.Stat(path.Join(p, \"config\"))\n\tif err == nil && !fi.IsDir() {\n\t\treturn &Repository{path: p}, nil\n\t}\n\treturn nil, errors.New(\"Repository not found.\")\n}\n\n\/\/ RemoteURL returns the URL of a remote by its name. Or an error, if the\n\/\/ remote is not declared.\nfunc (r *Repository) RemoteURL(name string) (string, error) {\n\tconfig, err := os.Open(path.Join(r.path, \"config\"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer config.Close()\n\tline := fmt.Sprintf(\"[remote %q]\", name)\n\tscanner := bufio.NewScanner(config)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tif scanner.Text() == line {\n\t\t\tscanner.Scan()\n\t\t\treturn strings.Split(scanner.Text(), \" = \")[1], nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Remote %q not found.\", name)\n}\ngit: use filepath instead of path for joining paths\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package git provides types and utilities for dealing with Git repositories.\n\/\/ It's very limited, and provide some access to git config file, being focused\n\/\/ on tsuru needs.\npackage git\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ DiscoverRepositoryPath finds the path of the repository from a given\n\/\/ directory. 
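The lookup walks upward from dir, checking each\n\/\/ ancestor directory for a .git directory. 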
It returns the path to the repository, or an empty string and\n\/\/ a non-nil error if it can't find the repository.\nfunc DiscoverRepositoryPath(dir string) (string, error) {\n\t_, err := os.Stat(dir)\n\tif os.IsNotExist(err) {\n\t\treturn \"\", errors.New(\"Repository not found.\")\n\t}\n\tdir = filepath.Join(dir, \".git\")\n\tfor dir != \"\/.git\" {\n\t\tfi, err := os.Stat(dir)\n\t\tif err == nil && fi.IsDir() {\n\t\t\treturn dir, nil\n\t\t}\n\t\tdir = filepath.Join(dir, \"..\", \"..\", \".git\")\n\t}\n\treturn \"\", errors.New(\"Repository not found.\")\n}\n\n\/\/ Repository represents a git repository.\ntype Repository struct {\n\tpath string\n}\n\n\/\/ OpenRepository opens a repository by its filepath. You can use\n\/\/ DiscoverRepositoryPath to discover the repository from any directory, and\n\/\/ use the result of this call as parameter for OpenRepository.\n\/\/\n\/\/ OpenRepository will return an error if the given path does not appear to be\n\/\/ a git repository.\nfunc OpenRepository(p string) (*Repository, error) {\n\tif !strings.HasSuffix(p, \".git\") && !strings.HasSuffix(p, \".git\/\") {\n\t\tp = filepath.Join(p, \".git\")\n\t}\n\tp = strings.TrimRight(p, \"\/\")\n\tfi, err := os.Stat(filepath.Join(p, \"config\"))\n\tif err == nil && !fi.IsDir() {\n\t\treturn &Repository{path: p}, nil\n\t}\n\treturn nil, errors.New(\"Repository not found.\")\n}\n\n\/\/ RemoteURL returns the URL of a remote by its name. Or an error, if the\n\/\/ remote is not declared.\nfunc (r *Repository) RemoteURL(name string) (string, error) {\n\tconfig, err := os.Open(filepath.Join(r.path, \"config\"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer config.Close()\n\tline := fmt.Sprintf(\"[remote %q]\", name)\n\tscanner := bufio.NewScanner(config)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tif scanner.Text() == line {\n\t\t\tscanner.Scan()\n\t\t\treturn strings.Split(scanner.Text(), \" = \")[1], nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Remote %q not found.\", name)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/cdale77\/gitmine\/Godeps\/_workspace\/src\/github.com\/melvinmt\/firebase\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Event struct {\n\tType string\n\tCreated_at string\n\tActor EventActor\n\tPayload EventPayload\n}\n\ntype EventActor struct {\n\tLogin string\n\tAvatar_url string\n}\n\ntype EventPayload struct {\n\tSize int\n\tCommits []CommitCommit\n}\n\ntype StoredCommit struct {\n\tDate string\n\tLogin string\n\tAvatar string\n\tMessage string\n\tUrl string\n}\n\ntype CommitCommit struct {\n\tMessage string\n\tUrl string\n}\n\nfunc main() {\n\tfullDate := time.Now().AddDate(0, 0, -1).Format(\"2006-01-02\")\n\tgetData(fullDate)\n}\n\nfunc storeCommit(event Event, commitMessage string, commitUrl string) bool {\n\tfmt.Println(\"storing event:\")\n\tfmt.Println(event)\n\tauthToken := os.Getenv(\"FIREBASE_SECRET\")\n\n\turl := os.Getenv(\"FIREBASE_URL\")\n\n\tfireBase := firebase.NewReference(url).Auth(authToken)\n\n\tvar storedCommit StoredCommit\n\tstoredCommit.Date = event.Created_at\n\tstoredCommit.Login = event.Actor.Login\n\tstoredCommit.Avatar = event.Actor.Avatar_url\n\tstoredCommit.Message = commitMessage\n\tstoredCommit.Url = commitUrl\n\n\terr := fireBase.Push(storedCommit)\n\tif err != nil {\n\t\tfmt.Println(\"Firebase error\")\n\t\tfmt.Println(err)\n\t\treturn false\n\t} else 
{\n\t\tfmt.Println(\"Firebase success\")\n\t\treturn true\n\t}\n}\n\n\/\/ There must be a better way to do this. Probably sort cussWords alpha\n\/\/ and use a lookup table.\nfunc isDirty(message string) bool {\n\n\tresult := false\n\n\tcussWords := []string{\n\t\t\"fuck\",\n\t\t\"bitch\",\n\t\t\"stupid\",\n\t\t\"tits\",\n\t\t\"asshole\",\n\t\t\"cocksucker\",\n\t\t\"cunt\",\n\t\t\"hell\",\n\t\t\"douche\",\n\t\t\"testicle\",\n\t\t\"twat\",\n\t\t\"bastard\",\n\t\t\"sperm\",\n\t\t\"shit\",\n\t\t\"dildo\",\n\t\t\"wanker\",\n\t\t\"prick\",\n\t\t\"penis\",\n\t\t\"vagina\",\n\t\t\"whore\"}\n\n\tmessageWords := strings.Split(message, \" \")\n\n\tfor _, cussWord := range cussWords {\n\t\tfor _, word := range messageWords {\n\t\t\tif word == cussWord {\n\t\t\t\tresult = true\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\nfunc parseEvent(line string) {\n\tvar event Event\n\n\tjsonErr := json.Unmarshal([]byte(line), &event)\n\tif jsonErr != nil {\n\t\tfmt.Println(\"Could not parse json.\")\n\t\tfmt.Println(jsonErr)\n\t}\n\n\tif event.Type == \"PushEvent\" && event.Payload.Size > 0 {\n\n\t\t\/\/ An event can have multiple commits.\n\t\tcommits := event.Payload.Commits\n\t\tfor _, commit := range commits {\n\t\t\tif isDirty(commit.Message) {\n\t\t\t\tfmt.Println(commit.Message)\n\t\t\t\tstoreCommit(event, commit.Message, commit.Url)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc parseFile(fName string) {\n\t\/\/ https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/GjIkryuCyAY\n\t\/\/ TODO: standardize use of file api\n\t\/\/ https:\/\/stackoverflow.com\/questions\/1821811\/how-to-read-write-from-to-file\n\tfileOS, err := os.Open(fName)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/https:\/\/stackoverflow.com\/questions\/1821811\/how-to-read-write-from-to-file\n\t\/\/ close fi on exit and check for its returned error\n\tdefer func() {\n\t\tif err := fileOS.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tfileGzip, err := gzip.NewReader(fileOS)\n\tif err != nil {\n\t\tfmt.Printf(\"The file %v is not in gzip format.\\n\", fName)\n\t\tos.Exit(1)\n\t}\n\n\tfileRead := bufio.NewReader(fileGzip)\n\ti := 0\n\tfor {\n\t\tline, err := fileRead.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error reading file.\")\n\t\t\tfmt.Println(err)\n\t\t\tbreak\n\t\t}\n\n\t\tparseEvent(line)\n\n\t\ti++\n\t}\n\n\tos.Remove(fName)\n}\n\nfunc getData(fullDate string) {\n\n\turls := makeUrlArray(fullDate)\n\n\tfor i, value := range urls {\n\n\t\tfmt.Println(\"fetching url\", value)\n\n\t\tresp, archiveErr := http.Get(value)\n\n\t\tif resp != nil {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\n\t\tif archiveErr != nil {\n\t\t\thandleError(\"Error getting github archive\", archiveErr)\n\t\t}\n\n\t\tcontents, readErr := ioutil.ReadAll(resp.Body)\n\n\t\tif readErr != nil {\n\t\t\thandleError(\"Error converting response\", readErr)\n\t\t}\n\n\t\tfname := makeFileName(fullDate, i)\n\n\t\tfileErr := ioutil.WriteFile(fname, contents, 0644)\n\n\t\tif fileErr != nil {\n\t\t\thandleError(\"Error writing response to file\", fileErr)\n\t\t}\n\n\t\tparseFile(fname)\n\n\t}\n}\n\nfunc makeUrlArray(fullDate string) [24]string {\n\n\tbaseUrl := makeUrlBase(fullDate)\n\turlEnd := \".json.gz\"\n\n\tvar urls [24]string\n\n\tfor i := 0; i < 24; i++ {\n\n\t\tvar buffer bytes.Buffer\n\t\tbuffer.WriteString(baseUrl)\n\t\tbuffer.WriteString(\"-\")\n\t\tbuffer.WriteString(strconv.Itoa(i))\n\t\tbuffer.WriteString(urlEnd)\n\t\turl := buffer.String()\n\n\t\turls[i] = 
url\n\t}\n\n\treturn urls\n}\n\nfunc makeUrlBase(fullDate string) string {\n\tsplit := strings.Split(fullDate, \"-\")\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"http:\/\/data.githubarchive.org\/\")\n\tbuffer.WriteString(split[0]) \/\/year\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[1]) \/\/month\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[2]) \/\/day\n\n\treturn buffer.String()\n}\n\nfunc makeFileName(fullDate string, i int) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"data-\")\n\tbuffer.WriteString(fullDate)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(strconv.Itoa(i))\n\tbuffer.WriteString(\".gz\")\n\n\treturn buffer.String()\n\n}\n\nfunc handleError(message string, err error) {\n\tfmt.Println(message, err)\n\tos.Exit(1)\n}\ndeduce html urlpackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/cdale77\/gitmine\/Godeps\/_workspace\/src\/github.com\/melvinmt\/firebase\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Event struct {\n\tType string\n\tCreated_at string\n\tActor EventActor\n\tPayload EventPayload\n}\n\ntype EventActor struct {\n\tLogin string\n\tAvatar_url string\n}\n\ntype EventPayload struct {\n\tSize int\n\tCommits []CommitCommit\n}\n\ntype StoredCommit struct {\n\tDate string\n\tLogin string\n\tAvatar string\n\tMessage string\n\tUrl string\n}\n\ntype CommitCommit struct {\n\tMessage string\n\tUrl string\n}\n\nfunc main() {\n\tfullDate := time.Now().AddDate(0, 0, -1).Format(\"2006-01-02\")\n\tgetData(fullDate)\n}\n\nfunc storeCommit(event Event, commitMessage string, commitUrl string) bool {\n\tfmt.Println(\"storing event:\")\n\tfmt.Println(event)\n\tauthToken := os.Getenv(\"FIREBASE_SECRET\")\n\n\turl := os.Getenv(\"FIREBASE_URL\")\n\n\tfireBase := firebase.NewReference(url).Auth(authToken)\n\n\tvar storedCommit StoredCommit\n\tstoredCommit.Date = event.Created_at\n\tstoredCommit.Login = event.Actor.Login\n\tstoredCommit.Avatar = event.Actor.Avatar_url\n\tstoredCommit.Message = commitMessage\n\tstoredCommit.Url = commitUrl\n\n\terr := fireBase.Push(storedCommit)\n\tif err != nil {\n\t\tfmt.Println(\"Firebase error\")\n\t\tfmt.Println(err)\n\t\treturn false\n\t} else {\n\t\tfmt.Println(\"Firebase success\")\n\t\treturn true\n\t}\n}\n\n\/\/ There must be a better way to do this. 
Probably sort cussWords alpha\n\/\/ and use a lookup table.\nfunc isDirty(message string) bool {\n\n\tresult := false\n\n\tcussWords := []string{\n\t\t\"fuck\",\n\t\t\"bitch\",\n\t\t\"stupid\",\n\t\t\"tits\",\n\t\t\"asshole\",\n\t\t\"cocksucker\",\n\t\t\"cunt\",\n\t\t\"hell\",\n\t\t\"douche\",\n\t\t\"testicle\",\n\t\t\"twat\",\n\t\t\"bastard\",\n\t\t\"sperm\",\n\t\t\"shit\",\n\t\t\"dildo\",\n\t\t\"wanker\",\n\t\t\"prick\",\n\t\t\"penis\",\n\t\t\"vagina\",\n\t\t\"whore\"}\n\n\tmessageWords := strings.Split(message, \" \")\n\n\tfor _, cussWord := range cussWords {\n\t\tfor _, word := range messageWords {\n\t\t\tif word == cussWord {\n\t\t\t\tresult = true\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\nfunc parseEvent(line string) {\n\tvar event Event\n\n\tjsonErr := json.Unmarshal([]byte(line), &event)\n\tif jsonErr != nil {\n\t\tfmt.Println(\"Could not parse json.\")\n\t\tfmt.Println(jsonErr)\n\t}\n\n\tif event.Type == \"PushEvent\" && event.Payload.Size > 0 {\n\n\t\t\/\/ An event can have multiple commits.\n\t\tcommits := event.Payload.Commits\n\t\tfor _, commit := range commits {\n\t\t\tif isDirty(commit.Message) {\n\t\t\t\tfmt.Println(commit.Message)\n\t\t\t\thtmlUrl := makeHtmlUrl(commit.Url)\n\t\t\t\tstoreCommit(event, commit.Message, htmlUrl)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc parseFile(fName string) {\n\t\/\/ https:\/\/groups.google.com\/forum\/#!topic\/golang-nuts\/GjIkryuCyAY\n\t\/\/ TODO: standardize use of file api\n\t\/\/ https:\/\/stackoverflow.com\/questions\/1821811\/how-to-read-write-from-to-file\n\tfileOS, err := os.Open(fName)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Can't open %s: error: %s\\n\", fName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/https:\/\/stackoverflow.com\/questions\/1821811\/how-to-read-write-from-to-file\n\t\/\/ close fi on exit and check for its returned error\n\tdefer func() {\n\t\tif err := fileOS.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tfileGzip, err := gzip.NewReader(fileOS)\n\tif err != nil {\n\t\tfmt.Printf(\"The file %v is not in gzip format.\\n\", fName)\n\t\tos.Exit(1)\n\t}\n\n\tfileRead := bufio.NewReader(fileGzip)\n\ti := 0\n\tfor {\n\t\tline, err := fileRead.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error reading file.\")\n\t\t\tfmt.Println(err)\n\t\t\tbreak\n\t\t}\n\n\t\tparseEvent(line)\n\n\t\ti++\n\t}\n\n\tos.Remove(fName)\n}\n\nfunc getData(fullDate string) {\n\n\turls := makeUrlArray(fullDate)\n\n\tfor i, value := range urls {\n\n\t\tfmt.Println(\"fetching url\", value)\n\n\t\tresp, archiveErr := http.Get(value)\n\n\t\tif resp != nil {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\n\t\tif archiveErr != nil {\n\t\t\thandleError(\"Error getting github archive\", archiveErr)\n\t\t}\n\n\t\tcontents, readErr := ioutil.ReadAll(resp.Body)\n\n\t\tif readErr != nil {\n\t\t\thandleError(\"Error converting response\", readErr)\n\t\t}\n\n\t\tfname := makeFileName(fullDate, i)\n\n\t\tfileErr := ioutil.WriteFile(fname, contents, 0644)\n\n\t\tif fileErr != nil {\n\t\t\thandleError(\"Error writing response to file\", fileErr)\n\t\t}\n\n\t\tparseFile(fname)\n\n\t}\n}\n\nfunc makeUrlArray(fullDate string) [24]string {\n\n\tbaseUrl := makeUrlBase(fullDate)\n\turlEnd := \".json.gz\"\n\n\tvar urls [24]string\n\n\tfor i := 0; i < 24; i++ {\n\n\t\tvar buffer bytes.Buffer\n\t\tbuffer.WriteString(baseUrl)\n\t\tbuffer.WriteString(\"-\")\n\t\tbuffer.WriteString(strconv.Itoa(i))\n\t\tbuffer.WriteString(urlEnd)\n\t\turl := buffer.String()\n\n\t\turls[i] = url\n\t}\n\n\treturn urls\n}\n\nfunc makeUrlBase(fullDate string) string {\n\tsplit 
:= strings.Split(fullDate, \"-\")\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"http:\/\/data.githubarchive.org\/\")\n\tbuffer.WriteString(split[0]) \/\/year\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[1]) \/\/month\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(split[2]) \/\/day\n\n\treturn buffer.String()\n}\n\nfunc makeFileName(fullDate string, i int) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"data-\")\n\tbuffer.WriteString(fullDate)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(strconv.Itoa(i))\n\tbuffer.WriteString(\".gz\")\n\n\treturn buffer.String()\n\n}\n\n\/\/ The data does not contain an url to make a proper html page. But we can\n\/\/ deduce it from the supplied api url (which makes json)\nfunc makeHtmlUrl(apiUrl string) string {\n\tnewUrl1 := strings.Replace(apiUrl, \"api.\", \"\", 1)\n\tnewUrl2 := strings.Replace(newUrl1, \"repos\/\", \"\", 1)\n\tnewUrl3 := strings.Replace(newUrl2, \"commits\", \"commit\", 1)\n\treturn newUrl3\n}\n\nfunc handleError(message string, err error) {\n\tfmt.Println(message, err)\n\tos.Exit(1)\n}\n<|endoftext|>"} {"text":"package glasses\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\nconst (\n\tCLOUD_VISION_ENDPOINT = \"https:\/\/vision.googleapis.com\/v1alpha1\/images:annotate\"\n)\n\ntype Glasses struct {\n\tClient *http.Client\n}\n\ntype CloudVisionRequest struct {\n\tRequests []*AnnotateRequest `json:\"requests\"`\n\tUser string `json:\"user\"`\n}\n\ntype AnnotateRequest struct {\n\tImage *Image `json:\"image\"`\n\tFeatures []Feature `json:\"features\"`\n\tImageContext *ImageContext `json:\"imageContext,omitempty\"`\n}\n\ntype Feature struct {\n\tType string `json:\"type\"`\n\tMaxResults int `json:\"maxResults\"`\n}\n\ntype ImageContext struct {\n\tLatLongRect interface{} `json:\"latLongRect\"`\n\tImageContextSearchExtension interface{} `imageContextSearchExtension`\n}\n\n\/\/type AnnotateResponse struct {\n\/\/FaceAnnotations []FaceAnnotation `json:\"faceAnnotations\"`\n\/\/LandmarkAnnotations []LandmarkAnnotation `json:\"landmarkAnnotation\"`\n\/\/LogoAnnotations []LogoAnnotation `json:\"logoAnnotations\"`\n\/\/LabelAnnotations []LabelAnnotation `json:\"labelAnnotations\"`\n\/\/TextAnnotations []TextAnnotation `json:\"textAnnotations\"`\n\/\/SafeSearchAnnotation SafeSearchAnnotation `json:\"safeSearchAnnotation\"`\n\/\/SuggestAnnotations []SuggestAnnotation `json:\"suggestAnnotations\"`\n\/\/QueryAnnotation QueryAnnotation `json:\"queryAnnotation\"`\n\/\/Error Status `json:\"error\"`\n\/\/}\n\ntype AnnotateResponses struct {\n\tResponses []AnnotateResponse `json:\"responses\"`\n}\n\ntype AnnotateResponse struct {\n\tFaceAnnotations []interface{} `json:\"faceAnnotations\"`\n\tLandmarkAnnotations []interface{} `json:\"landmarkAnnotation\"`\n\tLogoAnnotations []interface{} `json:\"logoAnnotations\"`\n\tLabelAnnotations []interface{} `json:\"labelAnnotations\"`\n\tTextAnnotations []interface{} `json:\"textAnnotations\"`\n\tSafeSearchAnnotation interface{} `json:\"safeSearchAnnotation\"`\n\tSuggestAnnotations []interface{} `json:\"suggestAnnotations\"`\n\tQueryAnnotation interface{} `json:\"queryAnnotation\"`\n\tError interface{} `json:\"error\"`\n}\n\nfunc NewGlasses() (*Glasses, error) {\n\tclient, err := google.DefaultClient(oauth2.NoContext, \"https:\/\/www.googleapis.com\/auth\/cloud-platform\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Glasses{client}, nil\n}\n\nfunc (g 
*Glasses) Do(r *CloudVisionRequest) (*AnnotateResponses, error) {\n\n\tjR, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawResp, err := g.Client.Post(CLOUD_VISION_ENDPOINT, \"application\/json\", bytes.NewBuffer(jR))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(rawResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/log.Println(\"Body:\", string(body))\n\n\tvar resp *AnnotateResponses\n\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}\nfix struct tagpackage glasses\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\nconst (\n\tCLOUD_VISION_ENDPOINT = \"https:\/\/vision.googleapis.com\/v1alpha1\/images:annotate\"\n)\n\ntype Glasses struct {\n\tClient *http.Client\n}\n\ntype CloudVisionRequest struct {\n\tRequests []*AnnotateRequest `json:\"requests\"`\n\tUser string `json:\"user\"`\n}\n\ntype AnnotateRequest struct {\n\tImage *Image `json:\"image\"`\n\tFeatures []Feature `json:\"features\"`\n\tImageContext *ImageContext `json:\"imageContext,omitempty\"`\n}\n\ntype Feature struct {\n\tType string `json:\"type\"`\n\tMaxResults int `json:\"maxResults\"`\n}\n\ntype ImageContext struct {\n\tLatLongRect interface{} `json:\"latLongRect\"`\n\tImageContextSearchExtension interface{} `json:\"imageContextSearchExtension\"`\n}\n\n\/\/type AnnotateResponse struct {\n\/\/FaceAnnotations []FaceAnnotation `json:\"faceAnnotations\"`\n\/\/LandmarkAnnotations []LandmarkAnnotation `json:\"landmarkAnnotation\"`\n\/\/LogoAnnotations []LogoAnnotation `json:\"logoAnnotations\"`\n\/\/LabelAnnotations []LabelAnnotation `json:\"labelAnnotations\"`\n\/\/TextAnnotations []TextAnnotation `json:\"textAnnotations\"`\n\/\/SafeSearchAnnotation SafeSearchAnnotation `json:\"safeSearchAnnotation\"`\n\/\/SuggestAnnotations []SuggestAnnotation `json:\"suggestAnnotations\"`\n\/\/QueryAnnotation QueryAnnotation `json:\"queryAnnotation\"`\n\/\/Error Status `json:\"error\"`\n\/\/}\n\ntype AnnotateResponses struct {\n\tResponses []AnnotateResponse `json:\"responses\"`\n}\n\ntype AnnotateResponse struct {\n\tFaceAnnotations []interface{} `json:\"faceAnnotations\"`\n\tLandmarkAnnotations []interface{} `json:\"landmarkAnnotation\"`\n\tLogoAnnotations []interface{} `json:\"logoAnnotations\"`\n\tLabelAnnotations []interface{} `json:\"labelAnnotations\"`\n\tTextAnnotations []interface{} `json:\"textAnnotations\"`\n\tSafeSearchAnnotation interface{} `json:\"safeSearchAnnotation\"`\n\tSuggestAnnotations []interface{} `json:\"suggestAnnotations\"`\n\tQueryAnnotation interface{} `json:\"queryAnnotation\"`\n\tError interface{} `json:\"error\"`\n}\n\nfunc NewGlasses() (*Glasses, error) {\n\tclient, err := google.DefaultClient(oauth2.NoContext, \"https:\/\/www.googleapis.com\/auth\/cloud-platform\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Glasses{client}, nil\n}\n\nfunc (g *Glasses) Do(r *CloudVisionRequest) (*AnnotateResponses, error) {\n\n\tjR, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawResp, err := g.Client.Post(CLOUD_VISION_ENDPOINT, \"application\/json\", bytes.NewBuffer(jR))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(rawResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/log.Println(\"Body:\", string(body))\n\n\tvar resp *AnnotateResponses\n\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resp, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2020 Buf Technologies Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage buf\n\nimport (\n\t\"context\"\n\n\t\"github.com\/bufbuild\/buf\/internal\/pkg\/app\/appcmd\"\n\t\"github.com\/bufbuild\/buf\/internal\/pkg\/app\/appflag\"\n)\n\nconst version = \"0.16.0-dev\"\n\n\/\/ Main is the main.\nfunc Main(use string, options ...RootCommandOption) {\n\tappcmd.Main(context.Background(), newRootCommand(use, options...), version)\n}\n\n\/\/ RootCommandOption is an option for a root Command.\ntype RootCommandOption func(*appcmd.Command, appflag.Builder)\nUpdate to v0.16.0\/\/ Copyright 2020 Buf Technologies Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage buf\n\nimport (\n\t\"context\"\n\n\t\"github.com\/bufbuild\/buf\/internal\/pkg\/app\/appcmd\"\n\t\"github.com\/bufbuild\/buf\/internal\/pkg\/app\/appflag\"\n)\n\nconst version = \"0.16.0\"\n\n\/\/ Main is the main.\nfunc Main(use string, options ...RootCommandOption) {\n\tappcmd.Main(context.Background(), newRootCommand(use, options...), version)\n}\n\n\/\/ RootCommandOption is an option for a root Command.\ntype RootCommandOption func(*appcmd.Command, appflag.Builder)\n<|endoftext|>"} {"text":"package console\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/jsonutil\"\n\t\"github.com\/cenkalti\/rain\/rainrpc\"\n\t\"github.com\/cenkalti\/rain\/torrent\"\n\t\"github.com\/jroimartin\/gocui\"\n)\n\nconst (\n\t\/\/ tabs\n\tgeneral int = iota\n\ttrackers\n\tpeers\n)\n\ntype Console struct {\n\tclient *rainrpc.Client\n\ttorrents []rainrpc.Torrent\n\terrTorrents error\n\tselectedID uint64\n\tselectedTab int\n\tstats torrent.Stats\n\ttrackers []torrent.Tracker\n\tpeers []torrent.Peer\n\terrDetails error\n\tm sync.Mutex\n\tupdateTorrentsC chan struct{}\n\tupdateDetailsC chan struct{}\n}\n\nfunc New(clt *rainrpc.Client) *Console {\n\treturn &Console{\n\t\tclient: clt,\n\t\tupdateTorrentsC: make(chan struct{}),\n\t\tupdateDetailsC: make(chan struct{}),\n\t}\n}\n\nfunc (c *Console) Run() error {\n\tg, err := gocui.NewGui(gocui.OutputNormal)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer g.Close()\n\n\tg.SetManagerFunc(c.layout)\n\n\tg.SetKeybinding(\"\", gocui.KeyCtrlC, gocui.ModNone, quit)\n\tg.SetKeybinding(\"\", 'q', gocui.ModNone, quit)\n\tg.SetKeybinding(\"torrents\", 'j', gocui.ModNone, 
c.cursorDown)\n\tg.SetKeybinding(\"torrents\", 'k', gocui.ModNone, c.cursorUp)\n\tg.SetKeybinding(\"torrents\", 'R', gocui.ModNone, c.removeTorrent)\n\tg.SetKeybinding(\"torrents\", 's', gocui.ModNone, c.startTorrent)\n\tg.SetKeybinding(\"torrents\", 'S', gocui.ModNone, c.stopTorrent)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlG, gocui.ModNone, c.switchGeneral)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlT, gocui.ModNone, c.switchTrackers)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlP, gocui.ModNone, c.switchPeers)\n\n\tgo c.updateLoop(g)\n\n\terr = g.MainLoop()\n\tif err == gocui.ErrQuit {\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc (c *Console) layout(g *gocui.Gui) error {\n\terr := c.drawTorrents(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = c.drawDetails(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = g.SetCurrentView(\"torrents\")\n\treturn err\n}\n\nfunc (c *Console) drawTorrents(g *gocui.Gui) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tmaxX, maxY := g.Size()\n\thalfY := maxY \/ 2\n\tif v, err := g.SetView(\"torrents\", -1, -1, maxX, halfY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Highlight = true\n\t\tv.SelBgColor = gocui.ColorGreen\n\t\tv.SelFgColor = gocui.ColorBlack\n\t\tfmt.Fprintln(v, \"loading torrents...\")\n\t} else {\n\t\tv.Clear()\n\t\tif c.errTorrents != nil {\n\t\t\tfmt.Fprintln(v, \"error:\", c.errTorrents)\n\t\t\tc.selectedID = 0\n\t\t} else {\n\t\t\tfor _, t := range c.torrents {\n\t\t\t\tfmt.Fprintf(v, \"%5d %s %5d %s\\n\", t.ID, t.InfoHash, t.Port, t.Name)\n\t\t\t}\n\t\t\t_, cy := v.Cursor()\n\t\t\t_, oy := v.Origin()\n\t\t\tselectedRow := cy + oy\n\t\t\tif selectedRow < len(c.torrents) {\n\t\t\t\tc.setSelectedID(c.torrents[selectedRow].ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Console) drawDetails(g *gocui.Gui) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tmaxX, maxY := g.Size()\n\thalfY := maxY \/ 2\n\tif v, err := g.SetView(\"details\", -1, halfY, maxX, maxY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Wrap = true\n\t\tfmt.Fprintln(v, \"loading details...\")\n\t} else {\n\t\tv.Clear()\n\t\tif c.errDetails != nil {\n\t\t\tfmt.Fprintln(v, \"error:\", c.errDetails)\n\t\t} else {\n\t\t\tswitch c.selectedTab {\n\t\t\tcase general:\n\t\t\t\tb, err := jsonutil.MarshalCompactPretty(c.stats)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Fprintln(v, \"error:\", c.errDetails)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintln(v, string(b))\n\t\t\t\t}\n\t\t\tcase trackers:\n\t\t\t\tfor i, t := range c.trackers {\n\t\t\t\t\tfmt.Fprintf(v, \"#%d [%s] Status: %s, Seeders: %d, Leechers: %d\\n\", i, t.URL, t.Status, t.Seeders, t.Leechers)\n\t\t\t\t}\n\t\t\tcase peers:\n\t\t\t\tfor i, p := range c.peers {\n\t\t\t\t\tfmt.Fprintf(v, \"#%d Addr: %s\\n\", i, p.Addr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Console) updateLoop(g *gocui.Gui) {\n\tc.updateTorrents(g)\n\tc.updateDetails(g)\n\n\tticker := time.NewTicker(time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.updateTorrents(g)\n\t\t\tc.updateDetails(g)\n\t\tcase <-c.updateTorrentsC:\n\t\t\tc.updateTorrents(g)\n\t\tcase <-c.updateDetailsC:\n\t\t\tc.updateDetails(g)\n\t\t}\n\t}\n}\n\nfunc (c *Console) updateTorrents(g *gocui.Gui) {\n\tresp, err := c.client.ListTorrents()\n\n\tsort.Slice(resp.Torrents, func(i, j int) bool { return resp.Torrents[i].ID < resp.Torrents[j].ID })\n\n\tc.m.Lock()\n\tc.torrents = resp.Torrents\n\tc.errTorrents = err\n\tif len(c.torrents) == 0 
{\n\t\tc.setSelectedID(0)\n\t} else if c.selectedID == 0 {\n\t\tc.setSelectedID(c.torrents[0].ID)\n\t}\n\tc.m.Unlock()\n\n\tg.Update(c.drawTorrents)\n}\n\nfunc (c *Console) updateDetails(g *gocui.Gui) {\n\tc.m.Lock()\n\tselectedID := c.selectedID\n\tc.m.Unlock()\n\n\tif selectedID != 0 {\n\t\tswitch c.selectedTab {\n\t\tcase general:\n\t\t\tresp, err := c.client.GetTorrentStats(selectedID)\n\t\t\tc.m.Lock()\n\t\t\tc.stats = resp.Stats\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\tcase trackers:\n\t\t\tresp, err := c.client.GetTorrentTrackers(selectedID)\n\t\t\tsort.Slice(resp.Trackers, func(i, j int) bool { return strings.Compare(resp.Trackers[i].URL, resp.Trackers[j].URL) < 0 })\n\t\t\tc.m.Lock()\n\t\t\tc.trackers = resp.Trackers\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\tcase peers:\n\t\t\tresp, err := c.client.GetTorrentPeers(selectedID)\n\t\t\tsort.Slice(resp.Peers, func(i, j int) bool { return strings.Compare(resp.Peers[i].Addr, resp.Peers[j].Addr) < 0 })\n\t\t\tc.m.Lock()\n\t\t\tc.peers = resp.Peers\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\t}\n\t} else {\n\t\tc.m.Lock()\n\t\tc.errDetails = errors.New(\"no torrent selected\")\n\t\tc.m.Unlock()\n\t}\n\n\tg.Update(c.drawDetails)\n}\n\nfunc quit(g *gocui.Gui, v *gocui.View) error {\n\treturn gocui.ErrQuit\n}\n\nfunc (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tcx, cy := v.Cursor()\n\tox, oy := v.Origin()\n\tif cy+oy >= len(c.torrents)-1 {\n\t\treturn nil\n\t}\n\tif err := v.SetCursor(cx, cy+1); err != nil {\n\t\tif err := v.SetOrigin(ox, oy+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trow := cy + oy + 1\n\tif row >= 0 && row < len(c.torrents) {\n\t\tc.setSelectedID(c.torrents[row].ID)\n\t}\n\treturn nil\n}\n\nfunc (c *Console) cursorUp(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tcx, cy := v.Cursor()\n\tox, oy := v.Origin()\n\tif cy+oy <= 0 {\n\t\treturn nil\n\t}\n\tif err := v.SetCursor(cx, cy-1); err != nil && oy > 0 {\n\t\tif err := v.SetOrigin(ox, oy-1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trow := cy + oy - 1\n\tif row >= 0 && row < len(c.torrents) {\n\t\tc.setSelectedID(c.torrents[row].ID)\n\t}\n\treturn nil\n}\n\nfunc (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.RemoveTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateTorrents()\n\treturn nil\n}\n\nfunc (c *Console) setSelectedID(id uint64) {\n\tchanged := id != c.selectedID\n\tc.selectedID = id\n\tif changed {\n\t\tc.triggerUpdateDetails()\n\t}\n}\n\nfunc (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.StartTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.StopTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tc.selectedTab = general\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tc.selectedTab = trackers\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error 
{\n\tc.m.Lock()\n\tc.selectedTab = peers\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) triggerUpdateDetails() {\n\tselect {\n\tcase c.updateDetailsC <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (c *Console) triggerUpdateTorrents() {\n\tselect {\n\tcase c.updateTorrentsC <- struct{}{}:\n\tdefault:\n\t}\n}\nmore responsive consolepackage console\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/jsonutil\"\n\t\"github.com\/cenkalti\/rain\/rainrpc\"\n\t\"github.com\/cenkalti\/rain\/torrent\"\n\t\"github.com\/jroimartin\/gocui\"\n)\n\nconst (\n\t\/\/ tabs\n\tgeneral int = iota\n\ttrackers\n\tpeers\n)\n\ntype Console struct {\n\tclient *rainrpc.Client\n\ttorrents []rainrpc.Torrent\n\terrTorrents error\n\tselectedID uint64\n\tselectedTab int\n\tstats torrent.Stats\n\ttrackers []torrent.Tracker\n\tpeers []torrent.Peer\n\terrDetails error\n\tupdatingDetails bool\n\tm sync.Mutex\n\tupdateTorrentsC chan struct{}\n\tupdateDetailsC chan struct{}\n}\n\nfunc New(clt *rainrpc.Client) *Console {\n\treturn &Console{\n\t\tclient: clt,\n\t\tupdateTorrentsC: make(chan struct{}),\n\t\tupdateDetailsC: make(chan struct{}),\n\t}\n}\n\nfunc (c *Console) Run() error {\n\tg, err := gocui.NewGui(gocui.OutputNormal)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer g.Close()\n\n\tg.SetManagerFunc(c.layout)\n\n\tg.SetKeybinding(\"\", gocui.KeyCtrlC, gocui.ModNone, quit)\n\tg.SetKeybinding(\"\", 'q', gocui.ModNone, quit)\n\tg.SetKeybinding(\"torrents\", 'j', gocui.ModNone, c.cursorDown)\n\tg.SetKeybinding(\"torrents\", 'k', gocui.ModNone, c.cursorUp)\n\tg.SetKeybinding(\"torrents\", 'R', gocui.ModNone, c.removeTorrent)\n\tg.SetKeybinding(\"torrents\", 's', gocui.ModNone, c.startTorrent)\n\tg.SetKeybinding(\"torrents\", 'S', gocui.ModNone, c.stopTorrent)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlG, gocui.ModNone, c.switchGeneral)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlT, gocui.ModNone, c.switchTrackers)\n\tg.SetKeybinding(\"torrents\", gocui.KeyCtrlP, gocui.ModNone, c.switchPeers)\n\n\tgo c.updateLoop(g)\n\n\terr = g.MainLoop()\n\tif err == gocui.ErrQuit {\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc (c *Console) layout(g *gocui.Gui) error {\n\terr := c.drawTorrents(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = c.drawDetails(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = g.SetCurrentView(\"torrents\")\n\treturn err\n}\n\nfunc (c *Console) drawTorrents(g *gocui.Gui) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tmaxX, maxY := g.Size()\n\thalfY := maxY \/ 2\n\tif v, err := g.SetView(\"torrents\", -1, -1, maxX, halfY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Highlight = true\n\t\tv.SelBgColor = gocui.ColorGreen\n\t\tv.SelFgColor = gocui.ColorBlack\n\t\tfmt.Fprintln(v, \"loading torrents...\")\n\t} else {\n\t\tv.Clear()\n\t\tif c.errTorrents != nil {\n\t\t\tfmt.Fprintln(v, \"error:\", c.errTorrents)\n\t\t\tc.selectedID = 0\n\t\t} else {\n\t\t\tfor _, t := range c.torrents {\n\t\t\t\tfmt.Fprintf(v, \"%5d %s %5d %s\\n\", t.ID, t.InfoHash, t.Port, t.Name)\n\t\t\t}\n\t\t\t_, cy := v.Cursor()\n\t\t\t_, oy := v.Origin()\n\t\t\tselectedRow := cy + oy\n\t\t\tif selectedRow < len(c.torrents) {\n\t\t\t\tc.setSelectedID(c.torrents[selectedRow].ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Console) drawDetails(g *gocui.Gui) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tmaxX, maxY := g.Size()\n\thalfY := maxY \/ 2\n\tif v, err := 
g.SetView(\"details\", -1, halfY, maxX, maxY); err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tv.Wrap = true\n\t\tfmt.Fprintln(v, \"loading details...\")\n\t} else {\n\t\tv.Clear()\n\t\tif c.updatingDetails {\n\t\t\tfmt.Fprintln(v, \"refreshing...\")\n\t\t\treturn nil\n\t\t}\n\t\tif c.errDetails != nil {\n\t\t\tfmt.Fprintln(v, \"error:\", c.errDetails)\n\t\t} else {\n\t\t\tswitch c.selectedTab {\n\t\t\tcase general:\n\t\t\t\tb, err := jsonutil.MarshalCompactPretty(c.stats)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Fprintln(v, \"error:\", c.errDetails)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintln(v, string(b))\n\t\t\t\t}\n\t\t\tcase trackers:\n\t\t\t\tfor i, t := range c.trackers {\n\t\t\t\t\tfmt.Fprintf(v, \"#%d [%s] Status: %s, Seeders: %d, Leechers: %d\\n\", i, t.URL, t.Status, t.Seeders, t.Leechers)\n\t\t\t\t}\n\t\t\tcase peers:\n\t\t\t\tfor i, p := range c.peers {\n\t\t\t\t\tfmt.Fprintf(v, \"#%d Addr: %s\\n\", i, p.Addr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Console) updateLoop(g *gocui.Gui) {\n\tc.updateTorrents(g)\n\tc.updateDetails(g)\n\n\tticker := time.NewTicker(time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.updateTorrents(g)\n\t\t\tc.updateDetails(g)\n\t\tcase <-c.updateTorrentsC:\n\t\t\tc.updateTorrents(g)\n\t\tcase <-c.updateDetailsC:\n\t\t\tc.updateDetails(g)\n\t\t}\n\t}\n}\n\nfunc (c *Console) updateTorrents(g *gocui.Gui) {\n\tresp, err := c.client.ListTorrents()\n\n\tsort.Slice(resp.Torrents, func(i, j int) bool { return resp.Torrents[i].ID < resp.Torrents[j].ID })\n\n\tc.m.Lock()\n\tc.torrents = resp.Torrents\n\tc.errTorrents = err\n\tif len(c.torrents) == 0 {\n\t\tc.setSelectedID(0)\n\t} else if c.selectedID == 0 {\n\t\tc.setSelectedID(c.torrents[0].ID)\n\t}\n\tc.m.Unlock()\n\n\tg.Update(c.drawTorrents)\n}\n\nfunc (c *Console) updateDetails(g *gocui.Gui) {\n\tc.m.Lock()\n\tselectedID := c.selectedID\n\tc.m.Unlock()\n\n\tif selectedID != 0 {\n\t\tswitch c.selectedTab {\n\t\tcase general:\n\t\t\tresp, err := c.client.GetTorrentStats(selectedID)\n\t\t\tc.m.Lock()\n\t\t\tc.stats = resp.Stats\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\tcase trackers:\n\t\t\tresp, err := c.client.GetTorrentTrackers(selectedID)\n\t\t\tsort.Slice(resp.Trackers, func(i, j int) bool { return strings.Compare(resp.Trackers[i].URL, resp.Trackers[j].URL) < 0 })\n\t\t\tc.m.Lock()\n\t\t\tc.trackers = resp.Trackers\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\tcase peers:\n\t\t\tresp, err := c.client.GetTorrentPeers(selectedID)\n\t\t\tsort.Slice(resp.Peers, func(i, j int) bool { return strings.Compare(resp.Peers[i].Addr, resp.Peers[j].Addr) < 0 })\n\t\t\tc.m.Lock()\n\t\t\tc.peers = resp.Peers\n\t\t\tc.errDetails = err\n\t\t\tc.m.Unlock()\n\t\t}\n\t} else {\n\t\tc.m.Lock()\n\t\tc.errDetails = errors.New(\"no torrent selected\")\n\t\tc.m.Unlock()\n\t}\n\n\tc.m.Lock()\n\tc.updatingDetails = false\n\tc.m.Unlock()\n\tg.Update(c.drawDetails)\n}\n\nfunc quit(g *gocui.Gui, v *gocui.View) error {\n\treturn gocui.ErrQuit\n}\n\nfunc (c *Console) cursorDown(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tcx, cy := v.Cursor()\n\tox, oy := v.Origin()\n\tif cy+oy >= len(c.torrents)-1 {\n\t\treturn nil\n\t}\n\tif err := v.SetCursor(cx, cy+1); err != nil {\n\t\tif err := v.SetOrigin(ox, oy+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trow := cy + oy + 1\n\tif row >= 0 && row < len(c.torrents) {\n\t\tc.updatingDetails = true\n\t\tc.setSelectedID(c.torrents[row].ID)\n\t}\n\treturn nil\n}\n\nfunc (c *Console) 
cursorUp(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tcx, cy := v.Cursor()\n\tox, oy := v.Origin()\n\tif cy+oy <= 0 {\n\t\treturn nil\n\t}\n\tif err := v.SetCursor(cx, cy-1); err != nil && oy > 0 {\n\t\tif err := v.SetOrigin(ox, oy-1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trow := cy + oy - 1\n\tif row >= 0 && row < len(c.torrents) {\n\t\tc.updatingDetails = true\n\t\tc.setSelectedID(c.torrents[row].ID)\n\t}\n\treturn nil\n}\n\nfunc (c *Console) removeTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.RemoveTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateTorrents()\n\treturn nil\n}\n\nfunc (c *Console) setSelectedID(id uint64) {\n\tchanged := id != c.selectedID\n\tc.selectedID = id\n\tif changed {\n\t\tc.triggerUpdateDetails()\n\t}\n}\n\nfunc (c *Console) startTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.StartTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) stopTorrent(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tid := c.selectedID\n\tc.m.Unlock()\n\n\t_, err := c.client.StopTorrent(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchGeneral(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tc.selectedTab = general\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchTrackers(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tc.selectedTab = trackers\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) switchPeers(g *gocui.Gui, v *gocui.View) error {\n\tc.m.Lock()\n\tc.selectedTab = peers\n\tc.m.Unlock()\n\tc.triggerUpdateDetails()\n\treturn nil\n}\n\nfunc (c *Console) triggerUpdateDetails() {\n\tselect {\n\tcase c.updateDetailsC <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (c *Console) triggerUpdateTorrents() {\n\tselect {\n\tcase c.updateTorrentsC <- struct{}{}:\n\tdefault:\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cache\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"go\/types\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/tools\/go\/packages\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/telemetry\"\n\t\"golang.org\/x\/tools\/internal\/memoize\"\n\t\"golang.org\/x\/tools\/internal\/span\"\n\t\"golang.org\/x\/tools\/internal\/telemetry\/log\"\n\t\"golang.org\/x\/tools\/internal\/telemetry\/trace\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\ntype packageHandleKey string\n\n\/\/ packageHandle implements source.PackageHandle.\ntype packageHandle struct {\n\thandle *memoize.Handle\n\n\tgoFiles []source.ParseGoHandle\n\n\t\/\/ compiledGoFiles are the ParseGoHandles that compose the package.\n\tcompiledGoFiles []source.ParseGoHandle\n\n\t\/\/ mode is the mode the files were parsed in.\n\tmode source.ParseMode\n\n\t\/\/ m is the metadata associated with the package.\n\tm *metadata\n\n\t\/\/ key is the hashed key for the package.\n\tkey packageHandleKey\n}\n\nfunc (ph *packageHandle) packageKey() packageKey {\n\treturn packageKey{\n\t\tid: ph.m.id,\n\t\tmode: ph.mode,\n\t}\n}\n\n\/\/ packageData contains the data produced by type-checking a package.\ntype packageData struct {\n\tmemoize.NoCopy\n\n\tpkg *pkg\n\terr error\n}\n\n\/\/ buildPackageHandle returns a source.PackageHandle for a given package and config.\nfunc (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, error) {\n\tif ph := s.getPackage(id, mode); ph != nil {\n\t\treturn ph, nil\n\t}\n\n\t\/\/ Build the PackageHandle for this ID and its dependencies.\n\tph, deps, err := s.buildKey(ctx, id, mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Do not close over the packageHandle or the snapshot in the Bind function.\n\t\/\/ This creates a cycle, which causes the finalizers to never run on the handles.\n\t\/\/ The possible cycles are:\n\t\/\/\n\t\/\/ packageHandle.h.function -> packageHandle\n\t\/\/ packageHandle.h.function -> snapshot -> packageHandle\n\t\/\/\n\n\tm := ph.m\n\tgoFiles := ph.goFiles\n\tcompiledGoFiles := ph.compiledGoFiles\n\tkey := ph.key\n\tfset := s.view.session.cache.fset\n\n\th := s.view.session.cache.store.Bind(key, func(ctx context.Context) interface{} {\n\t\t\/\/ Begin loading the direct dependencies, in parallel.\n\t\tfor _, dep := range deps {\n\t\t\tgo func(dep *packageHandle) {\n\t\t\t\tdep.check(ctx)\n\t\t\t}(dep)\n\t\t}\n\t\tdata := &packageData{}\n\t\tdata.pkg, data.err = typeCheck(ctx, fset, m, mode, goFiles, compiledGoFiles, deps)\n\t\treturn data\n\t})\n\tph.handle = h\n\n\t\/\/ Cache the PackageHandle in the snapshot.\n\ts.addPackage(ph)\n\n\treturn ph, nil\n}\n\n\/\/ buildKey computes the key for a given packageHandle.\nfunc (s *snapshot) buildKey(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, map[packagePath]*packageHandle, error) {\n\tm := s.getMetadata(id)\n\tif m == nil {\n\t\treturn nil, nil, errors.Errorf(\"no metadata for %s\", id)\n\t}\n\tgoFiles, err := s.parseGoHandles(ctx, m.goFiles, mode)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcompiledGoFiles, err := s.parseGoHandles(ctx, m.compiledGoFiles, mode)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tph := &packageHandle{\n\t\tm: m,\n\t\tgoFiles: goFiles,\n\t\tcompiledGoFiles: 
compiledGoFiles,\n\t\tmode: mode,\n\t}\n\t\/\/ Make sure all of the dependencies in depList are sorted.\n\tdepList := append([]packageID{}, m.deps...)\n\tsort.Slice(depList, func(i, j int) bool {\n\t\treturn depList[i] < depList[j]\n\t})\n\n\tdeps := make(map[packagePath]*packageHandle)\n\n\t\/\/ Begin computing the key by getting the depKeys for all dependencies.\n\tvar depKeys []packageHandleKey\n\tfor _, depID := range depList {\n\t\tmode := source.ParseExported\n\t\tif _, ok := s.isWorkspacePackage(depID); ok {\n\t\t\tmode = source.ParseFull\n\t\t}\n\t\tdepHandle, err := s.buildPackageHandle(ctx, depID, mode)\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"no dep handle\", err, telemetry.Package.Of(depID))\n\n\t\t\t\/\/ One bad dependency should not prevent us from checking the entire package.\n\t\t\t\/\/ Add a special key to mark a bad dependency.\n\t\t\tdepKeys = append(depKeys, packageHandleKey(fmt.Sprintf(\"%s import not found\", id)))\n\t\t\tcontinue\n\t\t}\n\t\tdeps[depHandle.m.pkgPath] = depHandle\n\t\tdepKeys = append(depKeys, depHandle.key)\n\t}\n\tph.key = checkPackageKey(ph.m.id, ph.compiledGoFiles, m.config, depKeys)\n\treturn ph, deps, nil\n}\n\nfunc checkPackageKey(id packageID, pghs []source.ParseGoHandle, cfg *packages.Config, deps []packageHandleKey) packageHandleKey {\n\tvar depBytes []byte\n\tfor _, dep := range deps {\n\t\tdepBytes = append(depBytes, []byte(dep)...)\n\t}\n\treturn packageHandleKey(hashContents([]byte(fmt.Sprintf(\"%s%s%s%s\", id, hashParseKeys(pghs), hashConfig(cfg), hashContents(depBytes)))))\n}\n\n\/\/ hashConfig returns the hash for the *packages.Config.\nfunc hashConfig(config *packages.Config) string {\n\tb := bytes.NewBuffer(nil)\n\n\t\/\/ Dir, Mode, Env, BuildFlags are the parts of the config that can change.\n\tb.WriteString(config.Dir)\n\tb.WriteString(string(config.Mode))\n\n\tfor _, e := range config.Env {\n\t\tb.WriteString(e)\n\t}\n\tfor _, f := range config.BuildFlags {\n\t\tb.WriteString(f)\n\t}\n\treturn hashContents(b.Bytes())\n}\n\nfunc (ph *packageHandle) Check(ctx context.Context) (source.Package, error) {\n\treturn ph.check(ctx)\n}\n\nfunc (ph *packageHandle) check(ctx context.Context) (*pkg, error) {\n\tv := ph.handle.Get(ctx)\n\tif v == nil {\n\t\treturn nil, ctx.Err()\n\t}\n\tdata := v.(*packageData)\n\treturn data.pkg, data.err\n}\n\nfunc (ph *packageHandle) CompiledGoFiles() []source.ParseGoHandle {\n\treturn ph.compiledGoFiles\n}\n\nfunc (ph *packageHandle) ID() string {\n\treturn string(ph.m.id)\n}\n\nfunc (ph *packageHandle) MissingDependencies() []string {\n\tvar md []string\n\tfor i := range ph.m.missingDeps {\n\t\tmd = append(md, string(i))\n\t}\n\treturn md\n}\n\nfunc hashImports(ctx context.Context, wsPackages []source.PackageHandle) (string, error) {\n\tresults := make(map[string]bool)\n\tvar imports []string\n\tfor _, ph := range wsPackages {\n\t\t\/\/ Check package since we do not always invalidate the metadata.\n\t\tpkg, err := ph.Check(ctx)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, path := range pkg.Imports() {\n\t\t\timp := path.PkgPath()\n\t\t\tif _, ok := results[imp]; !ok {\n\t\t\t\tresults[imp] = true\n\t\t\t\timports = append(imports, imp)\n\t\t\t}\n\t\t}\n\t}\n\tsort.Strings(imports)\n\thashed := strings.Join(imports, \",\")\n\treturn hashContents([]byte(hashed)), nil\n}\n\nfunc (ph *packageHandle) Cached() (source.Package, error) {\n\treturn ph.cached()\n}\n\nfunc (ph *packageHandle) cached() (*pkg, error) {\n\tv := ph.handle.Cached()\n\tif v == nil {\n\t\treturn nil, errors.Errorf(\"no cached type 
information for %s\", ph.m.pkgPath)\n\t}\n\tdata := v.(*packageData)\n\treturn data.pkg, data.err\n}\n\nfunc (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]source.ParseGoHandle, error) {\n\tphs := make([]source.ParseGoHandle, 0, len(files))\n\tfor _, uri := range files {\n\t\tfh, err := s.GetFile(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tphs = append(phs, s.view.session.cache.ParseGoHandle(fh, mode))\n\t}\n\treturn phs, nil\n}\n\nfunc typeCheck(ctx context.Context, fset *token.FileSet, m *metadata, mode source.ParseMode, goFiles []source.ParseGoHandle, compiledGoFiles []source.ParseGoHandle, deps map[packagePath]*packageHandle) (*pkg, error) {\n\tctx, done := trace.StartSpan(ctx, \"cache.importer.typeCheck\", telemetry.Package.Of(m.id))\n\tdefer done()\n\n\tvar rawErrors []error\n\tfor _, err := range m.errors {\n\t\trawErrors = append(rawErrors, err)\n\t}\n\n\tpkg := &pkg{\n\t\tid: m.id,\n\t\tpkgPath: m.pkgPath,\n\t\tmode: mode,\n\t\tgoFiles: goFiles,\n\t\tcompiledGoFiles: compiledGoFiles,\n\t\tmodule: m.module,\n\t\timports: make(map[packagePath]*pkg),\n\t\ttypesSizes: m.typesSizes,\n\t\ttypesInfo: &types.Info{\n\t\t\tTypes: make(map[ast.Expr]types.TypeAndValue),\n\t\t\tDefs: make(map[*ast.Ident]types.Object),\n\t\t\tUses: make(map[*ast.Ident]types.Object),\n\t\t\tImplicits: make(map[ast.Node]types.Object),\n\t\t\tSelections: make(map[*ast.SelectorExpr]*types.Selection),\n\t\t\tScopes: make(map[ast.Node]*types.Scope),\n\t\t},\n\t\tforTest: m.forTest,\n\t}\n\tvar (\n\t\tfiles = make([]*ast.File, len(pkg.compiledGoFiles))\n\t\tparseErrors = make([]error, len(pkg.compiledGoFiles))\n\t\tactualErrors = make([]error, len(pkg.compiledGoFiles))\n\t\twg sync.WaitGroup\n\t)\n\tfor i, ph := range pkg.compiledGoFiles {\n\t\twg.Add(1)\n\t\tgo func(i int, ph source.ParseGoHandle) {\n\t\t\tfiles[i], _, _, parseErrors[i], actualErrors[i] = ph.Parse(ctx)\n\t\t\twg.Done()\n\t\t}(i, ph)\n\t}\n\tfor _, ph := range pkg.goFiles {\n\t\twg.Add(1)\n\t\t\/\/ We need to parse the non-compiled go files, but we don't care about their errors.\n\t\tgo func(ph source.ParseGoHandle) {\n\t\t\tph.Parse(ctx)\n\t\t\twg.Done()\n\t\t}(ph)\n\t}\n\twg.Wait()\n\n\tfor _, e := range parseErrors {\n\t\tif e != nil {\n\t\t\trawErrors = append(rawErrors, e)\n\t\t}\n\t}\n\n\tvar i int\n\tfor _, f := range files {\n\t\tif f != nil {\n\t\t\tfiles[i] = f\n\t\t\ti++\n\t\t}\n\t}\n\tfiles = files[:i]\n\n\t\/\/ Use the default type information for the unsafe package.\n\tif pkg.pkgPath == \"unsafe\" {\n\t\tpkg.types = types.Unsafe\n\t\t\/\/ Don't type check Unsafe: it's unnecessary, and doing so exposes a data\n\t\t\/\/ race to Unsafe.completed.\n\t\treturn pkg, nil\n\t} else if len(files) == 0 { \/\/ not the unsafe package, no parsed files\n\t\treturn nil, errors.Errorf(\"no parsed files for package %s, expected: %s, errors: %v, list errors: %v\", pkg.pkgPath, pkg.compiledGoFiles, actualErrors, rawErrors)\n\t} else {\n\t\tpkg.types = types.NewPackage(string(m.pkgPath), m.name)\n\t}\n\n\tcfg := &types.Config{\n\t\tError: func(e error) {\n\t\t\trawErrors = append(rawErrors, e)\n\t\t},\n\t\tImporter: importerFunc(func(pkgPath string) (*types.Package, error) {\n\t\t\t\/\/ If the context was cancelled, we should abort.\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn nil, ctx.Err()\n\t\t\t}\n\t\t\tdep := deps[packagePath(pkgPath)]\n\t\t\tif dep == nil {\n\t\t\t\t\/\/ We may be in GOPATH mode, in which case we need to check vendor dirs.\n\t\t\t\tsearchDir := 
path.Dir(pkg.PkgPath())\n\t\t\t\tfor {\n\t\t\t\t\tvdir := packagePath(path.Join(searchDir, \"vendor\", pkgPath))\n\t\t\t\t\tif vdep := deps[vdir]; vdep != nil {\n\t\t\t\t\t\tdep = vdep\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Search until Dir doesn't take us anywhere new, e.g. \".\" or \"\/\".\n\t\t\t\t\tnext := path.Dir(searchDir)\n\t\t\t\t\tif searchDir == next {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tsearchDir = next\n\t\t\t\t}\n\t\t\t}\n\t\t\tif dep == nil {\n\t\t\t\treturn nil, errors.Errorf(\"no package for import %s\", pkgPath)\n\t\t\t}\n\t\t\tdepPkg, err := dep.check(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tpkg.imports[depPkg.pkgPath] = depPkg\n\t\t\treturn depPkg.types, nil\n\t\t}),\n\t}\n\tcheck := types.NewChecker(cfg, fset, pkg.types, pkg.typesInfo)\n\n\t\/\/ Type checking errors are handled via the config, so ignore them here.\n\t_ = check.Files(files)\n\t\/\/ If the context was cancelled, we may have returned a ton of transient\n\t\/\/ errors to the type checker. Swallow them.\n\tif ctx.Err() != nil {\n\t\treturn nil, ctx.Err()\n\t}\n\n\t\/\/ We don't care about a package's errors unless we have parsed it in full.\n\tif mode == source.ParseFull {\n\t\tfor _, e := range rawErrors {\n\t\t\tsrcErr, err := sourceError(ctx, fset, pkg, e)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(ctx, \"unable to compute error positions\", err, telemetry.Package.Of(pkg.ID()))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpkg.errors = append(pkg.errors, srcErr)\n\t\t}\n\t}\n\treturn pkg, nil\n}\n\n\/\/ An importerFunc is an implementation of the single-method\n\/\/ types.Importer interface based on a function value.\ntype importerFunc func(path string) (*types.Package, error)\n\nfunc (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }\ninternal\/lsp: report use of disallowed internal packages\/\/ Copyright 2019 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cache\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"go\/types\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org\/x\/tools\/go\/packages\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/internal\/lsp\/telemetry\"\n\t\"golang.org\/x\/tools\/internal\/memoize\"\n\t\"golang.org\/x\/tools\/internal\/span\"\n\t\"golang.org\/x\/tools\/internal\/telemetry\/log\"\n\t\"golang.org\/x\/tools\/internal\/telemetry\/trace\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\ntype packageHandleKey string\n\n\/\/ packageHandle implements source.PackageHandle.\ntype packageHandle struct {\n\thandle *memoize.Handle\n\n\tgoFiles []source.ParseGoHandle\n\n\t\/\/ compiledGoFiles are the ParseGoHandles that compose the package.\n\tcompiledGoFiles []source.ParseGoHandle\n\n\t\/\/ mode is the mode the files were parsed in.\n\tmode source.ParseMode\n\n\t\/\/ m is the metadata associated with the package.\n\tm *metadata\n\n\t\/\/ key is the hashed key for the package.\n\tkey packageHandleKey\n}\n\nfunc (ph *packageHandle) packageKey() packageKey {\n\treturn packageKey{\n\t\tid: ph.m.id,\n\t\tmode: ph.mode,\n\t}\n}\n\n\/\/ isValidImportFor reports whether the handle's package may be imported by the\n\/\/ package with the given import path, enforcing Go's \"internal\" package\n\/\/ visibility rule.\nfunc (ph *packageHandle) isValidImportFor(parentPkgPath string) bool {\n\timportPath := string(ph.m.pkgPath)\n\n\tpkgRootIndex := strings.Index(importPath, \"\/internal\/\")\n\tif pkgRootIndex != -1 && parentPkgPath != \"command-line-arguments\" {\n\t\tif !strings.HasPrefix(parentPkgPath, importPath[0:pkgRootIndex]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ packageData contains the data produced by type-checking a package.\ntype packageData struct {\n\tmemoize.NoCopy\n\n\tpkg *pkg\n\terr error\n}\n\n\/\/ buildPackageHandle returns a source.PackageHandle for a given package and config.\nfunc (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, error) {\n\tif ph := s.getPackage(id, mode); ph != nil {\n\t\treturn ph, nil\n\t}\n\n\t\/\/ Build the PackageHandle for this ID and its dependencies.\n\tph, deps, err := s.buildKey(ctx, id, mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Do not close over the packageHandle or the snapshot in the Bind function.\n\t\/\/ This creates a cycle, which causes the finalizers to never run on the handles.\n\t\/\/ The possible cycles are:\n\t\/\/\n\t\/\/ packageHandle.h.function -> packageHandle\n\t\/\/ packageHandle.h.function -> snapshot -> packageHandle\n\t\/\/\n\n\tm := ph.m\n\tgoFiles := ph.goFiles\n\tcompiledGoFiles := ph.compiledGoFiles\n\tkey := ph.key\n\tfset := s.view.session.cache.fset\n\n\th := s.view.session.cache.store.Bind(key, func(ctx context.Context) interface{} {\n\t\t\/\/ Begin loading the direct dependencies, in parallel.\n\t\tfor _, dep := range deps {\n\t\t\tgo func(dep *packageHandle) {\n\t\t\t\tdep.check(ctx)\n\t\t\t}(dep)\n\t\t}\n\t\tdata := &packageData{}\n\t\tdata.pkg, data.err = typeCheck(ctx, fset, m, mode, goFiles, compiledGoFiles, deps)\n\t\treturn data\n\t})\n\tph.handle = h\n\n\t\/\/ Cache the PackageHandle in the snapshot.\n\ts.addPackage(ph)\n\n\treturn ph, nil\n}\n\n\/\/ buildKey computes the key for a given packageHandle.\nfunc (s *snapshot) buildKey(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, map[packagePath]*packageHandle, error) {\n\tm := s.getMetadata(id)\n\tif m == nil {\n\t\treturn nil, 
nil, errors.Errorf(\"no metadata for %s\", id)\n\t}\n\tgoFiles, err := s.parseGoHandles(ctx, m.goFiles, mode)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcompiledGoFiles, err := s.parseGoHandles(ctx, m.compiledGoFiles, mode)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tph := &packageHandle{\n\t\tm: m,\n\t\tgoFiles: goFiles,\n\t\tcompiledGoFiles: compiledGoFiles,\n\t\tmode: mode,\n\t}\n\t\/\/ Make sure all of the depList are sorted.\n\tdepList := append([]packageID{}, m.deps...)\n\tsort.Slice(depList, func(i, j int) bool {\n\t\treturn depList[i] < depList[j]\n\t})\n\n\tdeps := make(map[packagePath]*packageHandle)\n\n\t\/\/ Begin computing the key by getting the depKeys for all dependencies.\n\tvar depKeys []packageHandleKey\n\tfor _, depID := range depList {\n\t\tmode := source.ParseExported\n\t\tif _, ok := s.isWorkspacePackage(depID); ok {\n\t\t\tmode = source.ParseFull\n\t\t}\n\t\tdepHandle, err := s.buildPackageHandle(ctx, depID, mode)\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"no dep handle\", err, telemetry.Package.Of(depID))\n\n\t\t\t\/\/ One bad dependency should not prevent us from checking the entire package.\n\t\t\t\/\/ Add a special key to mark a bad dependency.\n\t\t\tdepKeys = append(depKeys, packageHandleKey(fmt.Sprintf(\"%s import not found\", id)))\n\t\t\tcontinue\n\t\t}\n\t\tdeps[depHandle.m.pkgPath] = depHandle\n\t\tdepKeys = append(depKeys, depHandle.key)\n\t}\n\tph.key = checkPackageKey(ph.m.id, ph.compiledGoFiles, m.config, depKeys)\n\treturn ph, deps, nil\n}\n\nfunc checkPackageKey(id packageID, pghs []source.ParseGoHandle, cfg *packages.Config, deps []packageHandleKey) packageHandleKey {\n\tvar depBytes []byte\n\tfor _, dep := range deps {\n\t\tdepBytes = append(depBytes, []byte(dep)...)\n\t}\n\treturn packageHandleKey(hashContents([]byte(fmt.Sprintf(\"%s%s%s%s\", id, hashParseKeys(pghs), hashConfig(cfg), hashContents(depBytes)))))\n}\n\n\/\/ hashConfig returns the hash for the *packages.Config.\nfunc hashConfig(config *packages.Config) string {\n\tb := bytes.NewBuffer(nil)\n\n\t\/\/ Dir, Mode, Env, BuildFlags are the parts of the config that can change.\n\tb.WriteString(config.Dir)\n\tb.WriteString(string(config.Mode))\n\n\tfor _, e := range config.Env {\n\t\tb.WriteString(e)\n\t}\n\tfor _, f := range config.BuildFlags {\n\t\tb.WriteString(f)\n\t}\n\treturn hashContents(b.Bytes())\n}\n\nfunc (ph *packageHandle) Check(ctx context.Context) (source.Package, error) {\n\treturn ph.check(ctx)\n}\n\nfunc (ph *packageHandle) check(ctx context.Context) (*pkg, error) {\n\tv := ph.handle.Get(ctx)\n\tif v == nil {\n\t\treturn nil, ctx.Err()\n\t}\n\tdata := v.(*packageData)\n\treturn data.pkg, data.err\n}\n\nfunc (ph *packageHandle) CompiledGoFiles() []source.ParseGoHandle {\n\treturn ph.compiledGoFiles\n}\n\nfunc (ph *packageHandle) ID() string {\n\treturn string(ph.m.id)\n}\n\nfunc (ph *packageHandle) MissingDependencies() []string {\n\tvar md []string\n\tfor i := range ph.m.missingDeps {\n\t\tmd = append(md, string(i))\n\t}\n\treturn md\n}\n\nfunc hashImports(ctx context.Context, wsPackages []source.PackageHandle) (string, error) {\n\tresults := make(map[string]bool)\n\tvar imports []string\n\tfor _, ph := range wsPackages {\n\t\t\/\/ Check package since we do not always invalidate the metadata.\n\t\tpkg, err := ph.Check(ctx)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, path := range pkg.Imports() {\n\t\t\timp := path.PkgPath()\n\t\t\tif _, ok := results[imp]; !ok {\n\t\t\t\tresults[imp] = true\n\t\t\t\timports = append(imports, 
imp)\n\t\t\t}\n\t\t}\n\t}\n\tsort.Strings(imports)\n\thashed := strings.Join(imports, \",\")\n\treturn hashContents([]byte(hashed)), nil\n}\n\nfunc (ph *packageHandle) Cached() (source.Package, error) {\n\treturn ph.cached()\n}\n\nfunc (ph *packageHandle) cached() (*pkg, error) {\n\tv := ph.handle.Cached()\n\tif v == nil {\n\t\treturn nil, errors.Errorf(\"no cached type information for %s\", ph.m.pkgPath)\n\t}\n\tdata := v.(*packageData)\n\treturn data.pkg, data.err\n}\n\nfunc (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]source.ParseGoHandle, error) {\n\tphs := make([]source.ParseGoHandle, 0, len(files))\n\tfor _, uri := range files {\n\t\tfh, err := s.GetFile(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tphs = append(phs, s.view.session.cache.ParseGoHandle(fh, mode))\n\t}\n\treturn phs, nil\n}\n\nfunc typeCheck(ctx context.Context, fset *token.FileSet, m *metadata, mode source.ParseMode, goFiles []source.ParseGoHandle, compiledGoFiles []source.ParseGoHandle, deps map[packagePath]*packageHandle) (*pkg, error) {\n\tctx, done := trace.StartSpan(ctx, \"cache.importer.typeCheck\", telemetry.Package.Of(m.id))\n\tdefer done()\n\n\tvar rawErrors []error\n\tfor _, err := range m.errors {\n\t\trawErrors = append(rawErrors, err)\n\t}\n\n\tpkg := &pkg{\n\t\tid: m.id,\n\t\tpkgPath: m.pkgPath,\n\t\tmode: mode,\n\t\tgoFiles: goFiles,\n\t\tcompiledGoFiles: compiledGoFiles,\n\t\tmodule: m.module,\n\t\timports: make(map[packagePath]*pkg),\n\t\ttypesSizes: m.typesSizes,\n\t\ttypesInfo: &types.Info{\n\t\t\tTypes: make(map[ast.Expr]types.TypeAndValue),\n\t\t\tDefs: make(map[*ast.Ident]types.Object),\n\t\t\tUses: make(map[*ast.Ident]types.Object),\n\t\t\tImplicits: make(map[ast.Node]types.Object),\n\t\t\tSelections: make(map[*ast.SelectorExpr]*types.Selection),\n\t\t\tScopes: make(map[ast.Node]*types.Scope),\n\t\t},\n\t\tforTest: m.forTest,\n\t}\n\tvar (\n\t\tfiles = make([]*ast.File, len(pkg.compiledGoFiles))\n\t\tparseErrors = make([]error, len(pkg.compiledGoFiles))\n\t\tactualErrors = make([]error, len(pkg.compiledGoFiles))\n\t\twg sync.WaitGroup\n\t)\n\tfor i, ph := range pkg.compiledGoFiles {\n\t\twg.Add(1)\n\t\tgo func(i int, ph source.ParseGoHandle) {\n\t\t\tfiles[i], _, _, parseErrors[i], actualErrors[i] = ph.Parse(ctx)\n\t\t\twg.Done()\n\t\t}(i, ph)\n\t}\n\tfor _, ph := range pkg.goFiles {\n\t\twg.Add(1)\n\t\t\/\/ We need to parse the non-compiled go files, but we don't care about their errors.\n\t\tgo func(ph source.ParseGoHandle) {\n\t\t\tph.Parse(ctx)\n\t\t\twg.Done()\n\t\t}(ph)\n\t}\n\twg.Wait()\n\n\tfor _, e := range parseErrors {\n\t\tif e != nil {\n\t\t\trawErrors = append(rawErrors, e)\n\t\t}\n\t}\n\n\tvar i int\n\tfor _, f := range files {\n\t\tif f != nil {\n\t\t\tfiles[i] = f\n\t\t\ti++\n\t\t}\n\t}\n\tfiles = files[:i]\n\n\t\/\/ Use the default type information for the unsafe package.\n\tif pkg.pkgPath == \"unsafe\" {\n\t\tpkg.types = types.Unsafe\n\t\t\/\/ Don't type check Unsafe: it's unnecessary, and doing so exposes a data\n\t\t\/\/ race to Unsafe.completed.\n\t\treturn pkg, nil\n\t} else if len(files) == 0 { \/\/ not the unsafe package, no parsed files\n\t\treturn nil, errors.Errorf(\"no parsed files for package %s, expected: %s, errors: %v, list errors: %v\", pkg.pkgPath, pkg.compiledGoFiles, actualErrors, rawErrors)\n\t} else {\n\t\tpkg.types = types.NewPackage(string(m.pkgPath), m.name)\n\t}\n\n\tcfg := &types.Config{\n\t\tError: func(e error) {\n\t\t\trawErrors = append(rawErrors, e)\n\t\t},\n\t\tImporter: 
importerFunc(func(pkgPath string) (*types.Package, error) {\n\t\t\t\/\/ If the context was cancelled, we should abort.\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn nil, ctx.Err()\n\t\t\t}\n\t\t\tdep := deps[packagePath(pkgPath)]\n\t\t\tif dep == nil {\n\t\t\t\t\/\/ We may be in GOPATH mode, in which case we need to check vendor dirs.\n\t\t\t\tsearchDir := path.Dir(pkg.PkgPath())\n\t\t\t\tfor {\n\t\t\t\t\tvdir := packagePath(path.Join(searchDir, \"vendor\", pkgPath))\n\t\t\t\t\tif vdep := deps[vdir]; vdep != nil {\n\t\t\t\t\t\tdep = vdep\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Search until Dir doesn't take us anywhere new, e.g. \".\" or \"\/\".\n\t\t\t\t\tnext := path.Dir(searchDir)\n\t\t\t\t\tif searchDir == next {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tsearchDir = next\n\t\t\t\t}\n\t\t\t}\n\t\t\tif dep == nil {\n\t\t\t\treturn nil, errors.Errorf(\"no package for import %s\", pkgPath)\n\t\t\t}\n\t\t\tif !dep.isValidImportFor(pkg.PkgPath()) {\n\t\t\t\treturn nil, errors.Errorf(\"invalid use of internal package %s\", pkgPath)\n\t\t\t}\n\t\t\tdepPkg, err := dep.check(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tpkg.imports[depPkg.pkgPath] = depPkg\n\t\t\treturn depPkg.types, nil\n\t\t}),\n\t}\n\tcheck := types.NewChecker(cfg, fset, pkg.types, pkg.typesInfo)\n\n\t\/\/ Type checking errors are handled via the config, so ignore them here.\n\t_ = check.Files(files)\n\t\/\/ If the context was cancelled, we may have returned a ton of transient\n\t\/\/ errors to the type checker. Swallow them.\n\tif ctx.Err() != nil {\n\t\treturn nil, ctx.Err()\n\t}\n\n\t\/\/ We don't care about a package's errors unless we have parsed it in full.\n\tif mode == source.ParseFull {\n\t\tfor _, e := range rawErrors {\n\t\t\tsrcErr, err := sourceError(ctx, fset, pkg, e)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(ctx, \"unable to compute error positions\", err, telemetry.Package.Of(pkg.ID()))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpkg.errors = append(pkg.errors, srcErr)\n\t\t}\n\t}\n\treturn pkg, nil\n}\n\n\/\/ An importerFunc is an implementation of the single-method\n\/\/ types.Importer interface based on a function value.\ntype importerFunc func(path string) (*types.Package, error)\n\nfunc (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }\n<|endoftext|>"} {"text":"package oidc\n\n\/*\n\nfunc TestOpenIDConnectStore_GetClientPolicy(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: \"myotherclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"two_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t})\n\n\tpolicyOne := s.GetClientPolicy(\"myclient\")\n\tassert.Equal(t, authorization.OneFactor, policyOne)\n\n\tpolicyTwo := s.GetClientPolicy(\"myotherclient\")\n\tassert.Equal(t, authorization.TwoFactor, policyTwo)\n\n\tpolicyInvalid := s.GetClientPolicy(\"invalidclient\")\n\tassert.Equal(t, authorization.TwoFactor, policyInvalid)\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: 
[]schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t})\n\n\tclient, err := s.GetClient(context.Background(), \"myinvalidclient\")\n\tassert.EqualError(t, err, \"not_found\")\n\tassert.Nil(t, client)\n\n\tclient, err = s.GetClient(context.Background(), \"myclient\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, client)\n\tassert.Equal(t, \"myclient\", client.GetID())\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient_ValidClient(t *testing.T) {\n\tc1 := schema.OpenIDConnectClientConfiguration{\n\t\tID: \"myclient\",\n\t\tDescription: \"myclient desc\",\n\t\tPolicy: \"one_factor\",\n\t\tScopes: []string{\"openid\", \"profile\"},\n\t\tSecret: \"mysecret\",\n\t}\n\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{c1},\n\t})\n\n\tclient, err := s.GetFullClient(c1.ID)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, client)\n\tassert.Equal(t, client.ID, c1.ID)\n\tassert.Equal(t, client.Description, c1.Description)\n\tassert.Equal(t, client.Scopes, c1.Scopes)\n\tassert.Equal(t, client.GrantTypes, c1.GrantTypes)\n\tassert.Equal(t, client.ResponseTypes, c1.ResponseTypes)\n\tassert.Equal(t, client.RedirectURIs, c1.RedirectURIs)\n\tassert.Equal(t, client.Policy, authorization.OneFactor)\n\tassert.Equal(t, client.Secret, []byte(c1.Secret))\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient_InvalidClient(t *testing.T) {\n\tc1 := schema.OpenIDConnectClientConfiguration{\n\t\tID: \"myclient\",\n\t\tDescription: \"myclient desc\",\n\t\tPolicy: \"one_factor\",\n\t\tScopes: []string{\"openid\", \"profile\"},\n\t\tSecret: \"mysecret\",\n\t}\n\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{c1},\n\t})\n\n\tclient, err := s.GetFullClient(\"another-client\")\n\tassert.Nil(t, client)\n\tassert.EqualError(t, err, \"not_found\")\n}\n\nfunc TestOpenIDConnectStore_IsValidClientID(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t})\n\n\tvalidClient := s.IsValidClientID(\"myclient\")\n\tinvalidClient := s.IsValidClientID(\"myinvalidclient\")\n\n\tassert.True(t, validClient)\n\tassert.False(t, invalidClient)\n}.\n*\/\ntest(oidc): fix disabled tests (#3173)package oidc\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/authelia\/authelia\/v4\/internal\/authorization\"\n\t\"github.com\/authelia\/authelia\/v4\/internal\/configuration\/schema\"\n)\n\nfunc TestOpenIDConnectStore_GetClientPolicy(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", 
\"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: \"myotherclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"two_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t}, nil)\n\n\tpolicyOne := s.GetClientPolicy(\"myclient\")\n\tassert.Equal(t, authorization.OneFactor, policyOne)\n\n\tpolicyTwo := s.GetClientPolicy(\"myotherclient\")\n\tassert.Equal(t, authorization.TwoFactor, policyTwo)\n\n\tpolicyInvalid := s.GetClientPolicy(\"invalidclient\")\n\tassert.Equal(t, authorization.TwoFactor, policyInvalid)\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t}, nil)\n\n\tclient, err := s.GetClient(context.Background(), \"myinvalidclient\")\n\tassert.EqualError(t, err, \"not_found\")\n\tassert.Nil(t, client)\n\n\tclient, err = s.GetClient(context.Background(), \"myclient\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, client)\n\tassert.Equal(t, \"myclient\", client.GetID())\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient_ValidClient(t *testing.T) {\n\tc1 := schema.OpenIDConnectClientConfiguration{\n\t\tID: \"myclient\",\n\t\tDescription: \"myclient desc\",\n\t\tPolicy: \"one_factor\",\n\t\tScopes: []string{\"openid\", \"profile\"},\n\t\tSecret: \"mysecret\",\n\t}\n\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{c1},\n\t}, nil)\n\n\tclient, err := s.GetFullClient(c1.ID)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, client)\n\tassert.Equal(t, client.ID, c1.ID)\n\tassert.Equal(t, client.Description, c1.Description)\n\tassert.Equal(t, client.Scopes, c1.Scopes)\n\tassert.Equal(t, client.GrantTypes, c1.GrantTypes)\n\tassert.Equal(t, client.ResponseTypes, c1.ResponseTypes)\n\tassert.Equal(t, client.RedirectURIs, c1.RedirectURIs)\n\tassert.Equal(t, client.Policy, authorization.OneFactor)\n\tassert.Equal(t, client.Secret, []byte(c1.Secret))\n}\n\nfunc TestOpenIDConnectStore_GetInternalClient_InvalidClient(t *testing.T) {\n\tc1 := schema.OpenIDConnectClientConfiguration{\n\t\tID: \"myclient\",\n\t\tDescription: \"myclient desc\",\n\t\tPolicy: \"one_factor\",\n\t\tScopes: []string{\"openid\", \"profile\"},\n\t\tSecret: \"mysecret\",\n\t}\n\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{c1},\n\t}, nil)\n\n\tclient, err := s.GetFullClient(\"another-client\")\n\tassert.Nil(t, client)\n\tassert.EqualError(t, err, \"not_found\")\n}\n\nfunc TestOpenIDConnectStore_IsValidClientID(t *testing.T) {\n\ts := NewOpenIDConnectStore(&schema.OpenIDConnectConfiguration{\n\t\tIssuerPrivateKey: exampleIssuerPrivateKey,\n\t\tClients: []schema.OpenIDConnectClientConfiguration{\n\t\t\t{\n\t\t\t\tID: \"myclient\",\n\t\t\t\tDescription: \"myclient desc\",\n\t\t\t\tPolicy: \"one_factor\",\n\t\t\t\tScopes: []string{\"openid\", \"profile\"},\n\t\t\t\tSecret: \"mysecret\",\n\t\t\t},\n\t\t},\n\t}, nil)\n\n\tvalidClient := s.IsValidClientID(\"myclient\")\n\tinvalidClient := 
s.IsValidClientID(\"myinvalidclient\")\n\n\tassert.True(t, validClient)\n\tassert.False(t, invalidClient)\n}\n<|endoftext|>"} {"text":"package registry\n\nimport (\n\t\"fmt\"\n\n\tpackerSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-packer-service\/preview\/2021-04-30\/client\/packer_service\"\n\torganizationSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/client\/organization_service\"\n\tprojectSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/client\/project_service\"\n\trmmodels \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/models\"\n\t\"github.com\/hashicorp\/hcp-sdk-go\/httpclient\"\n\t\"github.com\/hashicorp\/packer\/internal\/registry\/env\"\n)\n\n\/\/ Client is an HCP client capable of making requests on behalf of a service principal\ntype Client struct {\n\tPacker packerSvc.ClientService\n\tOrganization organizationSvc.ClientService\n\tProject projectSvc.ClientService\n\n\t\/\/ OrganizationID is the organization unique identifier on HCP.\n\tOrganizationID string\n\n\t\/\/ ProjectID is the project unique identifier on HCP.\n\tProjectID string\n}\n\n\/\/ NewClient returns an authenticated client to a HCP Packer Registry.\n\/\/ Client authentication requires the following environment variables be set HCP_CLIENT_ID and HCP_CLIENT_SECRET.\n\/\/ Upon error a HCPClientError will be returned.\nfunc NewClient() (*Client, error) {\n\tif !env.HasHCPCredentials() {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: fmt.Errorf(\"the client authentication requires both %s and %s environment variables to be set\", env.HCPClientID, env.HCPClientSecret),\n\t\t}\n\t}\n\n\tcl, err := httpclient.New(httpclient.Config{})\n\tif err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\tclient := &Client{\n\t\tPacker: packerSvc.New(cl, nil),\n\t\tOrganization: organizationSvc.New(cl, nil),\n\t\tProject: projectSvc.New(cl, nil),\n\t}\n\n\tif err := client.loadOrganizationID(); err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\tif err := client.loadProjectID(); err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\treturn client, nil\n}\n\nfunc (c *Client) loadOrganizationID() error {\n\t\/\/ Get the organization ID.\n\tlistOrgParams := organizationSvc.NewOrganizationServiceListParams()\n\tlistOrgResp, err := c.Organization.OrganizationServiceList(listOrgParams, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to fetch organization list: %v\", err)\n\t}\n\torgLen := len(listOrgResp.Payload.Organizations)\n\tif orgLen != 1 {\n\t\treturn fmt.Errorf(\"unexpected number of organizations: expected 1, actual: %v\", orgLen)\n\t}\n\tc.OrganizationID = listOrgResp.Payload.Organizations[0].ID\n\treturn nil\n}\n\nfunc (c *Client) loadProjectID() error {\n\t\/\/ Get the project using the organization ID.\n\tlistProjParams := projectSvc.NewProjectServiceListParams()\n\tlistProjParams.ScopeID = &c.OrganizationID\n\tscopeType := string(rmmodels.HashicorpCloudResourcemanagerResourceIDResourceTypeORGANIZATION)\n\tlistProjParams.ScopeType = &scopeType\n\tlistProjResp, err := c.Project.ProjectServiceList(listProjParams, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to fetch project id: %v\", err)\n\t}\n\tif len(listProjResp.Payload.Projects) > 1 
{\n\t\treturn fmt.Errorf(\"this version of Packer does not support multiple projects\")\n\t}\n\tc.ProjectID = listProjResp.Payload.Projects[0].ID\n\treturn nil\n}\nadd packer user agent to HCP client (#11455)package registry\n\nimport (\n\t\"fmt\"\n\n\tpackerSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-packer-service\/preview\/2021-04-30\/client\/packer_service\"\n\torganizationSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/client\/organization_service\"\n\tprojectSvc \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/client\/project_service\"\n\trmmodels \"github.com\/hashicorp\/hcp-sdk-go\/clients\/cloud-resource-manager\/preview\/2019-12-10\/models\"\n\t\"github.com\/hashicorp\/hcp-sdk-go\/httpclient\"\n\t\"github.com\/hashicorp\/packer\/internal\/registry\/env\"\n\t\"github.com\/hashicorp\/packer\/version\"\n)\n\n\/\/ Client is an HCP client capable of making requests on behalf of a service principal\ntype Client struct {\n\tPacker packerSvc.ClientService\n\tOrganization organizationSvc.ClientService\n\tProject projectSvc.ClientService\n\n\t\/\/ OrganizationID is the organization unique identifier on HCP.\n\tOrganizationID string\n\n\t\/\/ ProjectID is the project unique identifier on HCP.\n\tProjectID string\n}\n\n\/\/ NewClient returns an authenticated client to a HCP Packer Registry.\n\/\/ Client authentication requires the following environment variables be set HCP_CLIENT_ID and HCP_CLIENT_SECRET.\n\/\/ Upon error a HCPClientError will be returned.\nfunc NewClient() (*Client, error) {\n\tif !env.HasHCPCredentials() {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: fmt.Errorf(\"the client authentication requires both %s and %s environment variables to be set\", env.HCPClientID, env.HCPClientSecret),\n\t\t}\n\t}\n\n\tcl, err := httpclient.New(httpclient.Config{\n\t\tSourceChannel: fmt.Sprintf(\"packer\/%s\", version.PackerVersion.FormattedVersion()),\n\t})\n\tif err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\tclient := &Client{\n\t\tPacker: packerSvc.New(cl, nil),\n\t\tOrganization: organizationSvc.New(cl, nil),\n\t\tProject: projectSvc.New(cl, nil),\n\t}\n\n\tif err := client.loadOrganizationID(); err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\tif err := client.loadProjectID(); err != nil {\n\t\treturn nil, &ClientError{\n\t\t\tStatusCode: InvalidClientConfig,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\treturn client, nil\n}\n\nfunc (c *Client) loadOrganizationID() error {\n\t\/\/ Get the organization ID.\n\tlistOrgParams := organizationSvc.NewOrganizationServiceListParams()\n\tlistOrgResp, err := c.Organization.OrganizationServiceList(listOrgParams, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to fetch organization list: %v\", err)\n\t}\n\torgLen := len(listOrgResp.Payload.Organizations)\n\tif orgLen != 1 {\n\t\treturn fmt.Errorf(\"unexpected number of organizations: expected 1, actual: %v\", orgLen)\n\t}\n\tc.OrganizationID = listOrgResp.Payload.Organizations[0].ID\n\treturn nil\n}\n\nfunc (c *Client) loadProjectID() error {\n\t\/\/ Get the project using the organization ID.\n\tlistProjParams := projectSvc.NewProjectServiceListParams()\n\tlistProjParams.ScopeID = &c.OrganizationID\n\tscopeType := string(rmmodels.HashicorpCloudResourcemanagerResourceIDResourceTypeORGANIZATION)\n\tlistProjParams.ScopeType = 
&scopeType\n\tlistProjResp, err := c.Project.ProjectServiceList(listProjParams, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to fetch project id: %v\", err)\n\t}\n\tif len(listProjResp.Payload.Projects) > 1 {\n\t\treturn fmt.Errorf(\"this version of Packer does not support multiple projects\")\n\t}\n\tc.ProjectID = listProjResp.Payload.Projects[0].ID\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Package storage hold and abstraction of the filesystem\n\npackage storage\n\nimport (\n\t\"io\"\n\t\"io\/fs\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ Storage is an abstraction of the filesystem\ntype Storage interface {\n\tfs.FS\n\t\/\/ WriteFile writes data to the named file, creating it if necessary. If the file does not exist, WriteFile creates it with permissions perm (before umask); otherwise WriteFile truncates it before writing, without changing permissions.\n\tWriteFile(name string, data []byte, perm fs.FileMode) error\n\t\/\/ Mkdir creates a new directory with the specified name and permission bits (before umask). If there is an error, it will be of type *PathError.\n\tMkdir(name string, perm fs.FileMode) error\n\t\/\/ RemoveAll removes path and any children it contains. It removes everything it can but returns the first error it encounters. If the path does not exist, RemoveAll returns nil (no error). If there is an error, it will be of type *PathError.\n\tRemoveAll(name string) error\n\t\/\/ Create creates or truncates the named file. If the file already exists, it is truncated. If the file does not exist, it is created with mode 0666 (before umask). If successful, methods on the returned File can be used for I\/O; the associated file descriptor has mode O_RDWR. If there is an error, it will be of type *PathError.\n\tCreate(name string) (File, error)\n}\n\ntype File interface {\n\tfs.File\n\tio.Writer\n}\n\n\/\/ ReadFile returns the content of name in the filesystem\nfunc ReadFile(fs Storage, name string) ([]byte, error) {\n\tf, err := fs.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn ioutil.ReadAll(f)\n}\nfunc MkdirAll(fs Storage, dir string, perm fs.FileMode) error {\n\tlist := make([]string, 0)\n\tfor dir := filepath.Dir(dir); dir != string(filepath.Separator) && dir != \".\"; dir = filepath.Dir(dir) {\n\t\tlist = append(list, dir)\n\t}\n\tfor i := len(list); i > 0; i-- {\n\t\terr := fs.Mkdir(list[i-1], perm)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n\n}\nfix(MkdirAll): the stop condition should work on windows\/\/ Package storage hold and abstraction of the filesystem\n\npackage storage\n\nimport (\n\t\"io\"\n\t\"io\/fs\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ Storage is an abstraction of the filesystem\ntype Storage interface {\n\tfs.FS\n\t\/\/ WriteFile writes data to the named file, creating it if necessary. If the file does not exist, WriteFile creates it with permissions perm (before umask); otherwise WriteFile truncates it before writing, without changing permissions.\n\tWriteFile(name string, data []byte, perm fs.FileMode) error\n\t\/\/ Mkdir creates a new directory with the specified name and permission bits (before umask). If there is an error, it will be of type *PathError.\n\tMkdir(name string, perm fs.FileMode) error\n\t\/\/ RemoveAll removes path and any children it contains. It removes everything it can but returns the first error it encounters. If the path does not exist, RemoveAll returns nil (no error). 
If there is an error, it will be of type *PathError.\n\tRemoveAll(name string) error\n\t\/\/ Create creates or truncates the named file. If the file already exists, it is truncated. If the file does not exist, it is created with mode 0666 (before umask). If successful, methods on the returned File can be used for I\/O; the associated file descriptor has mode O_RDWR. If there is an error, it will be of type *PathError.\n\tCreate(name string) (File, error)\n}\n\ntype File interface {\n\tfs.File\n\tio.Writer\n}\n\n\/\/ ReadFile returns the content of name in the filesystem\nfunc ReadFile(fs Storage, name string) ([]byte, error) {\n\tf, err := fs.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn ioutil.ReadAll(f)\n}\nfunc MkdirAll(fs Storage, dir string, perm fs.FileMode) error {\n\tlist := make([]string, 0)\n\tstop := \"\"\n\t\/\/ Collect every ancestor directory by walking up with filepath.Dir until\n\t\/\/ the path stops changing; unlike comparing against \"\/\" or \".\", this stop\n\t\/\/ condition also terminates on Windows-style roots.\n\tfor dir := filepath.Dir(dir); dir != stop; dir = filepath.Dir(dir) {\n\t\tlist = append(list, dir)\n\t\tstop = dir\n\t}\n\tfor i := len(list); i > 0; i-- {\n\t\terr := fs.Mkdir(list[i-1], perm)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2013-2014 The btcsuite developers\n\/\/ Copyright (c) 2015-2021 The Decred developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package version provides a single location to house the version information\n\/\/ for dcrd and other utilities provided in the same repository.\npackage version\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ semanticAlphabet defines the allowed characters for the pre-release and\n\t\/\/ build metadata portions of a semantic version string.\n\tsemanticAlphabet = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-.\"\n)\n\n\/\/ semverRE is a regular expression used to parse a semantic version string into\n\/\/ its constituent parts.\nvar semverRE = regexp.MustCompile(`^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)` +\n\t`(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*` +\n\t`[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$`)\n\n\/\/ These variables define the application version and follow the semantic\n\/\/ versioning 2.0.0 spec (https:\/\/semver.org\/).\nvar (\n\t\/\/ Note for maintainers:\n\t\/\/\n\t\/\/ The expected process for setting the version in releases is as follows:\n\t\/\/ - Create a release branch of the form 'release-vMAJOR.MINOR'\n\t\/\/ - Modify the Version variable below on that branch to:\n\t\/\/ - Remove the pre-release portion\n\t\/\/ - Set the build metadata to 'release.local'\n\t\/\/ - Update the Version variable below on the master branch to the next\n\t\/\/ expected version while retaining a pre-release of 'pre'\n\t\/\/\n\t\/\/ These steps ensure that building from source produces versions that are\n\t\/\/ distinct from reproducible builds that override the Version via linker\n\t\/\/ flags.\n\n\t\/\/ Version is the application version per the semantic versioning 2.0.0 spec\n\t\/\/ (https:\/\/semver.org\/).\n\t\/\/\n\t\/\/ It is defined as a variable so it can be overridden during the build\n\t\/\/ process with:\n\t\/\/ '-ldflags \"-X github.com\/decred\/dcrd\/internal\/version.Version=fullsemver\"'\n\t\/\/ if needed.\n\t\/\/\n\t\/\/ It MUST be a full semantic version per the semantic versioning spec or\n\t\/\/ the package will panic at runtime. 
Of particular note is the pre-release\n\t\/\/ and build metadata portions MUST only contain characters from\n\t\/\/ semanticAlphabet.\n\tVersion = \"1.7.0-pre\"\n\n\t\/\/ NOTE: The following values are set via init by parsing the above Version\n\t\/\/ string.\n\n\t\/\/ These fields are the individual semantic version components that define\n\t\/\/ the application version.\n\tMajor uint32\n\tMinor uint32\n\tPatch uint32\n\tPreRelease string\n\tBuildMetadata string\n)\n\n\/\/ parseUint32 converts the passed string to an unsigned integer or returns an\n\/\/ error if it is invalid.\nfunc parseUint32(s string, fieldName string) (uint32, error) {\n\tval, err := strconv.ParseUint(s, 10, 32)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"malformed semver %s: %w\", fieldName, err)\n\t}\n\treturn uint32(val), err\n}\n\n\/\/ checkSemString returns an error if the passed string contains characters that\n\/\/ are not in the provided alphabet.\nfunc checkSemString(s, alphabet, fieldName string) error {\n\tfor _, r := range s {\n\t\tif !strings.ContainsRune(alphabet, r) {\n\t\t\treturn fmt.Errorf(\"malformed semver %s: %q invalid\", fieldName, r)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ parseSemVer parses various semver components from the provided string.\nfunc parseSemVer(s string) (uint32, uint32, uint32, string, string, error) {\n\t\/\/ Parse the various semver component from the version string via a regular\n\t\/\/ expression.\n\tm := semverRE.FindStringSubmatch(s)\n\tif m == nil {\n\t\terr := fmt.Errorf(\"malformed version string %q: does not conform to \"+\n\t\t\t\"semver specification\", s)\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tmajor, err := parseUint32(m[1], \"major\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tminor, err := parseUint32(m[2], \"minor\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tpatch, err := parseUint32(m[3], \"patch\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tpreRel := m[4]\n\terr = checkSemString(preRel, semanticAlphabet, \"pre-release\")\n\tif err != nil {\n\t\treturn 0, 0, 0, s, s, err\n\t}\n\n\tbuild := m[5]\n\terr = checkSemString(build, semanticAlphabet, \"buildmetadata\")\n\tif err != nil {\n\t\treturn 0, 0, 0, s, s, err\n\t}\n\n\treturn major, minor, patch, preRel, build, nil\n}\n\nfunc init() {\n\tvar err error\n\tMajor, Minor, Patch, PreRelease, BuildMetadata, err = parseSemVer(Version)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif BuildMetadata == \"\" {\n\t\tBuildMetadata = vcsCommitID()\n\t\tif BuildMetadata != \"\" {\n\t\t\tVersion = fmt.Sprintf(\"%d.%d.%d\", Major, Minor, Patch)\n\t\t\tif PreRelease != \"\" {\n\t\t\t\tVersion += \"-\" + PreRelease\n\t\t\t}\n\t\t\tVersion += \"+\" + BuildMetadata\n\t\t}\n\t}\n}\n\n\/\/ String returns the application version as a properly formed string per the\n\/\/ semantic versioning 2.0.0 spec (https:\/\/semver.org\/).\nfunc String() string {\n\treturn Version\n}\n\n\/\/ NormalizeString returns the passed string stripped of all characters which\n\/\/ are not valid according to the semantic versioning guidelines for pre-release\n\/\/ and build metadata strings. 
In particular they MUST only contain characters\n\/\/ in semanticAlphabet.\nfunc NormalizeString(str string) string {\n\tvar result bytes.Buffer\n\tfor _, r := range str {\n\t\tif strings.ContainsRune(semanticAlphabet, r) {\n\t\t\tresult.WriteRune(r)\n\t\t}\n\t}\n\treturn result.String()\n}\nrelease: Bump for 1.8 release cycle.\/\/ Copyright (c) 2013-2014 The btcsuite developers\n\/\/ Copyright (c) 2015-2021 The Decred developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package version provides a single location to house the version information\n\/\/ for dcrd and other utilities provided in the same repository.\npackage version\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ semanticAlphabet defines the allowed characters for the pre-release and\n\t\/\/ build metadata portions of a semantic version string.\n\tsemanticAlphabet = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-.\"\n)\n\n\/\/ semverRE is a regular expression used to parse a semantic version string into\n\/\/ its constituent parts.\nvar semverRE = regexp.MustCompile(`^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)` +\n\t`(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*` +\n\t`[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$`)\n\n\/\/ These variables define the application version and follow the semantic\n\/\/ versioning 2.0.0 spec (https:\/\/semver.org\/).\nvar (\n\t\/\/ Note for maintainers:\n\t\/\/\n\t\/\/ The expected process for setting the version in releases is as follows:\n\t\/\/ - Create a release branch of the form 'release-vMAJOR.MINOR'\n\t\/\/ - Modify the Version variable below on that branch to:\n\t\/\/ - Remove the pre-release portion\n\t\/\/ - Set the build metadata to 'release.local'\n\t\/\/ - Update the Version variable below on the master branch to the next\n\t\/\/ expected version while retaining a pre-release of 'pre'\n\t\/\/\n\t\/\/ These steps ensure that building from source produces versions that are\n\t\/\/ distinct from reproducible builds that override the Version via linker\n\t\/\/ flags.\n\n\t\/\/ Version is the application version per the semantic versioning 2.0.0 spec\n\t\/\/ (https:\/\/semver.org\/).\n\t\/\/\n\t\/\/ It is defined as a variable so it can be overridden during the build\n\t\/\/ process with:\n\t\/\/ '-ldflags \"-X github.com\/decred\/dcrd\/internal\/version.Version=fullsemver\"'\n\t\/\/ if needed.\n\t\/\/\n\t\/\/ It MUST be a full semantic version per the semantic versioning spec or\n\t\/\/ the package will panic at runtime. 
Of particular note is the pre-release\n\t\/\/ and build metadata portions MUST only contain characters from\n\t\/\/ semanticAlphabet.\n\tVersion = \"1.8.0-pre\"\n\n\t\/\/ NOTE: The following values are set via init by parsing the above Version\n\t\/\/ string.\n\n\t\/\/ These fields are the individual semantic version components that define\n\t\/\/ the application version.\n\tMajor uint32\n\tMinor uint32\n\tPatch uint32\n\tPreRelease string\n\tBuildMetadata string\n)\n\n\/\/ parseUint32 converts the passed string to an unsigned integer or returns an\n\/\/ error if it is invalid.\nfunc parseUint32(s string, fieldName string) (uint32, error) {\n\tval, err := strconv.ParseUint(s, 10, 32)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"malformed semver %s: %w\", fieldName, err)\n\t}\n\treturn uint32(val), err\n}\n\n\/\/ checkSemString returns an error if the passed string contains characters that\n\/\/ are not in the provided alphabet.\nfunc checkSemString(s, alphabet, fieldName string) error {\n\tfor _, r := range s {\n\t\tif !strings.ContainsRune(alphabet, r) {\n\t\t\treturn fmt.Errorf(\"malformed semver %s: %q invalid\", fieldName, r)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ parseSemVer parses various semver components from the provided string.\nfunc parseSemVer(s string) (uint32, uint32, uint32, string, string, error) {\n\t\/\/ Parse the various semver component from the version string via a regular\n\t\/\/ expression.\n\tm := semverRE.FindStringSubmatch(s)\n\tif m == nil {\n\t\terr := fmt.Errorf(\"malformed version string %q: does not conform to \"+\n\t\t\t\"semver specification\", s)\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tmajor, err := parseUint32(m[1], \"major\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tminor, err := parseUint32(m[2], \"minor\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tpatch, err := parseUint32(m[3], \"patch\")\n\tif err != nil {\n\t\treturn 0, 0, 0, \"\", \"\", err\n\t}\n\n\tpreRel := m[4]\n\terr = checkSemString(preRel, semanticAlphabet, \"pre-release\")\n\tif err != nil {\n\t\treturn 0, 0, 0, s, s, err\n\t}\n\n\tbuild := m[5]\n\terr = checkSemString(build, semanticAlphabet, \"buildmetadata\")\n\tif err != nil {\n\t\treturn 0, 0, 0, s, s, err\n\t}\n\n\treturn major, minor, patch, preRel, build, nil\n}\n\nfunc init() {\n\tvar err error\n\tMajor, Minor, Patch, PreRelease, BuildMetadata, err = parseSemVer(Version)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif BuildMetadata == \"\" {\n\t\tBuildMetadata = vcsCommitID()\n\t\tif BuildMetadata != \"\" {\n\t\t\tVersion = fmt.Sprintf(\"%d.%d.%d\", Major, Minor, Patch)\n\t\t\tif PreRelease != \"\" {\n\t\t\t\tVersion += \"-\" + PreRelease\n\t\t\t}\n\t\t\tVersion += \"+\" + BuildMetadata\n\t\t}\n\t}\n}\n\n\/\/ String returns the application version as a properly formed string per the\n\/\/ semantic versioning 2.0.0 spec (https:\/\/semver.org\/).\nfunc String() string {\n\treturn Version\n}\n\n\/\/ NormalizeString returns the passed string stripped of all characters which\n\/\/ are not valid according to the semantic versioning guidelines for pre-release\n\/\/ and build metadata strings. In particular they MUST only contain characters\n\/\/ in semanticAlphabet.\nfunc NormalizeString(str string) string {\n\tvar result bytes.Buffer\n\tfor _, r := range str {\n\t\tif strings.ContainsRune(semanticAlphabet, r) {\n\t\t\tresult.WriteRune(r)\n\t\t}\n\t}\n\treturn result.String()\n}\n<|endoftext|>"} {"text":"package of10\n\nimport (\n\t\"net\"\n\n\t. 
\"github.com\/oshothebig\/goflow\/openflow\"\n)\n\ntype ActionType uint16\n\nconst (\n\tOFPAT_OUTPUT ActionType = iota\n\tOFPAT_SET_VLAN_VID\n\tOFPAT_SET_VLAN_PCP\n\tOFPAT_SET_STRIP_VLAN\n\tOFPAT_SET_DL_SRC\n\tOFPAT_SET_DL_DST\n\tOFPAT_SET_NW_SRC\n\tOFPAT_SET_NW_DST\n\tOFPAT_SET_NW_TOS\n\tOFPAT_SET_TP_SRC\n\tOFPAT_SET_TP_DST\n\tOFPAT_ENQUEUE\n\tOFPAT_VENDOR ActionType = 0xffff\n)\n\nvar ActionTypes = struct {\n\tOutput ActionType\n\tSetVlanId ActionType\n\tSetVlanPcp ActionType\n\tStripVlan ActionType\n\tSetEtherSrc ActionType\n\tSetEtherDst ActionType\n\tSetIpSrc ActionType\n\tSetIpDst ActionType\n\tSetIpTos ActionType\n\tSetNetworkSrc ActionType\n\tSetNetworkDst ActionType\n\tEnqueue ActionType\n\tVendor ActionType\n}{\n\tOFPAT_OUTPUT,\n\tOFPAT_SET_VLAN_VID,\n\tOFPAT_SET_VLAN_PCP,\n\tOFPAT_SET_STRIP_VLAN,\n\tOFPAT_SET_DL_SRC,\n\tOFPAT_SET_DL_DST,\n\tOFPAT_SET_NW_SRC,\n\tOFPAT_SET_NW_DST,\n\tOFPAT_SET_NW_TOS,\n\tOFPAT_SET_TP_SRC,\n\tOFPAT_SET_TP_DST,\n\tOFPAT_ENQUEUE,\n\tOFPAT_VENDOR,\n}\n\ntype Action interface {\n\tPacketizable\n\tGetType() ActionType\n}\n\ntype ActionHeader struct {\n\tType ActionType\n\tLength uint16\n}\n\nfunc (header *ActionHeader) GetType() ActionType {\n\treturn header.Type\n}\n\ntype SendOutPort struct {\n\tActionHeader\n\tPort PortNumber\n\tMaxLength uint16\n}\n\ntype Enqueue struct {\n\tActionHeader\n\tPort PortNumber\n\tpad [6]uint8\n\tQueueId uint32\n}\n\ntype SetVlanVid struct {\n\tActionHeader\n\tVlanId VlanId\n\tpad [2]uint32\n}\n\ntype SetVlanPcp struct {\n\tActionHeader\n\tVlanPcp VlanPriority\n\tpad [3]uint8\n}\n\ntype SetEtherAddress struct {\n\tActionHeader\n\tEtherAddress net.HardwareAddr\n\tpad [6]uint8\n}\n\ntype SetIpAddress struct {\n\tActionHeader\n\tIpAddress net.IP\n}\n\ntype SetIpTos struct {\n\tActionHeader\n\tIpTos Dscp\n\tpad [3]uint8\n}\n\ntype SetTransportPort struct {\n\tActionHeader\n\tTransportPort NetworkPort\n\tpad [2]uint8\n}\n\ntype VendorHeader struct {\n\tActionHeader\n\tVendor VendorId\n}\n\ntype VendorId uint32\n\nShorten field names to remove redundancy\n\npackage of10\n\nimport (\n\t\"net\"\n\n\t. 
\"github.com\/oshothebig\/goflow\/openflow\"\n)\n\ntype ActionType uint16\n\nconst (\n\tOFPAT_OUTPUT ActionType = iota\n\tOFPAT_SET_VLAN_VID\n\tOFPAT_SET_VLAN_PCP\n\tOFPAT_SET_STRIP_VLAN\n\tOFPAT_SET_DL_SRC\n\tOFPAT_SET_DL_DST\n\tOFPAT_SET_NW_SRC\n\tOFPAT_SET_NW_DST\n\tOFPAT_SET_NW_TOS\n\tOFPAT_SET_TP_SRC\n\tOFPAT_SET_TP_DST\n\tOFPAT_ENQUEUE\n\tOFPAT_VENDOR ActionType = 0xffff\n)\n\nvar ActionTypes = struct {\n\tOutput ActionType\n\tSetVlanId ActionType\n\tSetVlanPcp ActionType\n\tStripVlan ActionType\n\tSetEtherSrc ActionType\n\tSetEtherDst ActionType\n\tSetIpSrc ActionType\n\tSetIpDst ActionType\n\tSetIpTos ActionType\n\tSetNetworkSrc ActionType\n\tSetNetworkDst ActionType\n\tEnqueue ActionType\n\tVendor ActionType\n}{\n\tOFPAT_OUTPUT,\n\tOFPAT_SET_VLAN_VID,\n\tOFPAT_SET_VLAN_PCP,\n\tOFPAT_SET_STRIP_VLAN,\n\tOFPAT_SET_DL_SRC,\n\tOFPAT_SET_DL_DST,\n\tOFPAT_SET_NW_SRC,\n\tOFPAT_SET_NW_DST,\n\tOFPAT_SET_NW_TOS,\n\tOFPAT_SET_TP_SRC,\n\tOFPAT_SET_TP_DST,\n\tOFPAT_ENQUEUE,\n\tOFPAT_VENDOR,\n}\n\ntype Action interface {\n\tPacketizable\n\tGetType() ActionType\n}\n\ntype ActionHeader struct {\n\tType ActionType\n\tLength uint16\n}\n\nfunc (header *ActionHeader) GetType() ActionType {\n\treturn header.Type\n}\n\ntype SendOutPort struct {\n\tActionHeader\n\tPort PortNumber\n\tMaxLength uint16\n}\n\ntype Enqueue struct {\n\tActionHeader\n\tPort PortNumber\n\tpad [6]uint8\n\tQueueId uint32\n}\n\ntype SetVlanVid struct {\n\tActionHeader\n\tId VlanId\n\tpad [2]uint32\n}\n\ntype SetVlanPcp struct {\n\tActionHeader\n\tPriority VlanPriority\n\tpad [3]uint8\n}\n\ntype SetEtherAddress struct {\n\tActionHeader\n\tAddress net.HardwareAddr\n\tpad [6]uint8\n}\n\ntype SetIpAddress struct {\n\tActionHeader\n\tAddress net.IP\n}\n\ntype SetIpTos struct {\n\tActionHeader\n\tTos Dscp\n\tpad [3]uint8\n}\n\ntype SetTransportPort struct {\n\tActionHeader\n\tPort NetworkPort\n\tpad [2]uint8\n}\n\ntype VendorHeader struct {\n\tActionHeader\n\tVendor VendorId\n}\n\ntype VendorId uint32\n<|endoftext|>"} {"text":"package chalk\n\nconst (\n\tbgBlack attribute = iota + 40\n\tbgRed\n\tbgGreen\n\tbgYellow\n\tbgBlue\n\tbgMagenta\n\tbgCyan\n\tbgWhite\n)\n\n\/\/ BlackBackground reports Formatter with Black as initial format\nfunc BlackBackground() Formatter {\n\treturn Formatter{bgBlack}\n}\n\n\/\/ AsBlackBackground reports Black string based on provided content\nfunc AsBlackBackground(a ...interface{}) string {\n\treturn BlackBackground().Sprint(a...)\n}\n\n\/\/ BlackBackground reports Formatter with Black as additional format\nfunc (f Formatter) BlackBackground() Formatter {\n\treturn append(f, bgBlack)\n}\n\n\/\/ RedBackground reports Formatter with Red as initial format\nfunc RedBackground() Formatter {\n\treturn Formatter{bgRed}\n}\n\n\/\/ AsRedBackground reports Red string based on provided content\nfunc AsRedBackground(a ...interface{}) string {\n\treturn Red().Sprint(a...)\n}\n\n\/\/ RedBackground reports Formatter with Red as additional format\nfunc (f Formatter) RedBackground() Formatter {\n\treturn append(f, bgRed)\n}\n\n\/\/ GreenBackground reports Formatter with Green as initial format\nfunc GreenBackground() Formatter {\n\treturn Formatter{bgGreen}\n}\n\n\/\/ AsGreenBackground reports Green string based on provided content\nfunc AsGreenBackground(a ...interface{}) string {\n\treturn Green().Sprint(a...)\n}\n\n\/\/ GreenBackground reports Formatter with Green as additional format\nfunc (f Formatter) GreenBackground() Formatter {\n\treturn append(f, bgGreen)\n}\n\n\/\/ YellowBackground reports Formatter with Yellow as 
initial format\nfunc YellowBackground() Formatter {\n\treturn Formatter{bgYellow}\n}\n\n\/\/ AsYellowBackground reports Yellow string based on provided content\nfunc AsYellowBackground(a ...interface{}) string {\n\treturn Yellow().Sprint(a...)\n}\n\n\/\/ YellowBackground reports Formatter with Yellow as additional format\nfunc (f Formatter) YellowBackground() Formatter {\n\treturn append(f, bgYellow)\n}\n\n\/\/ BlueBackground reports Formatter with Blue as initial format\nfunc BlueBackground() Formatter {\n\treturn Formatter{bgBlue}\n}\n\n\/\/ AsBlueBackground reports Blue string based on provided content\nfunc AsBlueBackground(a ...interface{}) string {\n\treturn Blue().Sprint(a...)\n}\n\n\/\/ BlueBackground reports Formatter with Blue as additional format\nfunc (f Formatter) BlueBackground() Formatter {\n\treturn append(f, bgBlue)\n}\n\n\/\/ MagentaBackground reports Formatter with Magenta as initial format\nfunc MagentaBackground() Formatter {\n\treturn Formatter{bgMagenta}\n}\n\n\/\/ AsMagentaBackground reports Magenta string based on provided content\nfunc AsMagentaBackground(a ...interface{}) string {\n\treturn Magenta().Sprint(a...)\n}\n\n\/\/ MagentaBackground reports Formatter with Magenta as additional format\nfunc (f Formatter) MagentaBackground() Formatter {\n\treturn append(f, bgMagenta)\n}\n\n\/\/ CyanBackground reports Formatter with Cyan as initial format\nfunc CyanBackground() Formatter {\n\treturn Formatter{bgCyan}\n}\n\n\/\/ AsCyanBackground reports Cyan string based on provided content\nfunc AsCyanBackground(a ...interface{}) string {\n\treturn Cyan().Sprint(a...)\n}\n\n\/\/ CyanBackground reports Formatter with Cyan as additional format\nfunc (f Formatter) CyanBackground() Formatter {\n\treturn append(f, bgCyan)\n}\n\n\/\/ WhiteBackground reports Formatter with White as initial format\nfunc WhiteBackground() Formatter {\n\treturn Formatter{bgWhite}\n}\n\n\/\/ AsWhiteBackground reports White string based on provided content\nfunc AsWhiteBackground(a ...interface{}) string {\n\treturn White().Sprint(a...)\n}\n\n\/\/ WhiteBackground reports Formatter with White as additional format\nfunc (f Formatter) WhiteBackground() Formatter {\n\treturn append(f, bgWhite)\n}\n\n++missing comment\n\npackage chalk\n\n\/\/ Background text colors\nconst (\n\tbgBlack attribute = iota + 40\n\tbgRed\n\tbgGreen\n\tbgYellow\n\tbgBlue\n\tbgMagenta\n\tbgCyan\n\tbgWhite\n)\n\n\/\/ BlackBackground reports Formatter with Black as initial format\nfunc BlackBackground() Formatter {\n\treturn Formatter{bgBlack}\n}\n\n\/\/ AsBlackBackground reports Black string based on provided content\nfunc AsBlackBackground(a ...interface{}) string {\n\treturn BlackBackground().Sprint(a...)\n}\n\n\/\/ BlackBackground reports Formatter with Black as additional format\nfunc (f Formatter) BlackBackground() Formatter {\n\treturn append(f, bgBlack)\n}\n\n\/\/ RedBackground reports Formatter with Red as initial format\nfunc RedBackground() Formatter {\n\treturn Formatter{bgRed}\n}\n\n\/\/ AsRedBackground reports Red string based on provided content\nfunc AsRedBackground(a ...interface{}) string {\n\treturn RedBackground().Sprint(a...)\n}\n\n\/\/ RedBackground reports Formatter with Red as additional format\nfunc (f Formatter) RedBackground() Formatter {\n\treturn append(f, bgRed)\n}\n\n\/\/ GreenBackground reports Formatter with Green as initial format\nfunc GreenBackground() Formatter {\n\treturn Formatter{bgGreen}\n}\n\n\/\/ AsGreenBackground reports Green string based on provided content\nfunc AsGreenBackground(a ...interface{}) string {\n\treturn GreenBackground().Sprint(a...)\n}\n\n\/\/ GreenBackground reports Formatter with Green as additional format\nfunc (f Formatter) GreenBackground() Formatter {\n\treturn append(f, bgGreen)\n}\n\n\/\/ YellowBackground reports Formatter with Yellow as initial format\nfunc YellowBackground() Formatter {\n\treturn Formatter{bgYellow}\n}\n\n\/\/ AsYellowBackground reports Yellow string based on provided content\nfunc AsYellowBackground(a ...interface{}) string {\n\treturn YellowBackground().Sprint(a...)\n}\n\n\/\/ YellowBackground reports Formatter with Yellow as additional format\nfunc (f Formatter) YellowBackground() Formatter {\n\treturn append(f, bgYellow)\n}\n\n\/\/ BlueBackground reports Formatter with Blue as initial format\nfunc BlueBackground() Formatter {\n\treturn Formatter{bgBlue}\n}\n\n\/\/ AsBlueBackground reports Blue string based on provided content\nfunc AsBlueBackground(a ...interface{}) string {\n\treturn BlueBackground().Sprint(a...)\n}\n\n\/\/ BlueBackground reports Formatter with Blue as additional format\nfunc (f Formatter) BlueBackground() Formatter {\n\treturn append(f, bgBlue)\n}\n\n\/\/ MagentaBackground reports Formatter with Magenta as initial format\nfunc MagentaBackground() Formatter {\n\treturn Formatter{bgMagenta}\n}\n\n\/\/ AsMagentaBackground reports Magenta string based on provided content\nfunc AsMagentaBackground(a ...interface{}) string {\n\treturn MagentaBackground().Sprint(a...)\n}\n\n\/\/ MagentaBackground reports Formatter with Magenta as additional format\nfunc (f Formatter) MagentaBackground() Formatter {\n\treturn append(f, bgMagenta)\n}\n\n\/\/ CyanBackground reports Formatter with Cyan as initial format\nfunc CyanBackground() Formatter {\n\treturn Formatter{bgCyan}\n}\n\n\/\/ AsCyanBackground reports Cyan string based on provided content\nfunc AsCyanBackground(a ...interface{}) string {\n\treturn CyanBackground().Sprint(a...)\n}\n\n\/\/ CyanBackground reports Formatter with Cyan as additional format\nfunc (f Formatter) CyanBackground() Formatter {\n\treturn append(f, bgCyan)\n}\n\n\/\/ WhiteBackground reports Formatter with White as initial format\nfunc WhiteBackground() Formatter {\n\treturn Formatter{bgWhite}\n}\n\n\/\/ AsWhiteBackground reports White string based on provided content\nfunc AsWhiteBackground(a ...interface{}) string {\n\treturn WhiteBackground().Sprint(a...)\n}\n\n\/\/ WhiteBackground reports Formatter with White as additional format\nfunc (f Formatter) WhiteBackground() Formatter {\n\treturn append(f, bgWhite)\n}\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package spinner is a simple package to add a spinner \/ progress indicator to any terminal application.\npackage spinner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/fatih\/color\"\n)\n\n\/\/ errInvalidColor is returned when attempting to set an invalid color\nvar errInvalidColor = errors.New(\"invalid color\")\n\n\/\/ validColors holds an array of the only colors 
allowed\nvar validColors = []string{\"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\", \"white\"}\n\n\/\/ validColor will make sure the given color is actually allowed\nfunc validColor(c string) bool {\n\tfor _, i := range validColors {\n\t\tif c == i {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Spinner struct to hold the provided options\ntype Spinner struct {\n\tDelay time.Duration \/\/ Delay is the speed of the indicator\n\tchars []string \/\/ chars holds the chosen character set\n\tPrefix string \/\/ Prefix is the text preppended to the indicator\n\tSuffix string \/\/ Suffix is the text appended to the indicator\n\tFinalMSG string \/\/ string displayed after Stop() is called\n\tlastOutput string \/\/ last character(set) written\n\tcolor func(a ...interface{}) string \/\/ default color is white\n\tactive bool \/\/ active holds the state of the spinner\n\tlock *sync.RWMutex \/\/ Lock useed for\n\tWriter io.Writer \/\/ to make testing better, exported so users have access\n\tstopChan chan struct{} \/\/ stopChan is a channel used to stop the indicator\n}\n\n\/\/ New provides a pointer to an instance of Spinner with the supplied options\nfunc New(cs []string, d time.Duration) *Spinner {\n\treturn &Spinner{\n\t\tDelay: d,\n\t\tchars: cs,\n\t\tcolor: color.New(color.FgWhite).SprintFunc(),\n\t\tactive: false,\n\t\tlock: &sync.RWMutex{},\n\t\tWriter: color.Output,\n\t\tstopChan: make(chan struct{}, 1),\n\t}\n}\n\n\/\/ Start will start the indicator\nfunc (s *Spinner) Start() {\n\tif s.active {\n\t\treturn\n\t}\n\ts.active = true\n\n\tgo func() {\n\t\tfor {\n\t\t\tfor i := 0; i < len(s.chars); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase <-s.stopChan:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Fprint(s.Writer, fmt.Sprintf(\"%s%s%s \", s.Prefix, s.color(s.chars[i]), s.Suffix))\n\t\t\t\t\tout := fmt.Sprintf(\"%s%s%s \", s.Prefix, s.chars[i], s.Suffix)\n\t\t\t\t\ts.lastOutput = out\n\t\t\t\t\ts.lock.RLock()\n\t\t\t\t\ttime.Sleep(s.Delay)\n\t\t\t\t\ts.lock.RUnlock()\n\t\t\t\t\ts.erase(out)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Stop stops the indicator\nfunc (s *Spinner) Stop() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tif s.active {\n\t\ts.stopChan <- struct{}{}\n\t\ts.active = false\n\t\tif s.FinalMSG != \"\" {\n\t\t\tfmt.Fprintf(s.Writer, s.FinalMSG)\n\t\t}\n\t}\n}\n\n\/\/ Restart will stop and start the indicator\nfunc (s *Spinner) Restart() {\n\ts.Stop()\n\ts.Start()\n}\n\n\/\/ Reverse will reverse the order of the slice assigned to the indicator\nfunc (s *Spinner) Reverse() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tfor i, j := 0, len(s.chars)-1; i < j; i, j = i+1, j-1 {\n\t\ts.chars[i], s.chars[j] = s.chars[j], s.chars[i]\n\t}\n}\n\n\/\/ Color will set the struct field for the given color to be used\nfunc (s *Spinner) Color(c string) error {\n\tif validColor(c) {\n\t\tswitch c {\n\t\tcase \"red\":\n\t\t\ts.color = color.New(color.FgRed).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"yellow\":\n\t\t\ts.color = color.New(color.FgYellow).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"green\":\n\t\t\ts.color = color.New(color.FgGreen).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"magenta\":\n\t\t\ts.color = color.New(color.FgMagenta).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"blue\":\n\t\t\ts.color = color.New(color.FgBlue).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"cyan\":\n\t\t\ts.color = color.New(color.FgCyan).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"white\":\n\t\t\ts.color = 
color.New(color.FgWhite).SprintFunc()\n\t\t\ts.Restart()\n\t\tdefault:\n\t\t\treturn errInvalidColor\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ UpdateSpeed will set the indicator delay to the given value\nfunc (s *Spinner) UpdateSpeed(d time.Duration) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.Delay = d\n}\n\n\/\/ UpdateCharSet will change the current character set to the given one\nfunc (s *Spinner) UpdateCharSet(cs []string) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.chars = cs\n}\n\n\/\/ erase deletes written characters\nfunc (s *Spinner) erase(a string) {\n\tn := utf8.RuneCountInString(a)\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\tfor i := 0; i < n; i++ {\n\t\tfmt.Fprintf(s.Writer, \"\\b\")\n\t}\n}\n\n\/\/ GenerateNumberSequence will generate a slice of integers at the\n\/\/ provided length and convert them each to a string\nfunc GenerateNumberSequence(length int) []string {\n\tvar numSeq []string\n\tfor i := 0; i < length; i++ {\n\t\tnumSeq = append(numSeq, strconv.Itoa(i))\n\t}\n\treturn numSeq\n}\n\nsmall change in struct field ordering\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package spinner is a simple package to add a spinner \/ progress indicator to any terminal application.\npackage spinner\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/fatih\/color\"\n)\n\n\/\/ errInvalidColor is returned when attempting to set an invalid color\nvar errInvalidColor = errors.New(\"invalid color\")\n\n\/\/ validColors holds an array of the only colors allowed\nvar validColors = []string{\"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\", \"white\"}\n\n\/\/ validColor will make sure the given color is actually allowed\nfunc validColor(c string) bool {\n\tfor _, i := range validColors {\n\t\tif c == i {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Spinner struct to hold the provided options\ntype Spinner struct {\n\tDelay time.Duration \/\/ Delay is the speed of the indicator\n\tchars []string \/\/ chars holds the chosen character set\n\tPrefix string \/\/ Prefix is the text prepended to the indicator\n\tSuffix string \/\/ Suffix is the text appended to the indicator\n\tFinalMSG string \/\/ string displayed after Stop() is called\n\tlastOutput string \/\/ last character(set) written\n\tcolor func(a ...interface{}) string \/\/ default color is white\n\tlock *sync.RWMutex \/\/ lock used to synchronize state access\n\tWriter io.Writer \/\/ to make testing better, exported so users have access\n\tactive bool \/\/ active holds the state of the spinner\n\tstopChan chan struct{} \/\/ stopChan is a channel used to stop the indicator\n}\n\n\/\/ New provides a pointer to an instance of Spinner with the supplied options\nfunc New(cs []string, d time.Duration) *Spinner {\n\treturn &Spinner{\n\t\tDelay: d,\n\t\tchars: cs,\n\t\tcolor: color.New(color.FgWhite).SprintFunc(),\n\t\tlock: &sync.RWMutex{},\n\t\tWriter: color.Output,\n\t\tactive: false,\n\t\tstopChan: make(chan 
struct{}, 1),\n\t}\n}\n\n\/\/ Start will start the indicator\nfunc (s *Spinner) Start() {\n\tif s.active {\n\t\treturn\n\t}\n\ts.active = true\n\n\tgo func() {\n\t\tfor {\n\t\t\tfor i := 0; i < len(s.chars); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase <-s.stopChan:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Fprint(s.Writer, fmt.Sprintf(\"%s%s%s \", s.Prefix, s.color(s.chars[i]), s.Suffix))\n\t\t\t\t\tout := fmt.Sprintf(\"%s%s%s \", s.Prefix, s.chars[i], s.Suffix)\n\t\t\t\t\ts.lastOutput = out\n\t\t\t\t\ts.lock.RLock()\n\t\t\t\t\ttime.Sleep(s.Delay)\n\t\t\t\t\ts.lock.RUnlock()\n\t\t\t\t\ts.erase(out)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Stop stops the indicator\nfunc (s *Spinner) Stop() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tif s.active {\n\t\ts.stopChan <- struct{}{}\n\t\ts.active = false\n\t\tif s.FinalMSG != \"\" {\n\t\t\tfmt.Fprintf(s.Writer, s.FinalMSG)\n\t\t}\n\t}\n}\n\n\/\/ Restart will stop and start the indicator\nfunc (s *Spinner) Restart() {\n\ts.Stop()\n\ts.Start()\n}\n\n\/\/ Reverse will reverse the order of the slice assigned to the indicator\nfunc (s *Spinner) Reverse() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tfor i, j := 0, len(s.chars)-1; i < j; i, j = i+1, j-1 {\n\t\ts.chars[i], s.chars[j] = s.chars[j], s.chars[i]\n\t}\n}\n\n\/\/ Color will set the struct field for the given color to be used\nfunc (s *Spinner) Color(c string) error {\n\tif validColor(c) {\n\t\tswitch c {\n\t\tcase \"red\":\n\t\t\ts.color = color.New(color.FgRed).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"yellow\":\n\t\t\ts.color = color.New(color.FgYellow).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"green\":\n\t\t\ts.color = color.New(color.FgGreen).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"magenta\":\n\t\t\ts.color = color.New(color.FgMagenta).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"blue\":\n\t\t\ts.color = color.New(color.FgBlue).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"cyan\":\n\t\t\ts.color = color.New(color.FgCyan).SprintFunc()\n\t\t\ts.Restart()\n\t\tcase \"white\":\n\t\t\ts.color = color.New(color.FgWhite).SprintFunc()\n\t\t\ts.Restart()\n\t\tdefault:\n\t\t\treturn errInvalidColor\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ UpdateSpeed will set the indicator delay to the given value\nfunc (s *Spinner) UpdateSpeed(d time.Duration) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.Delay = d\n}\n\n\/\/ UpdateCharSet will change the current character set to the given one\nfunc (s *Spinner) UpdateCharSet(cs []string) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.chars = cs\n}\n\n\/\/ erase deletes written characters\nfunc (s *Spinner) erase(a string) {\n\tn := utf8.RuneCountInString(a)\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\tfor i := 0; i < n; i++ {\n\t\tfmt.Fprintf(s.Writer, \"\\b\")\n\t}\n}\n\n\/\/ GenerateNumberSequence will generate a slice of integers at the\n\/\/ provided length and convert them each to a string\nfunc GenerateNumberSequence(length int) []string {\n\tvar numSeq []string\n\tfor i := 0; i < length; i++ {\n\t\tnumSeq = append(numSeq, strconv.Itoa(i))\n\t}\n\treturn numSeq\n}\n<|endoftext|>"} {"text":"package gnuplot\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\/\/ \"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Plotter\ntype Plotter struct {\n\tconfigures map[string]string\n}\n\nfunc NewPlotter() *Plotter {\n\tplotter := new(Plotter)\n\tplotter.configures = map[string]string{}\n\treturn plotter\n}\n\nfunc (p *Plotter) Configure(key, val string) {\n\tp.configures[key] = val\n}\n\nfunc (p *Plotter) GetC(key string) string {\n\treturn 
p.configures[key]\n}\n\nconst DefaultFunction2dSplitNum int = 1000\n\ntype Function2d struct {\n\tplotter Plotter\n\tsplitNum int\n\tf func(float64) float64\n}\n\nfunc NewFunction2d() *Function2d {\n\tfun := new(Function2d)\n\tfun.splitNum = DefaultFunction2dSplitNum\n\tfun.plotter.configures = map[string]string{\n\t\t\"_xMin\": \"-10.0\",\n\t\t\"_xMax\": \"10.0\"}\n\treturn fun\n}\n\nfunc (fun *Function2d) Configure(key, val string) {\n\tfun.plotter.Configure(key, val)\n}\n\nfunc (fun *Function2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tfun.plotter.Configure(key, val)\n\t}\n}\n\nfunc (fun *Function2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tfun.plotter.configures[key] = val\n\t}\n}\n\nfunc (fun *Function2d) GetData() [][2]float64 { \/\/ TODO: テスト書く\n\txMin, _ := strconv.ParseFloat(fun.plotter.configures[\"_xMin\"], 32)\n\txMax, _ := strconv.ParseFloat(fun.plotter.configures[\"_xMax\"], 32)\n\tvar sep = float64(xMax-xMin) \/ float64(fun.splitNum-1)\n\n\tvar a [][2]float64\n\tfor j := 0; j < fun.splitNum; j++ {\n\t\tt := xMin + float64(j)*sep\n\t\ty := fun.f(t)\n\t\ta = append(a, [2]float64{t, y})\n\t}\n\treturn a\n}\n\nfunc (fun *Function2d) getGnuData() string {\n\tvar s string\n\tfor _, xs := range fun.GetData() {\n\t\ts += fmt.Sprintf(\"%f %f\\n\", xs[0], xs[1])\n\t}\n\treturn s\n}\n\nfunc (fun *Function2d) SetF(_f func(float64) float64) {\n\tfun.f = _f\n}\n\nfunc (fun Function2d) gnuplot(filename string) string {\n\tvar s = fmt.Sprintf(\"\\\"%v\\\"\", filename)\n\tfor key, val := range fun.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\ts += fmt.Sprintf(\" %v %v\", key, val)\n\t\t}\n\t}\n\treturn s\n}\n\nfunc (fun *Function2d) writeIntoGnufile(f os.File) {\n\tf.WriteString(fun.getGnuData())\n}\n\nconst DefaultCurve2dSplitNum int = 100\n\ntype Curve2d struct {\n\tplotter Plotter\n\tsplitNum int\n\tc func(float64) [2]float64\n}\n\nfunc NewCurve2d() *Curve2d {\n\tc := new(Curve2d)\n\tc.splitNum = DefaultCurve2dSplitNum\n\tc.plotter.configures = map[string]string{\n\t\t\"_tMin\": \"-10.0\",\n\t\t\"_tMax\": \"10.0\"}\n\treturn c\n}\n\nfunc (c *Curve2d) Configure(key, val string) {\n\tc.plotter.Configure(key, val)\n}\n\nfunc (c *Curve2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tc.plotter.Configure(key, val)\n\t}\n}\n\nfunc (c *Curve2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tc.plotter.Configure(key, val)\n\t}\n}\n\nfunc (c *Curve2d) GetData() [][2]float64 { \/\/ TODO: test\n\ttMin, _ := strconv.ParseFloat(c.plotter.configures[\"_tMin\"], 32)\n\ttMax, _ := strconv.ParseFloat(c.plotter.configures[\"_tMax\"], 32)\n\tvar sep = float64(tMax-tMin) \/ float64(c.splitNum-1)\n\n\tvar a [][2]float64\n\tfor j := 0; j < c.splitNum; j++ {\n\t\tvar t float64 = tMin + float64(j)*sep\n\t\tcs := c.c(tMin + t*float64(j))\n\t\ta = append(a, [2]float64{cs[0], cs[1]})\n\t}\n\treturn a\n}\n\nfunc (c *Curve2d) getGnuData() string {\n\tvar s string\n\tfor _, xs := range c.GetData() {\n\t\ts += fmt.Sprintf(\"%f %f\\n\", xs[0], xs[1])\n\t}\n\treturn s\n}\n\nfunc (c *Curve2d) SetC(_c func(float64) [2]float64) {\n\tc.c = _c\n}\n\nfunc (c Curve2d) gnuplot(fileName string) string {\n\tvar s = fmt.Sprintf(\"\\\"%v\\\" \", fileName)\n\tfor key, val := range c.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\ts += fmt.Sprintf(\" %v %v\", key, val)\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ Graph\ntype Graph2d struct {\n\tplotter Plotter\n\tfunctions 
[]Function2d\n\tcurves []Curve2d\n}\n\nfunc NewGraph2d() *Graph2d {\n\tg := new(Graph2d)\n\tg.plotter.configures = map[string]string{}\n\treturn g\n}\n\nfunc (g *Graph2d) Configure(key, val string) {\n\tg.plotter.Configure(key, val)\n}\n\nfunc (g *Graph2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tg.plotter.Configure(key, val)\n\t}\n}\n\nfunc (g *Graph2d) AppendFunc(f Function2d) {\n\tg.functions = append(g.functions, f)\n}\n\nfunc (g *Graph2d) AppendCurve(c Curve2d) {\n\tg.curves = append(g.curves, c)\n}\n\nfunc (g Graph2d) writeIntoFile(data string, f *os.File) {\n\tf.WriteString(data)\n}\n\nfunc (g *Graph2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tg.plotter.Configure(key, val)\n\t}\n}\n\nfunc (g Graph2d) gnuplot(funcFilenames []string, curveFilenames []string) string {\n\tvar s string\n\n\tfor key, val := range g.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\tif val == \"true\" {\n\t\t\t\ts += fmt.Sprintf(\"set %v;\\n\", key)\n\t\t\t} else if val == \"false\" {\n\t\t\t\ts += fmt.Sprintf(\"set no%v;\\n\", key)\n\t\t\t} else {\n\t\t\t\ts += fmt.Sprintf(\"set %v %v;\\n\", key, val)\n\t\t\t}\n\t\t}\n\t}\n\n\ts += \"plot \"\n\tfor j, _ := range g.functions {\n\t\ts += g.functions[j].gnuplot(funcFilenames[j]) + \", \"\n\t}\n\tfor j, _ := range g.curves {\n\t\ts += g.curves[j].gnuplot(curveFilenames[j])\n\t\tif j != len(g.curves)-1 {\n\t\t\ts += \", \"\n\t\t}\n\t}\n\ts += \";\\n\"\n\ts += \"pause -1;\\n\"\n\treturn s\n}\n\nfunc (g *Graph2d) Run() {\n\ttmpDir := os.TempDir() + \"\/gnuplot.go\/\"\n\t\/\/ TODO: tmpDirがなければ作る\n\t\/\/ execFilename := tmpDir + \"exec.gnu\"\n\texecFilename := \"exec.gnu\"\n\n\t\/\/ それぞれのfunctionのdataをtempファイルに書き込む\n\t\/\/ また, それらのファイルの名前を func_filenames []string に格納する\n\tvar funcFilenames []string\n\tfor _, fun := range g.functions {\n\t\tfile, err := ioutil.TempFile(tmpDir, \"\")\n\t\tdefer func() {\n\t\t\tfile.Close()\n\t\t}()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"%v\", err))\n\t\t} else {\n\t\t\tg.writeIntoFile(fun.getGnuData(), file)\n\t\t\tfuncFilenames = append(funcFilenames, file.Name())\n\t\t}\n\t}\n\n\t\/\/ それぞれのcurveのdataをtempファイルに書き込む\n\t\/\/ また, それらのファイルの名前を curve_filenames []stringに格納する\n\tvar curveFilenames []string\n\tfor _, c := range g.curves {\n\t\tfile, _ := ioutil.TempFile(tmpDir, \"\")\n\t\tdefer func() {\n\t\t\tfile.Close()\n\t\t}()\n\t\tg.writeIntoFile(c.getGnuData(), file)\n\t\tcurveFilenames = append(curveFilenames, file.Name())\n\t}\n\n\t\/\/ 実行するgnuplotの実行ファイルをtempファイルに書き込む\n\tos.Remove(execFilename)\n\texecFile, _ := os.OpenFile(execFilename, os.O_CREATE|os.O_WRONLY, 0666)\n\tdefer func() {\n\t\texecFile.Close()\n\t}()\n\texecFile.WriteString(g.gnuplot(funcFilenames, curveFilenames))\n}\nbasic of configurepackage gnuplot\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\/\/ \"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Plotter\ntype Plotter struct {\n\tconfigures map[string]string\n}\n\nfunc NewPlotter() *Plotter {\n\tplotter := new(Plotter)\n\tplotter.configures = map[string]string{}\n\treturn plotter\n}\n\nfunc (p *Plotter) Configure(key, val string) {\n\tp.configures[key] = val\n}\n\nfunc (p *Plotter) GetC(key string) string {\n\treturn p.configures[key]\n}\n\n\/\/ Configure\ntype Configure struct {\n\tkey string\n\tval string\n\trequiredCondition func(val string) bool\n}\n\nfunc NewConfigure(key, defaultVal string, requiredCondition func(val string) bool) *Configure {\n\tconf := new(Configure)\n\tconf.key = 
key\n\tconf.val = defaultVal\n\tconf.requiredCondition = requiredCondition\n\treturn conf\n}\n\nvar WITH_CONF = NewConfigure(\"with\", \"line\", func(val string) bool {\n\treturn val == \"line\" || val == \"dots\"\n})\n\nfunc (conf *Configure) SetVal(val string) {\n\tif conf.requiredCondition(val) {\n\t\tconf.val = val\n\t} else {\n\t\tpanic(fmt.Sprintf(\"%v is an illegal value for %v.\", val, conf.key))\n\t}\n}\n\n\/\/ Function2d\nconst DefaultFunction2dSplitNum int = 1000\n\ntype Function2d struct {\n\tplotter Plotter\n\tsplitNum int\n\tf func(float64) float64\n}\n\nfunc NewFunction2d() *Function2d {\n\tfun := new(Function2d)\n\tfun.splitNum = DefaultFunction2dSplitNum\n\tfun.plotter.configures = map[string]string{\n\t\t\"_xMin\": \"-10.0\",\n\t\t\"_xMax\": \"10.0\"}\n\treturn fun\n}\n\nfunc (fun *Function2d) Configure(key, val string) {\n\tfun.plotter.Configure(key, val)\n}\n\nfunc (fun *Function2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tfun.plotter.Configure(key, val)\n\t}\n}\n\nfunc (fun *Function2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tfun.plotter.configures[key] = val\n\t}\n}\n\nfunc (fun *Function2d) GetData() [][2]float64 { \/\/ TODO: write tests\n\txMin, _ := strconv.ParseFloat(fun.plotter.configures[\"_xMin\"], 32)\n\txMax, _ := strconv.ParseFloat(fun.plotter.configures[\"_xMax\"], 32)\n\tvar sep = float64(xMax-xMin) \/ float64(fun.splitNum-1)\n\n\tvar a [][2]float64\n\tfor j := 0; j < fun.splitNum; j++ {\n\t\tt := xMin + float64(j)*sep\n\t\ty := fun.f(t)\n\t\ta = append(a, [2]float64{t, y})\n\t}\n\treturn a\n}\n\nfunc (fun *Function2d) getGnuData() string {\n\tvar s string\n\tfor _, xs := range fun.GetData() {\n\t\ts += fmt.Sprintf(\"%f %f\\n\", xs[0], xs[1])\n\t}\n\treturn s\n}\n\nfunc (fun *Function2d) SetF(_f func(float64) float64) {\n\tfun.f = _f\n}\n\nfunc (fun Function2d) gnuplot(filename string) string {\n\tvar s = fmt.Sprintf(\"\\\"%v\\\"\", filename)\n\tfor key, val := range fun.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\ts += fmt.Sprintf(\" %v %v\", key, val)\n\t\t}\n\t}\n\treturn s\n}\n\nfunc (fun *Function2d) writeIntoGnufile(f os.File) {\n\tf.WriteString(fun.getGnuData())\n}\n\nconst DefaultCurve2dSplitNum int = 100\n\ntype Curve2d struct {\n\tplotter Plotter\n\tsplitNum int\n\tc func(float64) [2]float64\n}\n\nfunc NewCurve2d() *Curve2d {\n\tc := new(Curve2d)\n\tc.splitNum = DefaultCurve2dSplitNum\n\tc.plotter.configures = map[string]string{\n\t\t\"_tMin\": \"-10.0\",\n\t\t\"_tMax\": \"10.0\"}\n\treturn c\n}\n\nfunc (c *Curve2d) Configure(key, val string) {\n\tc.plotter.Configure(key, val)\n}\n\nfunc (c *Curve2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tc.plotter.Configure(key, val)\n\t}\n}\n\nfunc (c *Curve2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tc.plotter.Configure(key, val)\n\t}\n}\n\nfunc (c *Curve2d) GetData() [][2]float64 { \/\/ TODO: test\n\ttMin, _ := strconv.ParseFloat(c.plotter.configures[\"_tMin\"], 32)\n\ttMax, _ := strconv.ParseFloat(c.plotter.configures[\"_tMax\"], 32)\n\tvar sep = float64(tMax-tMin) \/ float64(c.splitNum-1)\n\n\tvar a [][2]float64\n\tfor j := 0; j < c.splitNum; j++ {\n\t\tvar t float64 = tMin + float64(j)*sep\n\t\tcs := c.c(t)\n\t\ta = append(a, [2]float64{cs[0], cs[1]})\n\t}\n\treturn a\n}\n\nfunc (c *Curve2d) getGnuData() string {\n\tvar s string\n\tfor _, xs := range c.GetData() {\n\t\ts += fmt.Sprintf(\"%f %f\\n\", xs[0], xs[1])\n\t}\n\treturn 
s\n}\n\nfunc (c *Curve2d) SetC(_c func(float64) [2]float64) {\n\tc.c = _c\n}\n\nfunc (c Curve2d) gnuplot(fileName string) string {\n\tvar s = fmt.Sprintf(\"\\\"%v\\\" \", fileName)\n\tfor key, val := range c.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\ts += fmt.Sprintf(\" %v %v\", key, val)\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ Graph\ntype Graph2d struct {\n\tplotter Plotter\n\tfunctions []Function2d\n\tcurves []Curve2d\n}\n\nfunc NewGraph2d() *Graph2d {\n\tg := new(Graph2d)\n\tg.plotter.configures = map[string]string{}\n\treturn g\n}\n\nfunc (g *Graph2d) Configure(key, val string) {\n\tg.plotter.Configure(key, val)\n}\n\nfunc (g *Graph2d) Configures(m map[string]string) {\n\tfor key, val := range m {\n\t\tg.plotter.Configure(key, val)\n\t}\n}\n\nfunc (g *Graph2d) AppendFunc(f Function2d) {\n\tg.functions = append(g.functions, f)\n}\n\nfunc (g *Graph2d) AppendCurve(c Curve2d) {\n\tg.curves = append(g.curves, c)\n}\n\nfunc (g Graph2d) writeIntoFile(data string, f *os.File) {\n\tf.WriteString(data)\n}\n\nfunc (g *Graph2d) UpdatePlotter(plotter *Plotter) {\n\tfor key, val := range plotter.configures {\n\t\tg.plotter.Configure(key, val)\n\t}\n}\n\nfunc (g Graph2d) gnuplot(funcFilenames []string, curveFilenames []string) string {\n\tvar s string\n\n\tfor key, val := range g.plotter.configures {\n\t\tif !strings.HasPrefix(key, \"_\") {\n\t\t\tif val == \"true\" {\n\t\t\t\ts += fmt.Sprintf(\"set %v;\\n\", key)\n\t\t\t} else if val == \"false\" {\n\t\t\t\ts += fmt.Sprintf(\"set no%v;\\n\", key)\n\t\t\t} else {\n\t\t\t\ts += fmt.Sprintf(\"set %v %v;\\n\", key, val)\n\t\t\t}\n\t\t}\n\t}\n\n\ts += \"plot \"\n\tfor j, _ := range g.functions {\n\t\ts += g.functions[j].gnuplot(funcFilenames[j]) + \", \"\n\t}\n\tfor j, _ := range g.curves {\n\t\ts += g.curves[j].gnuplot(curveFilenames[j])\n\t\tif j != len(g.curves)-1 {\n\t\t\ts += \", \"\n\t\t}\n\t}\n\ts += \";\\n\"\n\ts += \"pause -1;\\n\"\n\treturn s\n}\n\nfunc (g *Graph2d) Run() {\n\ttmpDir := os.TempDir() + \"\/gnuplot.go\/\"\n\t\/\/ TODO: tmpDirがなければ作る\n\t\/\/ execFilename := tmpDir + \"exec.gnu\"\n\texecFilename := \"exec.gnu\"\n\n\t\/\/ それぞれのfunctionのdataをtempファイルに書き込む\n\t\/\/ また, それらのファイルの名前を func_filenames []string に格納する\n\tvar funcFilenames []string\n\tfor _, fun := range g.functions {\n\t\tfile, err := ioutil.TempFile(tmpDir, \"\")\n\t\tdefer func() {\n\t\t\tfile.Close()\n\t\t}()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"%v\", err))\n\t\t} else {\n\t\t\tg.writeIntoFile(fun.getGnuData(), file)\n\t\t\tfuncFilenames = append(funcFilenames, file.Name())\n\t\t}\n\t}\n\n\t\/\/ それぞれのcurveのdataをtempファイルに書き込む\n\t\/\/ また, それらのファイルの名前を curve_filenames []stringに格納する\n\tvar curveFilenames []string\n\tfor _, c := range g.curves {\n\t\tfile, _ := ioutil.TempFile(tmpDir, \"\")\n\t\tdefer func() {\n\t\t\tfile.Close()\n\t\t}()\n\t\tg.writeIntoFile(c.getGnuData(), file)\n\t\tcurveFilenames = append(curveFilenames, file.Name())\n\t}\n\n\t\/\/ 実行するgnuplotの実行ファイルをtempファイルに書き込む\n\tos.Remove(execFilename)\n\texecFile, _ := os.OpenFile(execFilename, os.O_CREATE|os.O_WRONLY, 0666)\n\tdefer func() {\n\t\texecFile.Close()\n\t}()\n\texecFile.WriteString(g.gnuplot(funcFilenames, curveFilenames))\n}\n<|endoftext|>"} {"text":"package upstream\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ngmoco\/falcore\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\t\"io\"\n\t\"bytes\"\n)\n\ntype passThruReadCloser struct {\n\tio.Reader\n\tio.Closer\n}\n\ntype Upstream struct {\n\t\/\/ The upstream host to connect to\n\tHost string\n\t\/\/ The port on the upstream 
host\n\tPort int\n\t\/\/ Default 60 seconds\n\tTimeout time.Duration\n\t\/\/ Will ignore https on the incoming request and always upstream http\n\tForceHttp bool\n\t\/\/ Ping URL Path-only for checking upness\n\tPingPath string\n\n\ttransport *http.Transport\n\thost string\n\ttcpaddr *net.TCPAddr\n\ttcpconn *net.TCPConn\n}\n\nfunc NewUpstream(host string, port int, forceHttp bool) *Upstream {\n\tu := new(Upstream)\n\tu.Host = host\n\tu.Port = port\n\tu.ForceHttp = forceHttp\n\tips, err := net.LookupIP(host)\n\tvar ip net.IP = nil\n\tfor i := range ips {\n\t\tip = ips[i].To4()\n\t\tif ip != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err == nil && ip != nil {\n\t\tu.tcpaddr = new(net.TCPAddr)\n\t\tu.tcpaddr.Port = port\n\t\tu.tcpaddr.IP = ip\n\t} else {\n\t\tfalcore.Warn(\"Can't get IP addr for %v: %v\", host, err)\n\t}\n\tu.Timeout = 60e9\n\tu.host = fmt.Sprintf(\"%v:%v\", u.Host, u.Port)\n\n\tu.transport = new(http.Transport)\n\n\tu.transport.Dial = func(n, addr string) (c net.Conn, err error) {\n\t\tfalcore.Fine(\"Dialing connection to %v\", u.tcpaddr)\n\t\tvar ctcp *net.TCPConn\n\t\tctcp, err = net.DialTCP(\"tcp4\", nil, u.tcpaddr)\n\t\tif ctcp != nil {\n\t\t\tu.tcpconn = ctcp\n\t\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t\t}\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Dial Failed: %v\", err)\n\t\t}\n\t\treturn ctcp, err\n\t}\n\tu.transport.MaxIdleConnsPerHost = 15\n\treturn u\n}\n\n\/\/ Alter the number of connections to multiplex with\nfunc (u *Upstream) SetPoolSize(size int) {\n\tu.transport.MaxIdleConnsPerHost = size\n}\n\nfunc (u *Upstream) FilterRequest(request *falcore.Request) (res *http.Response) {\n\tvar err error\n\treq := request.HttpRequest\n\n\t\/\/ Force the upstream to use http \n\tif u.ForceHttp || req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t\treq.URL.Host = req.Host\n\t}\n\tbefore := time.Now()\n\treq.Header.Set(\"Connection\", \"Keep-Alive\")\n\tif u.tcpconn != nil {\n\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t}\n\tvar upstrRes *http.Response\n\tupstrRes, err = u.transport.RoundTrip(req)\n\tdiff := falcore.TimeDiff(before, time.Now())\n\tif err == nil {\n\t\t\/\/ Copy response over to new record. Remove connection noise. Add some sanity.\n\t\tres = falcore.SimpleResponse(req, upstrRes.StatusCode, nil, \"\")\n\t\tif upstrRes.ContentLength > 0 && upstrRes.Body != nil {\n\t\t\tres.Body = upstrRes.Body\n\t\t} else if upstrRes.ContentLength == 0 && upstrRes.Body != nil {\n\t\t\t\/\/ Any bytes?\n\t\t\tvar testBuf [1]byte\n\t\t\tn, _ := io.ReadFull(upstrRes.Body, testBuf[:])\n\t\t\tif n == 1 {\n\t\t\t\t\/\/ Yes there are. 
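\t\t\t\t\/\/ (Added note, not in the original source: the one-byte io.ReadFull\n\t\t\t\t\/\/ above distinguishes a genuinely empty body from an unsized stream;\n\t\t\t\t\/\/ the probed byte is stitched back in with io.MultiReader below so\n\t\t\t\t\/\/ the downstream reader still sees the whole payload.)\n\t\t\t\t\/\/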
Chunked it is.\n\t\t\t\tres.TransferEncoding = []string{\"chunked\"}\n\t\t\t\trc := &passThruReadCloser{\n\t\t\t\t\tio.MultiReader(bytes.NewBuffer(testBuf[:]), upstrRes.Body),\n\t\t\t\t\tupstrRes.Body,\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tres.Body = rc\n\t\t\t}\n\t\t} else if upstrRes.Body != nil {\n\t\t\tres.Body = upstrRes.Body\n\t\t\tres.TransferEncoding = []string{\"chunked\"}\n\t\t}\n\t\t\/\/ Copy over headers with a few exceptions\n\t\tres.Header = make(http.Header)\n\t\tfor hn, hv := range upstrRes.Header {\n\t\t\tswitch hn {\n\t\t\tcase \"Content-Length\":\n\t\t\tcase \"Connection\":\n\t\t\tcase \"Transfer-Encoding\":\n\t\t\tdefault:\n\t\t\t\tres.Header[hn] = hv\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() {\n\t\t\tfalcore.Error(\"%s Upstream Timeout error: %v\", request.ID, err)\n\t\t\tres = falcore.SimpleResponse(req, 504, nil, \"Gateway Timeout\\n\")\n\t\t\trequest.CurrentStage.Status = 2 \/\/ Fail\n\t\t} else {\n\t\t\tfalcore.Error(\"%s Upstream error: %v\", request.ID, err)\n\t\t\tres = falcore.SimpleResponse(req, 502, nil, \"Bad Gateway\\n\")\n\t\t\trequest.CurrentStage.Status = 2 \/\/ Fail\n\t\t}\n\t}\n\tfalcore.Debug(\"%s [%s] [%s] %s s=%d Time=%.4f\", request.ID, req.Method, u.host, req.URL, res.StatusCode, diff)\n\treturn\n}\n\nfunc (u *Upstream) ping() (up bool, ok bool) {\n\tif u.PingPath != \"\" {\n\t\t\/\/ the url must be syntactically valid for this to work but the host will be ignored because we\n\t\t\/\/ are overriding the connection always\n\t\trequest, err := http.NewRequest(\"GET\", \"http:\/\/localhost\"+u.PingPath, nil)\n\t\trequest.Header.Set(\"Connection\", \"Keep-Alive\") \/\/ not sure if this should be here for a ping\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Bad Ping request: %v\", err)\n\t\t\treturn false, true\n\t\t}\n\t\tif u.tcpconn != nil {\n\t\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t\t}\n\t\tres, err := u.transport.RoundTrip(request)\n\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Failed Ping to %v:%v: %v\", u.Host, u.Port, err)\n\t\t\treturn false, true\n\t\t} else {\n\t\t\tres.Body.Close()\n\t\t}\n\t\tif res.StatusCode == 200 {\n\t\t\treturn true, true\n\t\t}\n\t\tfalcore.Error(\"Failed Ping to %v:%v: %v\", u.Host, u.Port, res.Status)\n\t\t\/\/ bad status\n\t\treturn false, true\n\t}\n\treturn false, false\n}\nleaving ContentLength at 0 breaks other thingspackage upstream\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ngmoco\/falcore\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\t\"io\"\n\t\"bytes\"\n)\n\ntype passThruReadCloser struct {\n\tio.Reader\n\tio.Closer\n}\n\ntype Upstream struct {\n\t\/\/ The upstream host to connect to\n\tHost string\n\t\/\/ The port on the upstream host\n\tPort int\n\t\/\/ Default 60 seconds\n\tTimeout time.Duration\n\t\/\/ Will ignore https on the incoming request and always upstream http\n\tForceHttp bool\n\t\/\/ Ping URL Path-only for checking upness\n\tPingPath string\n\n\ttransport *http.Transport\n\thost string\n\ttcpaddr *net.TCPAddr\n\ttcpconn *net.TCPConn\n}\n\nfunc NewUpstream(host string, port int, forceHttp bool) *Upstream {\n\tu := new(Upstream)\n\tu.Host = host\n\tu.Port = port\n\tu.ForceHttp = forceHttp\n\tips, err := net.LookupIP(host)\n\tvar ip net.IP = nil\n\tfor i := range ips {\n\t\tip = ips[i].To4()\n\t\tif ip != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err == nil && ip != nil {\n\t\tu.tcpaddr = new(net.TCPAddr)\n\t\tu.tcpaddr.Port = port\n\t\tu.tcpaddr.IP = ip\n\t} else {\n\t\tfalcore.Warn(\"Can't get IP addr for %v: %v\", host, err)\n\t}\n\tu.Timeout = 
60e9\n\tu.host = fmt.Sprintf(\"%v:%v\", u.Host, u.Port)\n\n\tu.transport = new(http.Transport)\n\n\tu.transport.Dial = func(n, addr string) (c net.Conn, err error) {\n\t\tfalcore.Fine(\"Dialing connection to %v\", u.tcpaddr)\n\t\tvar ctcp *net.TCPConn\n\t\tctcp, err = net.DialTCP(\"tcp4\", nil, u.tcpaddr)\n\t\tif ctcp != nil {\n\t\t\tu.tcpconn = ctcp\n\t\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t\t}\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Dial Failed: %v\", err)\n\t\t}\n\t\treturn ctcp, err\n\t}\n\tu.transport.MaxIdleConnsPerHost = 15\n\treturn u\n}\n\n\/\/ Alter the number of connections to multiplex with\nfunc (u *Upstream) SetPoolSize(size int) {\n\tu.transport.MaxIdleConnsPerHost = size\n}\n\nfunc (u *Upstream) FilterRequest(request *falcore.Request) (res *http.Response) {\n\tvar err error\n\treq := request.HttpRequest\n\n\t\/\/ Force the upstream to use http \n\tif u.ForceHttp || req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t\treq.URL.Host = req.Host\n\t}\n\tbefore := time.Now()\n\treq.Header.Set(\"Connection\", \"Keep-Alive\")\n\tif u.tcpconn != nil {\n\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t}\n\tvar upstrRes *http.Response\n\tupstrRes, err = u.transport.RoundTrip(req)\n\tdiff := falcore.TimeDiff(before, time.Now())\n\tif err == nil {\n\t\t\/\/ Copy response over to new record. Remove connection noise. Add some sanity.\n\t\tres = falcore.SimpleResponse(req, upstrRes.StatusCode, nil, \"\")\n\t\tif upstrRes.ContentLength > 0 && upstrRes.Body != nil {\n\t\t\tres.Body = upstrRes.Body\n\t\t} else if upstrRes.ContentLength == 0 && upstrRes.Body != nil {\n\t\t\t\/\/ Any bytes?\n\t\t\tvar testBuf [1]byte\n\t\t\tn, _ := io.ReadFull(upstrRes.Body, testBuf[:])\n\t\t\tif n == 1 {\n\t\t\t\t\/\/ Yes there are. Chunked it is.\n\t\t\t\tres.TransferEncoding = []string{\"chunked\"}\n\t\t\t\tres.ContentLength = -1\n\t\t\t\trc := &passThruReadCloser{\n\t\t\t\t\tio.MultiReader(bytes.NewBuffer(testBuf[:]), upstrRes.Body),\n\t\t\t\t\tupstrRes.Body,\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tres.Body = rc\n\t\t\t}\n\t\t} else if upstrRes.Body != nil {\n\t\t\tres.Body = upstrRes.Body\n\t\t\tres.ContentLength = -1\n\t\t\tres.TransferEncoding = []string{\"chunked\"}\n\t\t}\n\t\t\/\/ Copy over headers with a few exceptions\n\t\tres.Header = make(http.Header)\n\t\tfor hn, hv := range upstrRes.Header {\n\t\t\tswitch hn {\n\t\t\tcase \"Content-Length\":\n\t\t\tcase \"Connection\":\n\t\t\tcase \"Transfer-Encoding\":\n\t\t\tdefault:\n\t\t\t\tres.Header[hn] = hv\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() {\n\t\t\tfalcore.Error(\"%s Upstream Timeout error: %v\", request.ID, err)\n\t\t\tres = falcore.SimpleResponse(req, 504, nil, \"Gateway Timeout\\n\")\n\t\t\trequest.CurrentStage.Status = 2 \/\/ Fail\n\t\t} else {\n\t\t\tfalcore.Error(\"%s Upstream error: %v\", request.ID, err)\n\t\t\tres = falcore.SimpleResponse(req, 502, nil, \"Bad Gateway\\n\")\n\t\t\trequest.CurrentStage.Status = 2 \/\/ Fail\n\t\t}\n\t}\n\tfalcore.Debug(\"%s [%s] [%s] %s s=%d Time=%.4f\", request.ID, req.Method, u.host, req.URL, res.StatusCode, diff)\n\treturn\n}\n\nfunc (u *Upstream) ping() (up bool, ok bool) {\n\tif u.PingPath != \"\" {\n\t\t\/\/ the url must be syntactically valid for this to work but the host will be ignored because we\n\t\t\/\/ are overriding the connection always\n\t\trequest, err := http.NewRequest(\"GET\", \"http:\/\/localhost\"+u.PingPath, nil)\n\t\trequest.Header.Set(\"Connection\", \"Keep-Alive\") \/\/ not sure if this should be here for a 
ping\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Bad Ping request: %v\", err)\n\t\t\treturn false, true\n\t\t}\n\t\tif u.tcpconn != nil {\n\t\t\tu.tcpconn.SetDeadline(time.Now().Add(u.Timeout))\n\t\t}\n\t\tres, err := u.transport.RoundTrip(request)\n\n\t\tif err != nil {\n\t\t\tfalcore.Error(\"Failed Ping to %v:%v: %v\", u.Host, u.Port, err)\n\t\t\treturn false, true\n\t\t} else {\n\t\t\tres.Body.Close()\n\t\t}\n\t\tif res.StatusCode == 200 {\n\t\t\treturn true, true\n\t\t}\n\t\tfalcore.Error(\"Failed Ping to %v:%v: %v\", u.Host, u.Port, res.Status)\n\t\t\/\/ bad status\n\t\treturn false, true\n\t}\n\treturn false, false\n}\n<|endoftext|>"} {"text":"\/* A relatively simple Chef server implementation in Go, as a learning project\n * to learn more about programming in Go. *\/\n\n\/*\n * Copyright (c) 2013, Jeremy Bingham ()\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"log\"\n\t\"github.com\/ctdk\/goiardi\/config\"\n\t\"github.com\/ctdk\/goiardi\/actor\"\n)\n\ntype InterceptHandler struct {} \/\/ Doesn't need to do anything, just sit there.\n\nfunc main(){\n\tconfig.ParseConfigOptions()\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\t\/* Create default clients and users. Currently chef-validator,\n\t * chef-webui, and admin. *\/\n\tcreateDefaultActors()\n\n\t\/* Register the various handlers, found in their own source files. *\/\n\thttp.HandleFunc(\"\/authenticate_user\", authenticate_user_handler)\n\thttp.HandleFunc(\"\/clients\", list_handler)\n\thttp.HandleFunc(\"\/clients\/\", actor_handler)\n\thttp.HandleFunc(\"\/cookbooks\", cookbook_handler)\n\thttp.HandleFunc(\"\/cookbooks\/\", cookbook_handler)\n\thttp.HandleFunc(\"\/data\", data_handler)\n\thttp.HandleFunc(\"\/data\/\", data_handler)\n\thttp.HandleFunc(\"\/environments\", environment_handler)\n\thttp.HandleFunc(\"\/environments\/\", environment_handler)\n\thttp.HandleFunc(\"\/nodes\", list_handler)\n\thttp.HandleFunc(\"\/nodes\/\", node_handler)\n\thttp.HandleFunc(\"\/principals\/\", principal_handler)\n\thttp.HandleFunc(\"\/roles\", list_handler)\n\thttp.HandleFunc(\"\/roles\/\", role_handler)\n\thttp.HandleFunc(\"\/sandboxes\", sandbox_handler)\n\thttp.HandleFunc(\"\/sandboxes\/\", sandbox_handler)\n\thttp.HandleFunc(\"\/search\", search_handler)\n\thttp.HandleFunc(\"\/search\/\", search_handler)\n\thttp.HandleFunc(\"\/users\", list_handler)\n\thttp.HandleFunc(\"\/users\/\", actor_handler)\n\thttp.HandleFunc(\"\/file_store\/\", file_store_handler)\n\n\t\/* TODO: figure out how to handle the root & not found pages *\/\n\thttp.HandleFunc(\"\/\", root_handler)\n\n\tlisten_addr := config.ListenAddr()\n\thttp.ListenAndServe(listen_addr, &InterceptHandler{})\n}\n\nfunc root_handler(w http.ResponseWriter, r *http.Request){\n\t\/\/ TODO: make root do something useful\n\treturn\n}\n\nfunc (h *InterceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request){\n\t\/* knife sometimes sends URL paths that start with \/\/. 
Redirecting\n\t * worked for GETs, but since it was breaking POSTs and screwing with \n\t * GETs with query params, we just clean up the path and move on. *\/\n\n\t\/* log the URL *\/\n\t\/\/ TODO: set this to verbosity level 4 or so\n\t\/\/log.Printf(\"Serving %s\\n\", r.URL.Path)\n\n\tif r.Method != \"CONNECT\" { \n\t\tif p := cleanPath(r.URL.Path); p != r.URL.Path{\n\t\t\tr.URL.Path = p\n\t\t}\n\t}\n\n\t\/* Make configurable, I guess, but Chef wants it to be 1000000 *\/\n\tif r.ContentLength > 1000000 {\n\t\thttp.Error(w, \"Content-length too long!\", http.StatusRequestEntityTooLarge)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Goiardi\", \"yes\")\n\tw.Header().Set(\"X-Goiardi-Version\", config.Version)\n\tw.Header().Set(\"X-Chef-Version\", config.ChefVersion)\n\n\thttp.DefaultServeMux.ServeHTTP(w, r)\n}\n\nfunc cleanPath(p string) string {\n\t\/* Borrowing cleanPath from net\/http *\/\n\tif p == \"\" {\n\t\treturn \"\/\"\n\t}\n\tif p[0] != '\/' {\n\t\tp = \"\/\" + p\n\t}\n np := path.Clean(p)\n\t\/\/ path.Clean removes trailing slash except for root;\n\t\/\/ put the trailing slash back if necessary.\n\tif p[len(p)-1] == '\/' && np != \"\/\" {\n\t\tnp += \"\/\"\n\t}\n\treturn np\n}\n\nfunc createDefaultActors() {\n\tif webui, err := actor.New(\"chef-webui\", \"client\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\twebui.Admin = true\n\t\t_, err = webui.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\twebui.Save()\n\t}\n\n\tif validator, err := actor.New(\"chef-validator\", \"client\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tvalidator.Validator = true\n\t\t_, err = validator.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tvalidator.Save()\n\t}\n\n\tif admin, err := actor.New(\"admin\", \"user\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tadmin.Admin = true\n\t\t_, err = admin.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tadmin.Save()\n\t}\n\n\treturn\n}\nAnd a header like open source Chef server sends\/* A relatively simple Chef server implementation in Go, as a learning project\n * to learn more about programming in Go. *\/\n\n\/*\n * Copyright (c) 2013, Jeremy Bingham ()\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"log\"\n\t\"github.com\/ctdk\/goiardi\/config\"\n\t\"github.com\/ctdk\/goiardi\/actor\"\n\t\"fmt\"\n)\n\ntype InterceptHandler struct {} \/\/ Doesn't need to do anything, just sit there.\n\nfunc main(){\n\tconfig.ParseConfigOptions()\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\t\/* Create default clients and users. Currently chef-validator,\n\t * chef-webui, and admin. *\/\n\tcreateDefaultActors()\n\n\t\/* Register the various handlers, found in their own source files. 
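\t * (Added usage sketch, not in the original source; the listen address is\n\t * hypothetical: once the mux below is registered, a request such as\n\t * curl http:\/\/localhost:4545\/nodes\n\t * is routed to list_handler, while \/nodes\/NAME falls through to\n\t * node_handler via the trailing-slash pattern.)\n\t *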
*\/\n\thttp.HandleFunc(\"\/authenticate_user\", authenticate_user_handler)\n\thttp.HandleFunc(\"\/clients\", list_handler)\n\thttp.HandleFunc(\"\/clients\/\", actor_handler)\n\thttp.HandleFunc(\"\/cookbooks\", cookbook_handler)\n\thttp.HandleFunc(\"\/cookbooks\/\", cookbook_handler)\n\thttp.HandleFunc(\"\/data\", data_handler)\n\thttp.HandleFunc(\"\/data\/\", data_handler)\n\thttp.HandleFunc(\"\/environments\", environment_handler)\n\thttp.HandleFunc(\"\/environments\/\", environment_handler)\n\thttp.HandleFunc(\"\/nodes\", list_handler)\n\thttp.HandleFunc(\"\/nodes\/\", node_handler)\n\thttp.HandleFunc(\"\/principals\/\", principal_handler)\n\thttp.HandleFunc(\"\/roles\", list_handler)\n\thttp.HandleFunc(\"\/roles\/\", role_handler)\n\thttp.HandleFunc(\"\/sandboxes\", sandbox_handler)\n\thttp.HandleFunc(\"\/sandboxes\/\", sandbox_handler)\n\thttp.HandleFunc(\"\/search\", search_handler)\n\thttp.HandleFunc(\"\/search\/\", search_handler)\n\thttp.HandleFunc(\"\/users\", list_handler)\n\thttp.HandleFunc(\"\/users\/\", actor_handler)\n\thttp.HandleFunc(\"\/file_store\/\", file_store_handler)\n\n\t\/* TODO: figure out how to handle the root & not found pages *\/\n\thttp.HandleFunc(\"\/\", root_handler)\n\n\tlisten_addr := config.ListenAddr()\n\thttp.ListenAndServe(listen_addr, &InterceptHandler{})\n}\n\nfunc root_handler(w http.ResponseWriter, r *http.Request){\n\t\/\/ TODO: make root do something useful\n\treturn\n}\n\nfunc (h *InterceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request){\n\t\/* knife sometimes sends URL paths that start with \/\/. Redirecting\n\t * worked for GETs, but since it was breaking POSTs and screwing with \n\t * GETs with query params, we just clean up the path and move on. *\/\n\n\t\/* log the URL *\/\n\t\/\/ TODO: set this to verbosity level 4 or so\n\t\/\/log.Printf(\"Serving %s\\n\", r.URL.Path)\n\n\tif r.Method != \"CONNECT\" { \n\t\tif p := cleanPath(r.URL.Path); p != r.URL.Path{\n\t\t\tr.URL.Path = p\n\t\t}\n\t}\n\n\t\/* Make configurable, I guess, but Chef wants it to be 1000000 *\/\n\tif r.ContentLength > 1000000 {\n\t\thttp.Error(w, \"Content-length too long!\", http.StatusRequestEntityTooLarge)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"X-Goiardi\", \"yes\")\n\tw.Header().Set(\"X-Goiardi-Version\", config.Version)\n\tw.Header().Set(\"X-Chef-Version\", config.ChefVersion)\n\tapi_info := fmt.Sprintf(\"flavor=osc;version:%s;goiardi=%s\", config.ChefVersion, config.Version)\n\tw.Header().Set(\"X-Ops-API-Info\", api_info)\n\n\thttp.DefaultServeMux.ServeHTTP(w, r)\n}\n\nfunc cleanPath(p string) string {\n\t\/* Borrowing cleanPath from net\/http *\/\n\tif p == \"\" {\n\t\treturn \"\/\"\n\t}\n\tif p[0] != '\/' {\n\t\tp = \"\/\" + p\n\t}\n np := path.Clean(p)\n\t\/\/ path.Clean removes trailing slash except for root;\n\t\/\/ put the trailing slash back if necessary.\n\tif p[len(p)-1] == '\/' && np != \"\/\" {\n\t\tnp += \"\/\"\n\t}\n\treturn np\n}\n\nfunc createDefaultActors() {\n\tif webui, err := actor.New(\"chef-webui\", \"client\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\twebui.Admin = true\n\t\t_, err = webui.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\twebui.Save()\n\t}\n\n\tif validator, err := actor.New(\"chef-validator\", \"client\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tvalidator.Validator = true\n\t\t_, err = validator.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tvalidator.Save()\n\t}\n\n\tif admin, err := actor.New(\"admin\", \"user\"); err != nil 
{\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tadmin.Admin = true\n\t\t_, err = admin.GenerateKeys()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tadmin.Save()\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Godless is a peer-to-peer database running over IPFS.\n\/\/\n\/\/ Godless uses a Consistent Replicated Data Type called a Namespace to share schemaless data with peers.\n\/\/\n\/\/ This package is a facade to Godless internals.\n\/\/\n\/\/ Godless is in alpha, and should be considered experimental software.\npackage godless\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tgohttp \"net\/http\"\n\n\t\"github.com\/johnny-morrice\/godless\/api\"\n\t\"github.com\/johnny-morrice\/godless\/cache\"\n\t\"github.com\/johnny-morrice\/godless\/crdt\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/crypto\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/http\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/ipfs\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/service\"\n\t\"github.com\/johnny-morrice\/godless\/log\"\n\t\"github.com\/johnny-morrice\/godless\/query\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TODO allow single cache option.\n\/\/ Godless options.\ntype Options struct {\n\t\/\/ IpfsServiceUrl is required.\n\tIpfsServiceUrl string\n\t\/\/ KeyStore is required. A private Key store.\n\tKeyStore api.KeyStore\n\t\/\/ WebServiceAddr is optional. If not set, the webservice will be disabled.\n\tWebServiceAddr string\n\t\/\/ IndexHash is optional. Set to load an existing index from IPFS.\n\tIndexHash string\n\t\/\/ FailEarly will cause the godless process to crash if it cannot contact IPFS on startup.\n\tFailEarly bool\n\t\/\/ ReplicateInterval is optional. The duration between peer-to-peer replications.\n\tReplicateInterval time.Duration\n\tPulse time.Duration\n\t\/\/ Topics is optional. Two godless servers which share a topic will replicate indices. An empty topics slice will disable replication.\n\tTopics []string\n\t\/\/ IpfsClient is optional. Specify a HTTP client for IPFS.\n\tIpfsClient *gohttp.Client\n\t\/\/ IpfsPingTimeout is optional. Specify a lower timeout for \"Am I Connected?\" checks.\n\tIpfsPingTimeout time.Duration\n\t\/\/ Cache is optional. Build a 12-factor app by supplying your own remote cache.\n\t\/\/ HeadCache, IndexCache and NamespaceCache can be used to specify different caches for different data types.\n\tCache api.Cache\n\t\/\/ HeadCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tHeadCache api.HeadCache\n\t\/\/ IndexCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tIndexCache api.IndexCache\n\t\/\/ NamespaceCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tNamespaceCache api.NamespaceCache\n\t\/\/ PriorityQueue is optional. Build a 12-factor app by supplying your own remote cache.\n\tPriorityQueue api.RequestPriorityQueue\n\t\/\/ APIQueryLimit is optional. Tune performance by setting the number of simultaneous queries.\n\tAPIQueryLimit int\n\t\/\/ PublicServer is optional. If false, the index will only be updated from peers who are in your public key list.\n\tPublicServer bool\n}\n\n\/\/ Godless is a peer-to-peer database. 
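Before moving on from the Chef-server file that closed above: its createDefaultActors repeats one create/keygen/save block three times, differing only in name, type, and which flag is set. A hedged sketch of the same bootstrap as a table-driven loop, assuming actor.New returns a *actor.Actor exposing the Admin and Validator fields used above (the seed table itself is invented for illustration):

package main

import (
	"log"

	"github.com/ctdk/goiardi/actor"
)

// bootstrapActors performs the same default-actor creation as above,
// driven by a table instead of three copied blocks. setup flips the
// relevant flag on the freshly created actor.
func bootstrapActors() {
	seeds := []struct {
		name, kind string
		setup      func(*actor.Actor)
	}{
		{"chef-webui", "client", func(a *actor.Actor) { a.Admin = true }},
		{"chef-validator", "client", func(a *actor.Actor) { a.Validator = true }},
		{"admin", "user", func(a *actor.Actor) { a.Admin = true }},
	}
	for _, s := range seeds {
		a, err := actor.New(s.name, s.kind)
		if err != nil {
			log.Fatalln(err)
		}
		s.setup(a)
		if _, err := a.GenerateKeys(); err != nil {
			log.Fatalln(err)
		}
		a.Save()
	}
}

func main() { bootstrapActors() }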
It shares structured data between peers, using IPFS as a backing store.\n\/\/ The core datastructure is a CRDT namespace which resembles a relational scheme in that it has tables, rows, and entries.\ntype Godless struct {\n\tOptions\n\terrch chan error\n\terrwg sync.WaitGroup\n\tstopch chan struct{}\n\tstoppers []chan<- struct{}\n\tstore api.RemoteStore\n\tremote api.RemoteNamespace\n\tapi api.APIService\n}\n\n\/\/ New creates a godless instance, connecting to any services, and providing any services, specified in the options.\nfunc New(options Options) (*Godless, error) {\n\tgodless := &Godless{Options: options}\n\n\tmissing := godless.findMissingParameters()\n\n\tif missing != nil {\n\t\treturn nil, missing\n\t}\n\n\tsetupFuncs := []func() error{\n\t\tgodless.connectIpfs,\n\t\tgodless.connectCache,\n\t\tgodless.setupNamespace,\n\t\tgodless.launchAPI,\n\t\tgodless.serveWeb,\n\t\tgodless.replicate,\n\t}\n\n\terr := breakOnError(setupFuncs)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgodless.report()\n\n\treturn godless, nil\n}\n\nfunc (godless *Godless) report() {\n\tif godless.PublicServer {\n\t\tlog.Info(\"Running public Godless API\")\n\t} else {\n\t\tlog.Info(\"Running private Godless API\")\n\t}\n\n\tprivCount := len(godless.KeyStore.GetAllPrivateKeys())\n\tpubCount := len(godless.KeyStore.GetAllPublicKeys())\n\n\tlog.Info(\"Godless API using %v private and %v public keys\", privCount, pubCount)\n}\n\nfunc (godless *Godless) findMissingParameters() error {\n\tvar missing error\n\tif godless.IpfsServiceUrl == \"\" {\n\t\tmsg := godless.missingParameterText(\"IpfsServiceUrl\")\n\t\tmissing = errors.New(msg)\n\t}\n\n\tif godless.KeyStore == nil {\n\t\tmsg := godless.missingParameterText(\"KeyStore\")\n\t\tif missing == nil {\n\t\t\tmissing = errors.New(msg)\n\t\t} else {\n\t\t\tmissing = errors.Wrap(missing, msg)\n\t\t}\n\t}\n\n\treturn missing\n}\n\nfunc (godless *Godless) missingParameterText(param string) string {\n\treturn fmt.Sprintf(\"Missing required parameter '%v'\", param)\n}\n\n\/\/ Errors provides a stream of errors from godless. Godless will attempt to handle any errors it can. Any errors received here indicate that bad things have happened.\nfunc (godless *Godless) Errors() <-chan error {\n\treturn godless.errch\n}\n\n\/\/ Shutdown stops all godless processes. 
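The findMissingParameters method above builds a single error that names every missing option by wrapping each new message around the previous error. That accumulation pattern in isolation, using only github.com/pkg/errors (Config here is a made-up stand-in for the real options struct):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Config stands in for the real options struct; only the validation
// pattern matters here.
type Config struct {
	URL      string
	KeyStore interface{}
}

// validate accumulates one error per missing field: the first miss
// creates the error, later misses wrap it, so the final message lists
// everything at once.
func (c Config) validate() error {
	var missing error
	addMissing := func(field string) {
		msg := fmt.Sprintf("Missing required parameter '%v'", field)
		if missing == nil {
			missing = errors.New(msg)
		} else {
			missing = errors.Wrap(missing, msg)
		}
	}
	if c.URL == "" {
		addMissing("URL")
	}
	if c.KeyStore == nil {
		addMissing("KeyStore")
	}
	return missing
}

func main() {
	err := Config{}.validate()
	fmt.Println(err) // Missing required parameter 'KeyStore': Missing required parameter 'URL'
}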
It does not wait for those goroutines to stop.\nfunc (godless *Godless) Shutdown() {\n\tgodless.stopch <- struct{}{}\n}\n\nfunc (godless *Godless) connectIpfs() error {\n\tclient := godless.IpfsClient\n\tpingTimeout := godless.IpfsPingTimeout\n\n\tpeer := &ipfs.IPFSPeer{\n\t\tUrl: godless.IpfsServiceUrl,\n\t\tClient: client,\n\t\tPingTimeout: pingTimeout,\n\t}\n\n\tif godless.FailEarly {\n\t\terr := peer.Connect()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgodless.store = peer\n\n\treturn nil\n}\n\nfunc (godless *Godless) connectCache() error {\n\tif godless.Cache != nil {\n\t\tgodless.HeadCache = godless.Cache\n\t\tgodless.IndexCache = godless.Cache\n\t\tgodless.NamespaceCache = godless.Cache\n\t\treturn nil\n\t}\n\n\tif godless.HeadCache == nil {\n\t\tgodless.HeadCache = cache.MakeResidentHeadCache()\n\t}\n\n\tif godless.IndexCache == nil {\n\t\tgodless.IndexCache = cache.MakeResidentIndexCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tif godless.NamespaceCache == nil {\n\t\tgodless.NamespaceCache = cache.MakeResidentNamespaceCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) setupNamespace() error {\n\tif godless.IndexHash != \"\" {\n\t\thead := crdt.IPFSPath(godless.IndexHash)\n\n\t\terr := godless.HeadCache.SetHead(head)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnamespaceOptions := service.RemoteNamespaceOptions{\n\t\tPulse: godless.Pulse,\n\t\tStore: godless.store,\n\t\tHeadCache: godless.Cache,\n\t\tIndexCache: godless.Cache,\n\t\tNamespaceCache: godless.Cache,\n\t\tKeyStore: godless.KeyStore,\n\t\tIsPublicIndex: godless.PublicServer,\n\t\tMemoryImage: cache.MakeResidentMemoryImage(),\n\t}\n\n\tgodless.remote = service.MakeRemoteNamespace(namespaceOptions)\n\treturn nil\n}\n\nfunc (godless *Godless) launchAPI() error {\n\tlimit := godless.APIQueryLimit\n\n\tif limit == 0 {\n\t\tlimit = 1\n\t}\n\n\tqueue := godless.PriorityQueue\n\n\tif queue == nil {\n\t\tqueue = cache.MakeResidentBufferQueue(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tapi, errch := service.LaunchKeyValueStore(godless.remote, queue, limit)\n\n\tgodless.addErrors(errch)\n\tgodless.api = api\n\n\treturn nil\n}\n\n\/\/ Serve serves the Godless webservice.\nfunc (godless *Godless) serveWeb() error {\n\taddr := godless.WebServiceAddr\n\n\tif addr == \"\" {\n\t\treturn nil\n\t}\n\n\twebService := &service.WebService{API: godless.api}\n\tstopch, err := http.Serve(addr, webService.Handler())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgodless.addStopper(stopch)\n\treturn nil\n}\n\n\/\/ Replicate shares data via the IPFS pubsub mechanism.\nfunc (godless *Godless) replicate() error {\n\ttopics := godless.Topics\n\tinterval := godless.ReplicateInterval\n\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\n\tpubsubTopics := make([]api.PubSubTopic, len(topics))\n\n\tfor i, t := range topics {\n\t\tpubsubTopics[i] = api.PubSubTopic(t)\n\t}\n\n\toptions := service.ReplicateOptions{\n\t\tAPI: godless.api,\n\t\tRemoteStore: godless.store,\n\t\tInterval: interval,\n\t\tTopics: pubsubTopics,\n\t\tKeyStore: godless.KeyStore,\n\t}\n\tstopch, errch := service.Replicate(options)\n\tgodless.addStopper(stopch)\n\tgodless.addErrors(errch)\n\treturn nil\n}\n\nfunc (godless *Godless) addStopper(stopch chan<- struct{}) {\n\tif godless.stopch == nil {\n\t\tgodless.stopch = make(chan struct{})\n\t\tgo func() {\n\t\t\tgodless.handleShutdown()\n\t\t}()\n\t}\n\n\tgodless.stoppers = append(godless.stoppers, stopch)\n}\n\nfunc (godless *Godless) handleShutdown() {\n\t<-godless.stopch\n\tlog.Info(\"Shutting 
down\")\n\tfor _, stopper := range godless.stoppers {\n\t\tgo close(stopper)\n\t}\n\n}\n\nfunc (godless *Godless) addErrors(errch <-chan error) {\n\tgodless.errwg.Add(1)\n\n\tif godless.errch == nil {\n\t\tgodless.errch = make(chan error)\n\t\tgo func() {\n\t\t\tgodless.errwg.Wait()\n\t\t\tclose(godless.errch)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tfor err := range errch {\n\t\t\tgodless.errch <- err\n\t\t}\n\t\tgodless.errwg.Done()\n\t}()\n}\n\n\/\/ Client is a Godless HTTP client.\ntype Client interface {\n\tSendQuery(*query.Query) (api.APIResponse, error)\n\tSendReflection(api.APIReflectionType) (api.APIResponse, error)\n}\n\n\/\/ MakeClient creates a Godless HTTP Client.\nfunc MakeClient(serviceAddr string) Client {\n\treturn service.MakeClient(serviceAddr)\n}\n\nfunc MakeClientWithHttp(serviceAddr string, webClient *gohttp.Client) Client {\n\treturn service.MakeClientWithHttp(serviceAddr, webClient)\n}\n\nfunc MakeKeyStore() api.KeyStore {\n\treturn &crypto.KeyStore{}\n}\n\nfunc breakOnError(pipeline []func() error) error {\n\tfor _, f := range pipeline {\n\t\terr := f()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ We don't know the right buffer size here, so let the cache package handle it.\nconst __UNKNOWN_BUFFER_SIZE = -1\nFix glitch with default caches\/\/ Godless is a peer-to-peer database running over IPFS.\n\/\/\n\/\/ Godless uses a Consistent Replicated Data Type called a Namespace to share schemaless data with peers.\n\/\/\n\/\/ This package is a facade to Godless internals.\n\/\/\n\/\/ Godless is in alpha, and should be considered experimental software.\npackage godless\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tgohttp \"net\/http\"\n\n\t\"github.com\/johnny-morrice\/godless\/api\"\n\t\"github.com\/johnny-morrice\/godless\/cache\"\n\t\"github.com\/johnny-morrice\/godless\/crdt\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/crypto\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/http\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/ipfs\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/service\"\n\t\"github.com\/johnny-morrice\/godless\/log\"\n\t\"github.com\/johnny-morrice\/godless\/query\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TODO allow single cache option.\n\/\/ Godless options.\ntype Options struct {\n\t\/\/ IpfsServiceUrl is required.\n\tIpfsServiceUrl string\n\t\/\/ KeyStore is required. A private Key store.\n\tKeyStore api.KeyStore\n\t\/\/ WebServiceAddr is optional. If not set, the webservice will be disabled.\n\tWebServiceAddr string\n\t\/\/ IndexHash is optional. Set to load an existing index from IPFS.\n\tIndexHash string\n\t\/\/ FailEarly will cause the godless process to crash if it cannot contact IPFS on startup.\n\tFailEarly bool\n\t\/\/ ReplicateInterval is optional. The duration between peer-to-peer replications.\n\tReplicateInterval time.Duration\n\tPulse time.Duration\n\t\/\/ Topics is optional. Two godless servers which share a topic will replicate indices. An empty topics slice will disable replication.\n\tTopics []string\n\t\/\/ IpfsClient is optional. Specify a HTTP client for IPFS.\n\tIpfsClient *gohttp.Client\n\t\/\/ IpfsPingTimeout is optional. Specify a lower timeout for \"Am I Connected?\" checks.\n\tIpfsPingTimeout time.Duration\n\t\/\/ Cache is optional. Build a 12-factor app by supplying your own remote cache.\n\t\/\/ HeadCache, IndexCache and NamespaceCache can be used to specify different caches for different data types.\n\tCache api.Cache\n\t\/\/ HeadCache is optional. 
Build a 12-factor app by supplying your own remote cache.\n\tHeadCache api.HeadCache\n\t\/\/ IndexCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tIndexCache api.IndexCache\n\t\/\/ NamespaceCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tNamespaceCache api.NamespaceCache\n\t\/\/ PriorityQueue is optional. Build a 12-factor app by supplying your own remote cache.\n\tPriorityQueue api.RequestPriorityQueue\n\t\/\/ APIQueryLimit is optional. Tune performance by setting the number of simultaneous queries.\n\tAPIQueryLimit int\n\t\/\/ PublicServer is optional. If false, the index will only be updated from peers who are in your public key list.\n\tPublicServer bool\n}\n\n\/\/ Godless is a peer-to-peer database. It shares structured data between peers, using IPFS as a backing store.\n\/\/ The core datastructure is a CRDT namespace which resembles a relational scheme in that it has tables, rows, and entries.\ntype Godless struct {\n\tOptions\n\terrch chan error\n\terrwg sync.WaitGroup\n\tstopch chan struct{}\n\tstoppers []chan<- struct{}\n\tstore api.RemoteStore\n\tremote api.RemoteNamespace\n\tapi api.APIService\n}\n\n\/\/ New creates a godless instance, connecting to any services, and providing any services, specified in the options.\nfunc New(options Options) (*Godless, error) {\n\tgodless := &Godless{Options: options}\n\n\tmissing := godless.findMissingParameters()\n\n\tif missing != nil {\n\t\treturn nil, missing\n\t}\n\n\tsetupFuncs := []func() error{\n\t\tgodless.connectIpfs,\n\t\tgodless.connectCache,\n\t\tgodless.setupNamespace,\n\t\tgodless.launchAPI,\n\t\tgodless.serveWeb,\n\t\tgodless.replicate,\n\t}\n\n\terr := breakOnError(setupFuncs)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgodless.report()\n\n\treturn godless, nil\n}\n\nfunc (godless *Godless) report() {\n\tif godless.PublicServer {\n\t\tlog.Info(\"Running public Godless API\")\n\t} else {\n\t\tlog.Info(\"Running private Godless API\")\n\t}\n\n\tprivCount := len(godless.KeyStore.GetAllPrivateKeys())\n\tpubCount := len(godless.KeyStore.GetAllPublicKeys())\n\n\tlog.Info(\"Godless API using %v private and %v public keys\", privCount, pubCount)\n}\n\nfunc (godless *Godless) findMissingParameters() error {\n\tvar missing error\n\tif godless.IpfsServiceUrl == \"\" {\n\t\tmsg := godless.missingParameterText(\"IpfsServiceUrl\")\n\t\tmissing = errors.New(msg)\n\t}\n\n\tif godless.KeyStore == nil {\n\t\tmsg := godless.missingParameterText(\"KeyStore\")\n\t\tif missing == nil {\n\t\t\tmissing = errors.New(msg)\n\t\t} else {\n\t\t\tmissing = errors.Wrap(missing, msg)\n\t\t}\n\t}\n\n\treturn missing\n}\n\nfunc (godless *Godless) missingParameterText(param string) string {\n\treturn fmt.Sprintf(\"Missing required parameter '%v'\", param)\n}\n\n\/\/ Errors provides a stream of errors from godless. Godless will attempt to handle any errors it can. Any errors received here indicate that bad things have happened.\nfunc (godless *Godless) Errors() <-chan error {\n\treturn godless.errch\n}\n\n\/\/ Shutdown stops all godless processes. 
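New, shown above, runs its setup as an ordered slice of func() error values and aborts at the first failure (breakOnError). The shape in miniature, as a runnable sketch:

package main

import (
	"errors"
	"fmt"
)

// runPipeline executes each step in order and aborts on the first
// error, which is exactly the contract of breakOnError above.
func runPipeline(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runPipeline([]func() error{
		func() error { fmt.Println("connect"); return nil },
		func() error { return errors.New("cache unavailable") },
		func() error { fmt.Println("never reached"); return nil },
	})
	fmt.Println("pipeline:", err)
}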
It does not wait for those goroutines to stop.\nfunc (godless *Godless) Shutdown() {\n\tgodless.stopch <- struct{}{}\n}\n\nfunc (godless *Godless) connectIpfs() error {\n\tclient := godless.IpfsClient\n\tpingTimeout := godless.IpfsPingTimeout\n\n\tpeer := &ipfs.IPFSPeer{\n\t\tUrl: godless.IpfsServiceUrl,\n\t\tClient: client,\n\t\tPingTimeout: pingTimeout,\n\t}\n\n\tif godless.FailEarly {\n\t\terr := peer.Connect()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgodless.store = peer\n\n\treturn nil\n}\n\nfunc (godless *Godless) connectCache() error {\n\tif godless.Cache != nil {\n\t\tgodless.HeadCache = godless.Cache\n\t\tgodless.IndexCache = godless.Cache\n\t\tgodless.NamespaceCache = godless.Cache\n\t\treturn nil\n\t}\n\n\tif godless.HeadCache == nil {\n\t\tgodless.HeadCache = cache.MakeResidentHeadCache()\n\t}\n\n\tif godless.IndexCache == nil {\n\t\tgodless.IndexCache = cache.MakeResidentIndexCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tif godless.NamespaceCache == nil {\n\t\tgodless.NamespaceCache = cache.MakeResidentNamespaceCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) setupNamespace() error {\n\tif godless.IndexHash != \"\" {\n\t\thead := crdt.IPFSPath(godless.IndexHash)\n\n\t\terr := godless.HeadCache.SetHead(head)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnamespaceOptions := service.RemoteNamespaceOptions{\n\t\tPulse: godless.Pulse,\n\t\tStore: godless.store,\n\t\tHeadCache: godless.HeadCache,\n\t\tIndexCache: godless.IndexCache,\n\t\tNamespaceCache: godless.NamespaceCache,\n\t\tKeyStore: godless.KeyStore,\n\t\tIsPublicIndex: godless.PublicServer,\n\t\tMemoryImage: cache.MakeResidentMemoryImage(),\n\t}\n\n\tgodless.remote = service.MakeRemoteNamespace(namespaceOptions)\n\treturn nil\n}\n\nfunc (godless *Godless) launchAPI() error {\n\tlimit := godless.APIQueryLimit\n\n\tif limit == 0 {\n\t\tlimit = 1\n\t}\n\n\tqueue := godless.PriorityQueue\n\n\tif queue == nil {\n\t\tqueue = cache.MakeResidentBufferQueue(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tapi, errch := service.LaunchKeyValueStore(godless.remote, queue, limit)\n\n\tgodless.addErrors(errch)\n\tgodless.api = api\n\n\treturn nil\n}\n\n\/\/ Serve serves the Godless webservice.\nfunc (godless *Godless) serveWeb() error {\n\taddr := godless.WebServiceAddr\n\n\tif addr == \"\" {\n\t\treturn nil\n\t}\n\n\twebService := &service.WebService{API: godless.api}\n\tstopch, err := http.Serve(addr, webService.Handler())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgodless.addStopper(stopch)\n\treturn nil\n}\n\n\/\/ Replicate shares data via the IPFS pubsub mechanism.\nfunc (godless *Godless) replicate() error {\n\ttopics := godless.Topics\n\tinterval := godless.ReplicateInterval\n\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\n\tpubsubTopics := make([]api.PubSubTopic, len(topics))\n\n\tfor i, t := range topics {\n\t\tpubsubTopics[i] = api.PubSubTopic(t)\n\t}\n\n\toptions := service.ReplicateOptions{\n\t\tAPI: godless.api,\n\t\tRemoteStore: godless.store,\n\t\tInterval: interval,\n\t\tTopics: pubsubTopics,\n\t\tKeyStore: godless.KeyStore,\n\t}\n\tstopch, errch := service.Replicate(options)\n\tgodless.addStopper(stopch)\n\tgodless.addErrors(errch)\n\treturn nil\n}\n\nfunc (godless *Godless) addStopper(stopch chan<- struct{}) {\n\tif godless.stopch == nil {\n\t\tgodless.stopch = make(chan struct{})\n\t\tgo func() {\n\t\t\tgodless.handleShutdown()\n\t\t}()\n\t}\n\n\tgodless.stoppers = append(godless.stoppers, stopch)\n}\n\nfunc (godless *Godless) handleShutdown() 
{\n\t<-godless.stopch\n\tlog.Info(\"Shutting down\")\n\tfor _, stopper := range godless.stoppers {\n\t\tgo close(stopper)\n\t}\n\n}\n\nfunc (godless *Godless) addErrors(errch <-chan error) {\n\tgodless.errwg.Add(1)\n\n\tif godless.errch == nil {\n\t\tgodless.errch = make(chan error)\n\t\tgo func() {\n\t\t\tgodless.errwg.Wait()\n\t\t\tclose(godless.errch)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tfor err := range errch {\n\t\t\tgodless.errch <- err\n\t\t}\n\t\tgodless.errwg.Done()\n\t}()\n}\n\n\/\/ Client is a Godless HTTP client.\ntype Client interface {\n\tSendQuery(*query.Query) (api.APIResponse, error)\n\tSendReflection(api.APIReflectionType) (api.APIResponse, error)\n}\n\n\/\/ MakeClient creates a Godless HTTP Client.\nfunc MakeClient(serviceAddr string) Client {\n\treturn service.MakeClient(serviceAddr)\n}\n\nfunc MakeClientWithHttp(serviceAddr string, webClient *gohttp.Client) Client {\n\treturn service.MakeClientWithHttp(serviceAddr, webClient)\n}\n\nfunc MakeKeyStore() api.KeyStore {\n\treturn &crypto.KeyStore{}\n}\n\nfunc breakOnError(pipeline []func() error) error {\n\tfor _, f := range pipeline {\n\t\terr := f()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ We don't know the right buffer size here, so let the cache package handle it.\nconst __UNKNOWN_BUFFER_SIZE = -1\n<|endoftext|>"} {"text":"\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, version 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage gofetch\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ ProgressReport represents the current download progress of a given file\ntype ProgressReport struct {\n\tURL string\n\t\/\/ Total length in bytes of the file being downloaded\n\tTotal int64\n\t\/\/ Written bytes to disk on a write by write basis. It does not accumulate.\n\tWrittenBytes int64\n}\n\n\/\/ goFetch represents an instance of gofetch, holding global configuration options.\ntype goFetch struct {\n\tdestDir string\n\tetag bool\n\tconcurrency int\n}\n\n\/\/ Option as explained in http:\/\/commandcenter.blogspot.com\/2014\/01\/self-referential-functions-and-design.html\ntype Option func(*goFetch)\n\n\/\/ DestDir allows you to set the destination directory for the downloaded files.\nfunc DestDir(dir string) Option {\n\treturn func(f *goFetch) {\n\t\tf.destDir = dir\n\t}\n}\n\n\/\/ Concurrency allows you to set the number of goroutines used to download a specific\n\/\/ file.\nfunc Concurrency(c int) Option {\n\treturn func(f *goFetch) {\n\t\tf.concurrency = c\n\t}\n}\n\n\/\/ ETag allows you to disable or enable ETag support, meaning that if an already\n\/\/ downloaded file is currently on disk and matches the ETag returned by the server,\n\/\/ it will not be downloaded again.\nfunc ETag(enable bool) Option {\n\treturn func(f *goFetch) {\n\t\tf.etag = enable\n\t}\n}\n\n\/\/ New creates a new instance of goFetch with the given options.\nfunc New(opts ...Option) *goFetch {\n\t\/\/ Creates instance and assigns defaults.\n\tgofetch := &goFetch{\n\t\tconcurrency: 1,\n\t\tdestDir: \".\/\",\n\t\tetag: true,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(gofetch)\n\t}\n\treturn gofetch\n}\n\n\/\/ Fetch downloads content from the provided URL. 
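One note on the "Fix glitch with default caches" change in the record that closed above: the old setupNamespace read godless.Cache directly, but connectCache only fills the per-type fields when no shared cache is given, so a default configuration handed nil caches to the namespace. A tiny self-contained model of the corrected fallback (the types are trimmed to the bare minimum and invented for the example):

package main

import "fmt"

// Cache is an interface stub; Shared is an optional one-for-everything
// cache, while Head and Index are the per-type slots the rest of the
// code must read.
type Cache interface{ Name() string }

type named string

func (n named) Name() string { return string(n) }

type options struct {
	Shared, Head, Index Cache
}

// resolveCaches mirrors connectCache: a shared cache overrides all
// slots, otherwise each nil slot gets its own resident default. After
// this runs, only Head/Index are safe to read, never Shared, which is
// the bug the commit above fixed.
func resolveCaches(o *options) {
	if o.Shared != nil {
		o.Head, o.Index = o.Shared, o.Shared
		return
	}
	if o.Head == nil {
		o.Head = named("resident-head")
	}
	if o.Index == nil {
		o.Index = named("resident-index")
	}
}

func main() {
	o := &options{}
	resolveCaches(o)
	fmt.Println(o.Head.Name(), o.Index.Name()) // resident-head resident-index
}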
It supports resuming and\n\/\/ parallelizing downloads while being very memory efficient.\nfunc (gf *goFetch) Fetch(url string, progressCh chan<- ProgressReport) (*os.File, error) {\n\tif url == \"\" {\n\t\treturn nil, errors.New(\"URL is required\")\n\t}\n\n\t\/\/ We need to make a preflight request to get the size of the content.\n\tres, err := http.Head(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !strings.HasPrefix(res.Status, \"2\") {\n\t\treturn nil, errors.New(\"HTTP requests returned a non 2xx status code\")\n\t}\n\n\tfileName := path.Base(url)\n\n\tvar etag string\n\tif gf.etag {\n\t\tetag = res.Header.Get(\"ETag\")\n\t\tfileName += etag\n\t}\n\n\tdestFilePath := filepath.Join(gf.destDir, fileName)\n\n\tfi, err := os.Stat(destFilePath)\n\tif err == nil && fi.Size() == res.ContentLength {\n\t\tif progressCh != nil {\n\t\t\tclose(progressCh)\n\t\t}\n\t\treturn os.Open(destFilePath)\n\t}\n\n\treturn gf.parallelFetch(url, destFilePath, res.ContentLength, progressCh)\n}\n\n\/\/ parallelFetch fetches using multiple goroutines, each piece is streamed down\n\/\/ to disk which makes it very efficient in terms of memory usage.\nfunc (gf *goFetch) parallelFetch(url, destFilePath string, length int64, progressCh chan<- ProgressReport) (*os.File, error) {\n\tif progressCh != nil {\n\t\tdefer close(progressCh)\n\t}\n\n\tvar wg sync.WaitGroup\n\n\treport := ProgressReport{Total: length}\n\tconcurrency := int64(gf.concurrency)\n\tchunkSize := length \/ concurrency\n\tremainingSize := length % concurrency\n\tchunksDir := filepath.Join(gf.destDir, path.Base(url)+\".chunks\")\n\n\tif err := os.MkdirAll(chunksDir, 0760); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar errs []error\n\tfor i := int64(0); i < concurrency; i++ {\n\t\tmin := chunkSize * i\n\t\tmax := chunkSize * (i + 1)\n\n\t\tif i == (concurrency - 1) {\n\t\t\t\/\/ Add the remaining bytes in the last request\n\t\t\tmax += remainingSize\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(min, max int64, chunkNumber int) {\n\t\t\tdefer wg.Done()\n\t\t\tchunkFile := filepath.Join(chunksDir, strconv.Itoa(chunkNumber))\n\n\t\t\terr := gf.fetch(url, chunkFile, min, max, report, progressCh)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}(min, max, int(i))\n\t}\n\twg.Wait()\n\n\tif len(errs) > 0 {\n\t\treturn nil, fmt.Errorf(\"Errors: \\n %s\", errs)\n\t}\n\n\tfile, err := gf.assembleChunks(destFilePath, chunksDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tos.RemoveAll(chunksDir)\n\n\t\/\/ Makes sure to return the file on the correct offset so it can be\n\t\/\/ consumed by users.\n\t_, err = file.Seek(0, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, err\n}\n\n\/\/ assembleChunks join all the data pieces together\nfunc (gf *goFetch) assembleChunks(destFile, chunksDir string) (*os.File, error) {\n\tfile, err := os.Create(destFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < gf.concurrency; i++ {\n\t\tchunkFile, err := os.Open(filepath.Join(chunksDir, strconv.Itoa(i)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif _, err := io.Copy(file, chunkFile); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchunkFile.Close()\n\t}\n\treturn file, nil\n}\n\n\/\/ fetch downloads files using one unbuffered HTTP connection and supports\n\/\/ resuming downloads if interrupted.\nfunc (gf *goFetch) fetch(url, destFile string, min, max int64,\n\treport ProgressReport, progressCh chan<- ProgressReport) error {\n\tclient := new(http.Client)\n\treq, err := http.NewRequest(\"GET\", 
url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ In order to resume previous interrupted downloads we need to open the file\n\t\/\/ in append mode.\n\tfile, err := os.OpenFile(destFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0660)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcurrSize := fi.Size()\n\n\t\/\/ There is nothing to do if file exists and was fully downloaded.\n\t\/\/ We do substraction between max and min to account for the last chunk\n\t\/\/ size, which may be of different size if division between res.ContentLength and config.SizeLimit\n\t\/\/ is not exact.\n\tif currSize == (max - min) {\n\t\treturn nil\n\t}\n\n\t\/\/ Adjusts min to resume file download from where it was left off.\n\tif currSize > 0 {\n\t\tmin = min + currSize\n\t}\n\n\t\/\/ Prepares writer to report download progress.\n\twriter := fetchWriter{\n\t\tWriter: file,\n\t\tprogressCh: progressCh,\n\t\tprogressReport: report,\n\t}\n\n\tbrange := fmt.Sprintf(\"bytes=%d-%d\", min, max-1)\n\tif max == -1 {\n\t\tbrange = fmt.Sprintf(\"bytes=%d-\", min)\n\t}\n\n\treq.Header.Add(\"Range\", brange)\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif !strings.HasPrefix(res.Status, \"2\") {\n\t\treturn errors.New(\"HTTP requests returned a non 2xx status code\")\n\t}\n\n\t_, err = io.Copy(&writer, res.Body)\n\treturn err\n}\n\n\/\/ fetchWriter implements a custom io.Writer so we can send granular\n\/\/ progress reports when streaming down content.\ntype fetchWriter struct {\n\tio.Writer\n\t\/\/progressCh is the channel sent by the user to get download updates.\n\tprogressCh chan<- ProgressReport\n\t\/\/ report is the structure sent through the progress channel.\n\tprogressReport ProgressReport\n}\n\nfunc (fw *fetchWriter) Write(b []byte) (int, error) {\n\tn, err := fw.Writer.Write(b)\n\n\tif fw.progressCh != nil {\n\t\tfw.progressReport.WrittenBytes = int64(n)\n\t\tfw.progressCh <- fw.progressReport\n\t}\n\n\treturn n, err\n}\nReuses HTTP Client instead of creating each time.\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, version 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage gofetch\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ ProgressReport represents the current download progress of a given file\ntype ProgressReport struct {\n\tURL string\n\t\/\/ Total length in bytes of the file being downloaded\n\tTotal int64\n\t\/\/ Written bytes to disk on a write by write basis. 
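The Range arithmetic in the fetch method above is easy to get off by one, so here it is isolated: each worker owns the half-open byte span [min, max), the HTTP Range header is inclusive at both ends, and resuming advances min past what is already on disk. A pure-function sketch of that math (rangeHeader is invented for the example):

package main

import "fmt"

// rangeHeader reproduces the header-building logic from fetch above: a
// chunk covering bytes [min, max) becomes the inclusive header
// "bytes=min-(max-1)", after min is advanced past what is already on
// disk. A max of -1 means "to the end of the file".
func rangeHeader(min, max, onDisk int64) (string, bool) {
	if onDisk == max-min {
		return "", false // chunk is complete, nothing to request
	}
	min += onDisk
	if max == -1 {
		return fmt.Sprintf("bytes=%d-", min), true
	}
	return fmt.Sprintf("bytes=%d-%d", min, max-1), true
}

func main() {
	fmt.Println(rangeHeader(0, 100, 0))   // bytes=0-99 true
	fmt.Println(rangeHeader(0, 100, 40))  // bytes=40-99 true
	fmt.Println(rangeHeader(0, 100, 100)) // "" false
}

A separate caveat worth flagging: in parallelFetch, errs is appended from several goroutines with no lock, which is a data race in both revisions; guarding the append with a sync.Mutex would fix it.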
It does not accumulate.\n\tWrittenBytes int64\n}\n\n\/\/ goFetch represents an instance of gofetch, holding global configuration options.\ntype goFetch struct {\n\tdestDir string\n\tetag bool\n\tconcurrency int\n\thttpClient *http.Client\n}\n\n\/\/ Option as explained in http:\/\/commandcenter.blogspot.com\/2014\/01\/self-referential-functions-and-design.html\ntype Option func(*goFetch)\n\n\/\/ DestDir allows you to set the destination directory for the downloaded files.\nfunc DestDir(dir string) Option {\n\treturn func(f *goFetch) {\n\t\tf.destDir = dir\n\t}\n}\n\n\/\/ Concurrency allows you to set the number of goroutines used to download a specific\n\/\/ file.\nfunc Concurrency(c int) Option {\n\treturn func(f *goFetch) {\n\t\tf.concurrency = c\n\t}\n}\n\n\/\/ ETag allows you to disable or enable ETag support, meaning that if an already\n\/\/ downloaded file is currently on disk and matches the ETag returned by the server,\n\/\/ it will not be downloaded again.\nfunc ETag(enable bool) Option {\n\treturn func(f *goFetch) {\n\t\tf.etag = enable\n\t}\n}\n\n\/\/ New creates a new instance of goFetch with the given options.\nfunc New(opts ...Option) *goFetch {\n\t\/\/ Creates instance and assigns defaults.\n\tgofetch := &goFetch{\n\t\tconcurrency: 1,\n\t\tdestDir: \".\/\",\n\t\tetag: true,\n\t\thttpClient: new(http.Client),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(gofetch)\n\t}\n\treturn gofetch\n}\n\n\/\/ Fetch downloads content from the provided URL. It supports resuming and\n\/\/ parallelizing downloads while being very memory efficient.\nfunc (gf *goFetch) Fetch(url string, progressCh chan<- ProgressReport) (*os.File, error) {\n\tif url == \"\" {\n\t\treturn nil, errors.New(\"URL is required\")\n\t}\n\n\t\/\/ We need to make a preflight request to get the size of the content.\n\tres, err := http.Head(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !strings.HasPrefix(res.Status, \"2\") {\n\t\treturn nil, errors.New(\"HTTP requests returned a non 2xx status code\")\n\t}\n\n\tfileName := path.Base(url)\n\n\tvar etag string\n\tif gf.etag {\n\t\tetag = res.Header.Get(\"ETag\")\n\t\tfileName += strings.Trim(etag, `\"`)\n\t}\n\n\tdestFilePath := filepath.Join(gf.destDir, fileName)\n\n\tfi, err := os.Stat(destFilePath)\n\tif err == nil && fi.Size() == res.ContentLength {\n\t\tif progressCh != nil {\n\t\t\tclose(progressCh)\n\t\t}\n\t\treturn os.Open(destFilePath)\n\t}\n\n\treturn gf.parallelFetch(url, destFilePath, res.ContentLength, progressCh)\n}\n\n\/\/ parallelFetch fetches using multiple goroutines, each piece is streamed down\n\/\/ to disk which makes it very efficient in terms of memory usage.\nfunc (gf *goFetch) parallelFetch(url, destFilePath string, length int64, progressCh chan<- ProgressReport) (*os.File, error) {\n\tif progressCh != nil {\n\t\tdefer close(progressCh)\n\t}\n\n\tvar wg sync.WaitGroup\n\n\treport := ProgressReport{Total: length}\n\tconcurrency := int64(gf.concurrency)\n\tchunkSize := length \/ concurrency\n\tremainingSize := length % concurrency\n\tchunksDir := filepath.Join(gf.destDir, path.Base(url)+\".chunks\")\n\n\tif err := os.MkdirAll(chunksDir, 0760); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar errs []error\n\tfor i := int64(0); i < concurrency; i++ {\n\t\tmin := chunkSize * i\n\t\tmax := chunkSize * (i + 1)\n\n\t\tif i == (concurrency - 1) {\n\t\t\t\/\/ Add the remaining bytes in the last request\n\t\t\tmax += remainingSize\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(min, max int64, chunkNumber int) {\n\t\t\tdefer wg.Done()\n\t\t\tchunkFile := 
filepath.Join(chunksDir, strconv.Itoa(chunkNumber))\n\n\t\t\terr := gf.fetch(url, chunkFile, min, max, report, progressCh)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}(min, max, int(i))\n\t}\n\twg.Wait()\n\n\tif len(errs) > 0 {\n\t\treturn nil, fmt.Errorf(\"Errors: \\n %s\", errs)\n\t}\n\n\tfile, err := gf.assembleChunks(destFilePath, chunksDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tos.RemoveAll(chunksDir)\n\n\t\/\/ Makes sure to return the file on the correct offset so it can be\n\t\/\/ consumed by users.\n\t_, err = file.Seek(0, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, err\n}\n\n\/\/ assembleChunks joins all the data pieces together\nfunc (gf *goFetch) assembleChunks(destFile, chunksDir string) (*os.File, error) {\n\tfile, err := os.Create(destFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < gf.concurrency; i++ {\n\t\tchunkFile, err := os.Open(filepath.Join(chunksDir, strconv.Itoa(i)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif _, err := io.Copy(file, chunkFile); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchunkFile.Close()\n\t}\n\treturn file, nil\n}\n\n\/\/ fetch downloads files using one unbuffered HTTP connection and supports\n\/\/ resuming downloads if interrupted.\nfunc (gf *goFetch) fetch(url, destFile string, min, max int64,\n\treport ProgressReport, progressCh chan<- ProgressReport) error {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ In order to resume previous interrupted downloads we need to open the file\n\t\/\/ in append mode.\n\tfile, err := os.OpenFile(destFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0660)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcurrSize := fi.Size()\n\n\t\/\/ There is nothing to do if file exists and was fully downloaded.\n\t\/\/ We do subtraction between max and min to account for the last chunk\n\t\/\/ size, which may differ if the division of the content length by the\n\t\/\/ concurrency level is not exact.\n\tif currSize == (max - min) {\n\t\treturn nil\n\t}\n\n\t\/\/ Adjusts min to resume file download from where it was left off.\n\tif currSize > 0 {\n\t\tmin = min + currSize\n\t}\n\n\t\/\/ Prepares writer to report download progress.\n\twriter := fetchWriter{\n\t\tWriter: file,\n\t\tprogressCh: progressCh,\n\t\tprogressReport: report,\n\t}\n\n\tbrange := fmt.Sprintf(\"bytes=%d-%d\", min, max-1)\n\tif max == -1 {\n\t\tbrange = fmt.Sprintf(\"bytes=%d-\", min)\n\t}\n\n\treq.Header.Add(\"Range\", brange)\n\tres, err := gf.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif !strings.HasPrefix(res.Status, \"2\") {\n\t\treturn errors.New(\"HTTP requests returned a non 2xx status code\")\n\t}\n\n\t_, err = io.Copy(&writer, res.Body)\n\treturn err\n}\n\n\/\/ fetchWriter implements a custom io.Writer so we can send granular\n\/\/ progress reports when streaming down content.\ntype fetchWriter struct {\n\tio.Writer\n\t\/\/ progressCh is the channel sent by the user to get download updates.\n\tprogressCh chan<- ProgressReport\n\t\/\/ report is the structure sent through the progress channel.\n\tprogressReport ProgressReport\n}\n\nfunc (fw *fetchWriter) Write(b []byte) (int, error) {\n\tn, err := fw.Writer.Write(b)\n\n\tif fw.progressCh != nil {\n\t\tfw.progressReport.WrittenBytes = int64(n)\n\t\tfw.progressCh <- fw.progressReport\n\t}\n\n\treturn n, 
err\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n \"net\"\n \"strconv\"\n \"fmt\"\n\/\/ collectd \"github.com\/paulhammond\/gocollectd\"\n goopt \"github.com\/droundy\/goopt\"\n)\n\n\nvar port = goopt.Int([]string{\"-p\", \"--port\"}, 8126, \"UDP Port to use\")\n\nfunc main() {\n goopt.Description = func() string {\n\t\treturn \"Metric Wrapper for (at first) graphite & elasticsearch.\"\n }\n goopt.Version = \"1.0\"\n goopt.Summary = \"gostats\"\n goopt.Parse(nil)\n\n\n addr, _ := net.ResolveUDPAddr(\"udp\", \":\" + strconv.Itoa(*port))\n sock, _ := net.ListenUDP(\"udp\", addr)\n\n i := 0\n for {\n i++\n buf := make([]byte, 1024)\n rlen, _, err := sock.ReadFromUDP(buf)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(string(buf[0:rlen]))\n fmt.Println(i)\n \/\/go handlePacket(buf, rlen)\n }\n}\nformattedpackage main\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\/\/ collectd \"github.com\/paulhammond\/gocollectd\"\n\tgoopt \"github.com\/droundy\/goopt\"\n)\n\nvar port = goopt.Int([]string{\"-p\", \"--port\"}, 8126, \"UDP Port to use\")\n\nfunc main() {\n\tgoopt.Description = func() string {\n\t\treturn \"Metric Wrapper for (at first) graphite & elasticsearch.\"\n\t}\n\tgoopt.Version = \"1.0\"\n\tgoopt.Summary = \"gostats\"\n\tgoopt.Parse(nil)\n\n\taddr, _ := net.ResolveUDPAddr(\"udp\", \":\"+strconv.Itoa(*port))\n\tsock, _ := net.ListenUDP(\"udp\", addr)\n\n\ti := 0\n\tfor {\n\t\ti++\n\t\tbuf := make([]byte, 1024)\n\t\trlen, _, err := sock.ReadFromUDP(buf)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Println(string(buf[0:rlen]))\n\t\tfmt.Println(i)\n\t\t\/\/go handlePacket(buf, rlen)\n\t}\n}\n<|endoftext|>"} {"text":"package gpsutil\n\nimport (\n\t\"math\"\n)\n\ntype LatLng struct {\n\tlat float64\n\tlng float64\n}\n\ntype GeohashDecoded struct {\n\tlat float64\n\tlng float64\n\terr struct {\n\t\tlat float64\n\t\tlgn float64\n\t}\n}\n\ntype BBox struct {\n\tSouthwest *LatLng\n\tNortheast *LatLng\n\tCenter *LatLng\n}\n\nfunc toRad(decDegrees float64) float64 {\n\treturn decDegrees * math.Pi \/ 180.0\n}\n\nfunc toDegrees(radians float64) float64 {\n\treturn 180.0 * radians \/ math.Pi\n}\nProvide public access for lat and lngpackage gpsutil\n\nimport (\n\t\"math\"\n)\n\ntype LatLng struct {\n\tlat float64\n\tlng float64\n}\n\nfunc (latlng *LatLng) Lat() float64 {\n\treturn latlng.lat\n}\n\nfunc (latlng *LatLng) Lng() float64 {\n\treturn latlng.lng\n}\n\ntype GeohashDecoded struct {\n\tlat float64\n\tlng float64\n\terr struct {\n\t\tlat float64\n\t\tlgn float64\n\t}\n}\n\ntype BBox struct {\n\tSouthwest *LatLng\n\tNortheast *LatLng\n\tCenter *LatLng\n}\n\nfunc toRad(decDegrees float64) float64 {\n\treturn decDegrees * math.Pi \/ 180.0\n}\n\nfunc toDegrees(radians float64) float64 {\n\treturn 180.0 * radians \/ math.Pi\n}\n<|endoftext|>"} {"text":"package bytetree\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/zenodb\/encoding\"\n\t. 
\"github.com\/getlantern\/zenodb\/expr\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst ctx = 56\n\nvar (\n\tepoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)\n)\n\nfunc TestByteTree(t *testing.T) {\n\tresolutionOut := 10 * time.Second\n\tresolutionIn := 1 * time.Second\n\n\tasOf := epoch.Add(-1 * resolutionOut)\n\tuntil := epoch\n\n\teOut := ADD(SUM(FIELD(\"a\")), SUM(FIELD(\"b\")))\n\teA := SUM(FIELD(\"a\"))\n\teB := SUM(FIELD(\"b\"))\n\n\tbt := New([]Expr{eOut}, []Expr{eA, eB}, resolutionOut, resolutionIn, asOf, until)\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 1), encoding.NewValue(eB, epoch, 1)}, nil)\n\tassert.Equal(t, 1, bt.Length())\n\tbt.Update([]byte(\"slow\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 2), encoding.NewValue(eB, epoch, 2)}, nil)\n\tassert.Equal(t, 2, bt.Length())\n\tbt.Update([]byte(\"water\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 3), encoding.NewValue(eB, epoch, 3)}, nil)\n\tassert.Equal(t, 3, bt.Length())\n\tbt.Update([]byte(\"slower\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 4), encoding.NewValue(eB, epoch, 4)}, nil)\n\tassert.Equal(t, 4, bt.Length())\n\tbt.Update([]byte(\"team\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 5), encoding.NewValue(eB, epoch, 5)}, nil)\n\tassert.Equal(t, 5, bt.Length())\n\tbt.Update([]byte(\"toast\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 6), encoding.NewValue(eB, epoch, 6)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"slow\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"water\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"slower\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"team\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"toast\"), []encoding.Sequence{encoding.NewValue(eA, epoch, 10), encoding.NewValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\n\t\/\/ This should be ignored because it's outside of the time range\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewValue(eA, epoch.Add(-1*resolutionOut), 50), encoding.NewValue(eB, epoch.Add(1*resolutionOut), 10)}, nil)\n\n\t\/\/ Check tree twice with different contexts to make sure removals don't affect\n\t\/\/ other contexts.\n\tcheckTree(ctx, t, bt, eOut)\n\tcheckTree(98, t, bt, eOut)\n\n\t\/\/ Copy tree and check again\n\tcheckTree(99, t, bt.Copy(), eOut)\n}\n\nfunc checkTree(ctx int64, t *testing.T, bt *Tree, e Expr) {\n\twalkedValues := 0\n\tbt.Walk(ctx, func(key []byte, data []encoding.Sequence) bool {\n\t\tif assert.Len(t, data, 1) {\n\t\t\twalkedValues++\n\t\t\tval, _ := data[0].ValueAt(0, e)\n\t\t\tswitch string(key) {\n\t\t\tcase \"test\":\n\t\t\t\tassert.EqualValues(t, 22, val, \"test\")\n\t\t\tcase \"slow\":\n\t\t\t\tassert.EqualValues(t, 24, val, \"slow\")\n\t\t\tcase \"water\":\n\t\t\t\tassert.EqualValues(t, 26, val, \"water\")\n\t\t\tcase \"slower\":\n\t\t\t\tassert.EqualValues(t, 28, val, \"slower\")\n\t\t\tcase 
\"team\":\n\t\t\t\tassert.EqualValues(t, 30, val, \"team\")\n\t\t\tcase \"toast\":\n\t\t\t\tassert.EqualValues(t, 32, val, \"toast\")\n\t\t\tdefault:\n\t\t\t\tassert.Fail(t, \"Unknown key\", string(key))\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\tassert.Equal(t, 6, walkedValues)\n\n\tval, _ := bt.Remove(ctx, []byte(\"test\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 22, val)\n\tval, _ = bt.Remove(ctx, []byte(\"slow\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 24, val)\n\tval, _ = bt.Remove(ctx, []byte(\"water\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 26, val)\n\tval, _ = bt.Remove(ctx, []byte(\"slower\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 28, val)\n\tval, _ = bt.Remove(ctx, []byte(\"team\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 30, val)\n\tval, _ = bt.Remove(ctx, []byte(\"toast\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 32, val)\n\tassert.Nil(t, bt.Remove(ctx, []byte(\"unknown\")))\n}\nFixed bytetree testspackage bytetree\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/zenodb\/encoding\"\n\t. \"github.com\/getlantern\/zenodb\/expr\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst ctx = 56\n\nvar (\n\tepoch = time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)\n)\n\nfunc TestByteTree(t *testing.T) {\n\tresolutionOut := 10 * time.Second\n\tresolutionIn := 1 * time.Second\n\n\tasOf := epoch.Add(-1 * resolutionOut)\n\tuntil := epoch\n\n\teOut := ADD(SUM(FIELD(\"a\")), SUM(FIELD(\"b\")))\n\teA := SUM(FIELD(\"a\"))\n\teB := SUM(FIELD(\"b\"))\n\n\tbt := New([]Expr{eOut}, []Expr{eA, eB}, resolutionOut, resolutionIn, asOf, until)\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 1), encoding.NewFloatValue(eB, epoch, 1)}, nil)\n\tassert.Equal(t, 1, bt.Length())\n\tbt.Update([]byte(\"slow\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 2), encoding.NewFloatValue(eB, epoch, 2)}, nil)\n\tassert.Equal(t, 2, bt.Length())\n\tbt.Update([]byte(\"water\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 3), encoding.NewFloatValue(eB, epoch, 3)}, nil)\n\tassert.Equal(t, 3, bt.Length())\n\tbt.Update([]byte(\"slower\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 4), encoding.NewFloatValue(eB, epoch, 4)}, nil)\n\tassert.Equal(t, 4, bt.Length())\n\tbt.Update([]byte(\"team\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 5), encoding.NewFloatValue(eB, epoch, 5)}, nil)\n\tassert.Equal(t, 5, bt.Length())\n\tbt.Update([]byte(\"toast\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 6), encoding.NewFloatValue(eB, epoch, 6)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"slow\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"water\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"slower\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"team\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\tbt.Update([]byte(\"toast\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch, 10), 
encoding.NewFloatValue(eB, epoch, 10)}, nil)\n\tassert.Equal(t, 6, bt.Length())\n\n\t\/\/ This should be ignored because it's outside of the time range\n\tbt.Update([]byte(\"test\"), []encoding.Sequence{encoding.NewFloatValue(eA, epoch.Add(-1*resolutionOut), 50), encoding.NewFloatValue(eB, epoch.Add(1*resolutionOut), 10)}, nil)\n\n\t\/\/ Check tree twice with different contexts to make sure removals don't affect\n\t\/\/ other contexts.\n\tcheckTree(ctx, t, bt, eOut)\n\tcheckTree(98, t, bt, eOut)\n\n\t\/\/ Copy tree and check again\n\tcheckTree(99, t, bt.Copy(), eOut)\n}\n\nfunc checkTree(ctx int64, t *testing.T, bt *Tree, e Expr) {\n\twalkedValues := 0\n\tbt.Walk(ctx, func(key []byte, data []encoding.Sequence) (bool, bool, error) {\n\t\tif assert.Len(t, data, 1) {\n\t\t\twalkedValues++\n\t\t\tval, _ := data[0].ValueAt(0, e)\n\t\t\tswitch string(key) {\n\t\t\tcase \"test\":\n\t\t\t\tassert.EqualValues(t, 22, val, \"test\")\n\t\t\tcase \"slow\":\n\t\t\t\tassert.EqualValues(t, 24, val, \"slow\")\n\t\t\tcase \"water\":\n\t\t\t\tassert.EqualValues(t, 26, val, \"water\")\n\t\t\tcase \"slower\":\n\t\t\t\tassert.EqualValues(t, 28, val, \"slower\")\n\t\t\tcase \"team\":\n\t\t\t\tassert.EqualValues(t, 30, val, \"team\")\n\t\t\tcase \"toast\":\n\t\t\t\tassert.EqualValues(t, 32, val, \"toast\")\n\t\t\tdefault:\n\t\t\t\tassert.Fail(t, \"Unknown key\", string(key))\n\t\t\t}\n\t\t}\n\t\treturn true, true, nil\n\t})\n\tassert.Equal(t, 6, walkedValues)\n\n\tval, _ := bt.Remove(ctx, []byte(\"test\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 22, val)\n\tval, _ = bt.Remove(ctx, []byte(\"slow\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 24, val)\n\tval, _ = bt.Remove(ctx, []byte(\"water\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 26, val)\n\tval, _ = bt.Remove(ctx, []byte(\"slower\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 28, val)\n\tval, _ = bt.Remove(ctx, []byte(\"team\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 30, val)\n\tval, _ = bt.Remove(ctx, []byte(\"toast\"))[0].ValueAt(0, e)\n\tassert.EqualValues(t, 32, val)\n\tassert.Nil(t, bt.Remove(ctx, []byte(\"unknown\")))\n}\n<|endoftext|>"} {"text":"package plugin\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype client struct {\n\tcmd *exec.Cmd\n\texited bool\n}\n\nfunc NewClient(cmd *exec.Cmd) *client {\n\treturn &client{\n\t\tcmd,\n\t\tfalse,\n\t}\n}\n\nfunc (c *client) Exited() bool {\n\treturn c.exited\n}\n\nfunc (c *client) Start() (address string, err error) {\n\tenv := []string{\n\t\t\"PACKER_PLUGIN_MIN_PORT=10000\",\n\t\t\"PACKER_PLUGIN_MAX_PORT=25000\",\n\t}\n\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\tc.cmd.Env = append(c.cmd.Env, env...)\n\tc.cmd.Stderr = stderr\n\tc.cmd.Stdout = stdout\n\terr = c.cmd.Start()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Make sure the command is properly cleaned up if there is an error\n\tdefer func() {\n\t\tr := recover()\n\n\t\tif err != nil || r != nil {\n\t\t\tc.cmd.Process.Kill()\n\t\t}\n\n\t\tif r != nil {\n\t\t\tpanic(r)\n\t\t}\n\t}()\n\n\t\/\/ Start goroutine to wait for process to exit\n\tgo func() {\n\t\tc.cmd.Wait()\n\t\tc.exited = true\n\t}()\n\n\t\/\/ Start goroutine that logs the stderr\n\tgo c.logStderr(stderr)\n\n\t\/\/ Some channels for the next step\n\ttimeout := time.After(1 * time.Minute)\n\n\t\/\/ Start looking for the address\n\tfor done := false; !done; {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\terr = errors.New(\"timeout while waiting for plugin to start\")\n\t\t\tdone = 
true\n\t\tdefault:\n\t\t}\n\n\t\tif err == nil && c.Exited() {\n\t\t\terr = errors.New(\"plugin exited before we could connect\")\n\t\t\tdone = true\n\t\t}\n\n\t\tif line, lerr := stdout.ReadBytes('\\n'); lerr == nil {\n\t\t\t\/\/ Trim the address and reset the err since we were able\n\t\t\t\/\/ to read some sort of address.\n\t\t\taddress = strings.TrimSpace(string(line))\n\t\t\terr = nil\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ If error is nil from previously, return now\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Wait a bit\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\treturn\n}\n\nfunc (c *client) Kill() {\n\tc.cmd.Process.Kill()\n}\n\nfunc (c *client) logStderr(r io.Reader) {\n\tbuf := bufio.NewReader(r)\n\n\tfor done := false; !done; {\n\t\tif c.Exited() {\n\t\t\tdone = true\n\t\t}\n\n\t\tvar err error\n\t\tfor err == nil {\n\t\t\tvar line string\n\t\t\tline, err = buf.ReadString('\\n')\n\t\t\tif line != \"\" {\n\t\t\t\tlog.Print(line)\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n}\npacker\/plugin: client kill waits for logging to completepackage plugin\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype client struct {\n\tcmd *exec.Cmd\n\texited bool\n\tdoneLogging bool\n}\n\nfunc NewClient(cmd *exec.Cmd) *client {\n\treturn &client{\n\t\tcmd,\n\t\tfalse,\n\t\tfalse,\n\t}\n}\n\nfunc (c *client) Exited() bool {\n\treturn c.exited\n}\n\nfunc (c *client) Start() (address string, err error) {\n\tenv := []string{\n\t\t\"PACKER_PLUGIN_MIN_PORT=10000\",\n\t\t\"PACKER_PLUGIN_MAX_PORT=25000\",\n\t}\n\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\tc.cmd.Env = append(c.cmd.Env, env...)\n\tc.cmd.Stderr = stderr\n\tc.cmd.Stdout = stdout\n\terr = c.cmd.Start()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Make sure the command is properly cleaned up if there is an error\n\tdefer func() {\n\t\tr := recover()\n\n\t\tif err != nil || r != nil {\n\t\t\tc.cmd.Process.Kill()\n\t\t}\n\n\t\tif r != nil {\n\t\t\tpanic(r)\n\t\t}\n\t}()\n\n\t\/\/ Start goroutine to wait for process to exit\n\tgo func() {\n\t\tc.cmd.Wait()\n\t\tc.exited = true\n\t}()\n\n\t\/\/ Start goroutine that logs the stderr\n\tgo c.logStderr(stderr)\n\n\t\/\/ Some channels for the next step\n\ttimeout := time.After(1 * time.Minute)\n\n\t\/\/ Start looking for the address\n\tfor done := false; !done; {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\terr = errors.New(\"timeout while waiting for plugin to start\")\n\t\t\tdone = true\n\t\tdefault:\n\t\t}\n\n\t\tif err == nil && c.Exited() {\n\t\t\terr = errors.New(\"plugin exited before we could connect\")\n\t\t\tdone = true\n\t\t}\n\n\t\tif line, lerr := stdout.ReadBytes('\\n'); lerr == nil {\n\t\t\t\/\/ Trim the address and reset the err since we were able\n\t\t\t\/\/ to read some sort of address.\n\t\t\taddress = strings.TrimSpace(string(line))\n\t\t\terr = nil\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ If error is nil from previously, return now\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Wait a bit\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\treturn\n}\n\nfunc (c *client) Kill() {\n\tc.cmd.Process.Kill()\n\n\t\/\/ Wait for the client to finish logging so we have a complete log\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor !c.doneLogging {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\n\t\tdone <- true\n\t}()\n\n\t<-done\n}\n\nfunc (c *client) logStderr(r io.Reader) {\n\tbuf := bufio.NewReader(r)\n\n\tfor done := false; !done; {\n\t\tif c.Exited() {\n\t\t\tdone = 
true\n\t\t}\n\n\t\tvar err error\n\t\tfor err == nil {\n\t\t\tvar line string\n\t\t\tline, err = buf.ReadString('\\n')\n\t\t\tif line != \"\" {\n\t\t\t\tlog.Print(line)\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\t\/\/ Flag that we've completed logging for others\n\tc.doneLogging = true\n}\n<|endoftext|>"} {"text":"package rocserv\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/shawnfeng\/sutil\/slog\"\n)\n\ntype ClientPool struct {\n\tpoolClient sync.Map\n\tpoolLen int\n\tcount int32\n\tFactory func(addr string) rpcClient\n}\n\nfunc NewClientPool(poolLen int, factory func(addr string) rpcClient) *ClientPool {\n\treturn &ClientPool{poolLen: poolLen, Factory: factory, count: 0}\n}\n\nfunc (m *ClientPool) Get(addr string) rpcClient {\n\tfun := \"ClientPool.Get -->\"\n\n\tpo := m.getPool(addr)\n\tvar c rpcClient\n\t\/\/ if pool full, retry get 3 times, each time sleep 500ms\n\ti := 0\n\tfor i < 3 {\n\t\tselect {\n\t\tcase c = <-po:\n\t\t\tslog.Tracef(\"%s get:%s len:%d\", fun, addr, len(po))\n\t\t\treturn c\n\t\tdefault:\n\t\t\tif atomic.LoadInt32(&m.count) > int32(m.poolLen) {\n\t\t\t\tslog.Errorf(\"get client from addr: %s reach max: %d, retry: %d\", addr, m.count, i)\n\t\t\t\ti++\n\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t} else {\n\t\t\t\tc = m.Factory(addr)\n\t\t\t\tif c != nil {\n\t\t\t\t\tatomic.AddInt32(&m.count, 1)\n\t\t\t\t}\n\t\t\t\treturn c\n\t\t\t}\n\t\t}\n\t}\n\tslog.Errorf(\"get client from addr: %s reach max: %d after retry 3 times\", addr, m.count)\n\treturn nil\n}\n\nfunc (m *ClientPool) getPool(addr string) chan rpcClient {\n\tfun := \"ClientPool.getPool -->\"\n\n\tvar tmp chan rpcClient\n\tvalue, ok := m.poolClient.Load(addr)\n\tif ok == true {\n\t\ttmp = value.(chan rpcClient)\n\t} else {\n\t\tslog.Infof(\"%s not found addr:%s\", fun, addr)\n\t\ttmp = make(chan rpcClient, m.poolLen)\n\t\tm.poolClient.Store(addr, tmp)\n\t}\n\treturn tmp\n}\n\n\/\/ Recycle a connection back into the pool\nfunc (m *ClientPool) Put(addr string, client rpcClient) {\n\tfun := \"ClientPool.Put -->\"\n\t\/\/ do nothing\n\tif client == nil {\n\t\treturn\n\t}\n\n\t\/\/ po is the connection pool for this address\n\tpo := m.getPool(addr)\n\tselect {\n\n\t\/\/ recycle the client connection\n\tcase po <- client:\n\t\tslog.Tracef(\"%s payback:%s len:%d\", fun, addr, len(po))\n\n\t\/\/ pool is full, cannot recycle: close the connection\n\tdefault:\n\t\tslog.Infof(\"%s full not payback:%s len:%d\", fun, addr, len(po))\n\t\tatomic.AddInt32(&m.count, -1)\n\t\tclient.Close()\n\t}\n}\nlog countpackage rocserv\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/shawnfeng\/sutil\/slog\"\n)\n\ntype ClientPool struct {\n\tpoolClient sync.Map\n\tpoolLen int\n\tcount int32\n\tFactory func(addr string) rpcClient\n}\n\nfunc NewClientPool(poolLen int, factory func(addr string) rpcClient) *ClientPool {\n\treturn &ClientPool{poolLen: poolLen, Factory: factory, count: 0}\n}\n\nfunc (m *ClientPool) Get(addr string) rpcClient {\n\tfun := \"ClientPool.Get -->\"\n\n\tpo := m.getPool(addr)\n\tvar c rpcClient\n\t\/\/ if pool full, retry get 3 times, each time sleep 500ms\n\ti := 0\n\tfor i < 3 {\n\t\tselect {\n\t\tcase c = <-po:\n\t\t\tslog.Tracef(\"%s get:%s len:%d, count:%d\", fun, addr, len(po), atomic.LoadInt32(&m.count))\n\t\t\treturn c\n\t\tdefault:\n\t\t\tif atomic.LoadInt32(&m.count) > int32(m.poolLen) {\n\t\t\t\tslog.Errorf(\"get client from addr: %s reach max: %d, retry: %d\", addr, m.count, i)\n\t\t\t\ti++\n\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t} else {\n\t\t\t\tc = m.Factory(addr)\n\t\t\t\tif c != nil {\n\t\t\t\t\tatomic.AddInt32(&m.count, 1)\n\t\t\t\t}\n\t\t\t\treturn c\n\t\t\t}\n\t\t}\n\t}\n\tslog.Errorf(\"get client from addr: %s reach max: %d after retry 3 times\", addr, m.count)\n\treturn nil\n}\n\nfunc (m *ClientPool) getPool(addr string) chan rpcClient {\n\tfun := \"ClientPool.getPool -->\"\n\n\tvar tmp chan rpcClient\n\tvalue, ok := m.poolClient.Load(addr)\n\tif ok == true {\n\t\ttmp = value.(chan rpcClient)\n\t} else {\n\t\tslog.Infof(\"%s not found addr:%s\", fun, addr)\n\t\ttmp = make(chan rpcClient, m.poolLen)\n\t\tm.poolClient.Store(addr, tmp)\n\t}\n\treturn tmp\n}\n\n\/\/ Recycle a connection back into the pool\nfunc (m *ClientPool) Put(addr string, client rpcClient) {\n\tfun := \"ClientPool.Put -->\"\n\t\/\/ do nothing\n\tif client == nil {\n\t\treturn\n\t}\n\n\t\/\/ po is the connection pool for this address\n\tpo := m.getPool(addr)\n\tselect {\n\n\t\/\/ recycle the client connection\n\tcase po <- client:\n\t\tslog.Tracef(\"%s payback:%s len:%d, count:%d\", fun, addr, len(po), atomic.LoadInt32(&m.count))\n\n\t\/\/ pool is full, cannot recycle: close the connection\n\tdefault:\n\t\tslog.Infof(\"%s full not payback:%s len:%d, count:%d\", fun, addr, len(po), atomic.LoadInt32(&m.count))\n\t\tatomic.AddInt32(&m.count, -1)\n\t\tclient.Close()\n\t}\n}\n<|endoftext|>"}
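The ClientPool above is a buffered channel per address: Get prefers an idle client from the channel and falls back to the factory under a counted cap, while Put returns the client or closes it when the buffer is full. A freestanding miniature of that channel-as-pool idiom (Conn is a stand-in for rpcClient):

package main

import "fmt"

// Conn stands in for rpcClient.
type Conn struct{ id int }

// Pool is the same idea as ClientPool for a single address: the
// buffered channel holds idle connections, the factory covers misses.
type Pool struct {
	idle    chan *Conn
	factory func() *Conn
}

func NewPool(size int, factory func() *Conn) *Pool {
	return &Pool{idle: make(chan *Conn, size), factory: factory}
}

// Get returns an idle connection if one is buffered, otherwise a new one.
func (p *Pool) Get() *Conn {
	select {
	case c := <-p.idle:
		return c
	default:
		return p.factory()
	}
}

// Put recycles the connection unless the buffer is full, in which case
// it is simply dropped (the real code closes it and decrements the count).
func (p *Pool) Put(c *Conn) {
	select {
	case p.idle <- c:
	default:
	}
}

func main() {
	next := 0
	p := NewPool(2, func() *Conn { next++; return &Conn{id: next} })
	a := p.Get() // miss: factory makes #1
	p.Put(a)
	b := p.Get()            // hit: #1 comes back
	fmt.Println(a.id, b.id) // 1 1
}

Worth noting against the real code: getPool's Load-then-Store is racy under concurrent first use of an address (two goroutines can each create and store a channel, stranding clients in the losing one); sync.Map's LoadOrStore exists to close exactly that window.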
 {"text":"\/\/ Copyright 2016 The goscope Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gui\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"os\"\n\n\t\"github.com\/zagrodzki\/goscope\/scope\"\n)\n\ntype aggrPoint struct {\n\tsumY int\n\tsizeY int\n}\n\nfunc (p *aggrPoint) add(y int) {\n\tp.sumY += y\n\tp.sizeY++\n}\n\nfunc (p *aggrPoint) toPoint(x int) image.Point {\n\treturn image.Point{x, p.sumY \/ p.sizeY}\n}\n\n\/\/ ZeroAndScale represents the position of zero and the scale of the plot\ntype ZeroAndScale struct {\n\t\/\/ the position of Y=0 (0 <= Zero <= 1) given as\n\t\/\/ the fraction of the window height counting from the top\n\tZero float64\n\t\/\/ scale of the plot in sample units per pixel\n\tScale float64\n}\n\nfunc samplesToPoints(samples []scope.Sample, zeroAndScale ZeroAndScale, start, end image.Point) []image.Point {\n\tif len(samples) == 0 {\n\t\treturn nil\n\t}\n\n\tsampleMaxY := zeroAndScale.Zero * zeroAndScale.Scale\n\tsampleMinY := (zeroAndScale.Zero - 1) * zeroAndScale.Scale\n\tsampleWidthX := float64(len(samples) - 1)\n\tsampleWidthY := sampleMaxY - sampleMinY\n\n\tpixelStartX := float64(start.X)\n\tpixelEndY := float64(end.Y - 1)\n\tpixelWidthX := float64(end.X - start.X - 1)\n\tpixelWidthY := float64(end.Y - start.Y - 1)\n\tratioX := pixelWidthX \/ sampleWidthX\n\tratioY := pixelWidthY \/ sampleWidthY\n\n\tpoints := make([]image.Point, end.Y-start.Y+1)\n\tlastAggr := aggrPoint{}\n\tlastX := start.X\n\tfor i, y := range samples {\n\t\tmapX := int(pixelStartX + float64(i)*ratioX)\n\t\tmapY := int(pixelEndY - float64(y-scope.Sample(sampleMinY))*ratioY)\n\t\tif lastX != mapX {\n\t\t\tpoints = append(points, lastAggr.toPoint(lastX))\n\t\t\tlastX = mapX\n\t\t\tlastAggr = aggrPoint{}\n\t\t}\n\t\tlastAggr.add(mapY)\n\t}\n\tpoints = append(points, 
lastAggr.toPoint(lastX))\n\n\treturn points\n}\n\n\/\/ Plot represents the entire plotting area.\ntype Plot struct {\n\t*image.RGBA\n}\n\nvar (\n\tbgCache *image.RGBA\n\tbgColor color.RGBA\n)\n\nfunc background(r image.Rectangle, col color.RGBA) *image.RGBA {\n\timg := image.NewRGBA(r)\n\tpix := img.Pix\n\tfor i := 0; i < len(pix); i = i + 4 {\n\t\tpix[i] = col.R\n\t\tpix[i+1] = col.G\n\t\tpix[i+2] = col.B\n\t\tpix[i+3] = col.A\n\t}\n\treturn img\n}\n\n\/\/ Fill fills the plot with a background image of the same size.\nfunc (plot Plot) Fill(col color.RGBA) {\n\tif bgCache == nil || bgCache.Bounds() != plot.Bounds() || bgColor != col {\n\t\tbgCache = background(plot.Bounds(), col)\n\t\tbgColor = col\n\t}\n\tcopy(plot.Pix, bgCache.Pix)\n}\n\nfunc isInside(x, y int, start, end image.Point) bool {\n\treturn x >= start.X && x <= end.X && y >= start.Y && y <= end.Y\n}\n\n\/\/ DrawLine draws a straight line from pixel p1 to p2.\n\/\/ Only the line fragment inside the image rectangle defined by\n\/\/ starting (upper left) and ending (lower right) pixel is drawn.\nfunc (plot Plot) DrawLine(p1, p2 image.Point, start, end image.Point, col color.RGBA) {\n\tif p1.X == p2.X { \/\/ vertical line\n\t\tfor i := min(p1.Y, p2.Y); i <= max(p1.Y, p2.Y); i++ {\n\t\t\tplot.SetRGBA(p1.X, i, col)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ Calculating the parameters of the equation\n\t\/\/ of the straight line (in the form y=a*x+b)\n\t\/\/ passing through p1 and p2.\n\n\t\/\/ slope of the line\n\ta := float64(p1.Y-p2.Y) \/ float64(p1.X-p2.X)\n\t\/\/ intercept of the line\n\tb := float64(p1.Y) - float64(p1.X)*a\n\n\t\/\/ To avoid visual \"gaps\" between the pixels we switch on,\n\t\/\/ we draw the line in one of two ways.\n\tif abs(p1.X-p2.X) >= abs(p1.Y-p2.Y) {\n\t\t\/\/ If the line is more horizontal than vertical,\n\t\t\/\/ for every pixel column between p1 and p2\n\t\t\/\/ we find and switch on the pixel closest to y=a*x+b\n\t\tfor i := min(p1.X, p2.X); i <= max(p1.X, p2.X); i++ {\n\t\t\ty := int(a*float64(i) + b)\n\t\t\tif isInside(i, y, start, end) {\n\t\t\t\tplot.SetRGBA(i, y, col)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ If the line is more vertical than horizontal,\n\t\t\/\/ for every pixel row between p1 and p2\n\t\t\/\/ we find and switch on the pixel closest to y=a*x+b\n\t\tfor i := min(p1.Y, p2.Y); i <= max(p1.Y, p2.Y); i++ {\n\t\t\tx := int((float64(i) - b) \/ a)\n\t\t\tif isInside(x, i, start, end) {\n\t\t\t\tplot.SetRGBA(x, i, col)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ DrawSamples draws samples in the image rectangle defined by\n\/\/ starting (upper left) and ending (lower right) pixel.\nfunc (plot Plot) DrawSamples(samples []scope.Sample, zeroAndScale ZeroAndScale, start, end image.Point, col color.RGBA) {\n\tpoints := samplesToPoints(samples, zeroAndScale, start, end)\n\tfor i := 1; i < len(points); i++ {\n\t\tplot.DrawLine(points[i-1], points[i], start, end, col)\n\t}\n}\n\n\/\/ DrawAll draws samples from all the channels in the plot.\nfunc (plot Plot) DrawAll(samples map[scope.ChanID][]scope.Sample, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) {\n\tplot.Fill(color.RGBA{255, 255, 255, 255})\n\tb := plot.Bounds()\n\tfor id, v := range samples {\n\t\tpar, exists := zas[id]\n\t\tif !exists {\n\t\t\tpar = ZeroAndScale{0.5, 2}\n\t\t}\n\t\tcol, exists := cols[id]\n\t\tif !exists {\n\t\t\tcol = color.RGBA{0, 0, 0, 255}\n\t\t}\n\t\tplot.DrawSamples(v, par, b.Min, b.Max, col)\n\t}\n}\n\n\/\/ DrawFromDevice draws samples from the device in the plot.\nfunc (plot Plot) DrawFromDevice(dev scope.Device, 
zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) error {\n\tdata, stop, err := dev.StartSampling()\n\tdefer stop()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsamples := (<-data).Samples\n\tplot.DrawAll(samples, zas, cols)\n\treturn nil\n}\n\n\/\/ CreatePlot plots samples from the device.\nfunc CreatePlot(dev scope.Device, width, height int, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) (Plot, error) {\n\tplot := Plot{image.NewRGBA(image.Rect(0, 0, width, height))}\n\terr := plot.DrawFromDevice(dev, zas, cols)\n\treturn plot, err\n}\n\n\/\/ PlotToPng creates a plot of the samples from the device\n\/\/ and saves it as PNG.\nfunc PlotToPng(dev scope.Device, width, height int, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA, outputFile string) error {\n\tplot, err := CreatePlot(dev, width, height, zas, cols)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(outputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tpng.Encode(f, plot)\n\treturn nil\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc abs(a int) int {\n\tif a < 0 {\n\t\treturn -a\n\t}\n\treturn a\n}\nbugfix: points slice filled from index 0\/\/ Copyright 2016 The goscope Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gui\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/png\"\n\t\"os\"\n\n\t\"github.com\/zagrodzki\/goscope\/scope\"\n)\n\ntype aggrPoint struct {\n\tsumY int\n\tsizeY int\n}\n\nfunc (p *aggrPoint) add(y int) {\n\tp.sumY += y\n\tp.sizeY++\n}\n\nfunc (p *aggrPoint) toPoint(x int) image.Point {\n\treturn image.Point{x, p.sumY \/ p.sizeY}\n}\n\n\/\/ ZeroAndScale represents the position of zero and the scale of the plot\ntype ZeroAndScale struct {\n\t\/\/ the position of Y=0 (0 <= Zero <= 1) given as\n\t\/\/ the fraction of the window height counting from the top\n\tZero float64\n\t\/\/ scale of the plot in sample units per pixel\n\tScale float64\n}\n\nfunc samplesToPoints(samples []scope.Sample, zeroAndScale ZeroAndScale, start, end image.Point) []image.Point {\n\tif len(samples) == 0 {\n\t\treturn nil\n\t}\n\n\tsampleMaxY := zeroAndScale.Zero * zeroAndScale.Scale\n\tsampleMinY := (zeroAndScale.Zero - 1) * zeroAndScale.Scale\n\tsampleWidthX := float64(len(samples) - 1)\n\tsampleWidthY := sampleMaxY - sampleMinY\n\n\tpixelStartX := float64(start.X)\n\tpixelEndY := float64(end.Y - 1)\n\tpixelWidthX := float64(end.X - start.X - 1)\n\tpixelWidthY := float64(end.Y - start.Y - 1)\n\tratioX := pixelWidthX \/ sampleWidthX\n\tratioY := pixelWidthY \/ sampleWidthY\n\n\tpoints := make([]image.Point, end.X-start.X)\n\tlastAggr := aggrPoint{}\n\tlastX := start.X\n\tpi := 0\n\tfor i, y := range samples {\n\t\tmapX := int(pixelStartX + float64(i)*ratioX)\n\t\tmapY := int(pixelEndY - float64(y-scope.Sample(sampleMinY))*ratioY)\n\t\tif lastX != mapX {\n\t\t\tpoints[pi] 
= lastAggr.toPoint(lastX)\n\t\t\tpi++\n\t\t\tlastX = mapX\n\t\t\tlastAggr = aggrPoint{}\n\t\t}\n\t\tlastAggr.add(mapY)\n\t}\n\tpoints[pi] = lastAggr.toPoint(lastX)\n\tpi++\n\n\treturn points[:pi]\n}\n\n\/\/ Plot represents the entire plotting area.\ntype Plot struct {\n\t*image.RGBA\n}\n\nvar (\n\tbgCache *image.RGBA\n\tbgColor color.RGBA\n)\n\nfunc background(r image.Rectangle, col color.RGBA) *image.RGBA {\n\timg := image.NewRGBA(r)\n\tpix := img.Pix\n\tfor i := 0; i < len(pix); i = i + 4 {\n\t\tpix[i] = col.R\n\t\tpix[i+1] = col.G\n\t\tpix[i+2] = col.B\n\t\tpix[i+3] = col.A\n\t}\n\treturn img\n}\n\n\/\/ Fill fills the plot with a background image of the same size.\nfunc (plot Plot) Fill(col color.RGBA) {\n\tif bgCache == nil || bgCache.Bounds() != plot.Bounds() || bgColor != col {\n\t\tbgCache = background(plot.Bounds(), col)\n\t\tbgColor = col\n\t}\n\tcopy(plot.Pix, bgCache.Pix)\n}\n\nfunc isInside(x, y int, start, end image.Point) bool {\n\treturn x >= start.X && x <= end.X && y >= start.Y && y <= end.Y\n}\n\n\/\/ DrawLine draws a straight line from pixel p1 to p2.\n\/\/ Only the line fragment inside the image rectangle defined by\n\/\/ starting (upper left) and ending (lower right) pixel is drawn.\nfunc (plot Plot) DrawLine(p1, p2 image.Point, start, end image.Point, col color.RGBA) {\n\tif p1.X == p2.X { \/\/ vertical line\n\t\tfor i := min(p1.Y, p2.Y); i <= max(p1.Y, p2.Y); i++ {\n\t\t\tplot.SetRGBA(p1.X, i, col)\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ Calculating the parameters of the equation\n\t\/\/ of the straight line (in the form y=a*x+b)\n\t\/\/ passing through p1 and p2.\n\n\t\/\/ slope of the line\n\ta := float64(p1.Y-p2.Y) \/ float64(p1.X-p2.X)\n\t\/\/ intercept of the line\n\tb := float64(p1.Y) - float64(p1.X)*a\n\n\t\/\/ To avoid visual \"gaps\" between the pixels we switch on,\n\t\/\/ we draw the line in one of two ways.\n\tif abs(p1.X-p2.X) >= abs(p1.Y-p2.Y) {\n\t\t\/\/ If the line is more horizontal than vertical,\n\t\t\/\/ for every pixel column between p1 and p2\n\t\t\/\/ we find and switch on the pixel closest to y=a*x+b\n\t\tfor i := min(p1.X, p2.X); i <= max(p1.X, p2.X); i++ {\n\t\t\ty := int(a*float64(i) + b)\n\t\t\tif isInside(i, y, start, end) {\n\t\t\t\tplot.SetRGBA(i, y, col)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ If the line is more vertical than horizontal,\n\t\t\/\/ for every pixel row between p1 and p2\n\t\t\/\/ we find and switch on the pixel closest to y=a*x+b\n\t\tfor i := min(p1.Y, p2.Y); i <= max(p1.Y, p2.Y); i++ {\n\t\t\tx := int((float64(i) - b) \/ a)\n\t\t\tif isInside(x, i, start, end) {\n\t\t\t\tplot.SetRGBA(x, i, col)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ DrawSamples draws samples in the image rectangle defined by\n\/\/ starting (upper left) and ending (lower right) pixel.\nfunc (plot Plot) DrawSamples(samples []scope.Sample, zeroAndScale ZeroAndScale, start, end image.Point, col color.RGBA) {\n\tpoints := samplesToPoints(samples, zeroAndScale, start, end)\n\tfor i := 1; i < len(points); i++ {\n\t\tplot.DrawLine(points[i-1], points[i], start, end, col)\n\t}\n}\n\n\/\/ DrawAll draws samples from all the channels in the plot.\nfunc (plot Plot) DrawAll(samples map[scope.ChanID][]scope.Sample, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) {\n\tplot.Fill(color.RGBA{255, 255, 255, 255})\n\tb := plot.Bounds()\n\tfor id, v := range samples {\n\t\tpar, exists := zas[id]\n\t\tif !exists {\n\t\t\tpar = ZeroAndScale{0.5, 2}\n\t\t}\n\t\tcol, exists := cols[id]\n\t\tif !exists {\n\t\t\tcol = color.RGBA{0, 0, 0, 
255}\n\t\t}\n\t\tplot.DrawSamples(v, par, b.Min, b.Max, col)\n\t}\n}\n\n\/\/ DrawFromDevice draws samples from the device in the plot.\nfunc (plot Plot) DrawFromDevice(dev scope.Device, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) error {\n\tdata, stop, err := dev.StartSampling()\n\tdefer stop()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsamples := (<-data).Samples\n\tplot.DrawAll(samples, zas, cols)\n\treturn nil\n}\n\n\/\/ CreatePlot plots samples from the device.\nfunc CreatePlot(dev scope.Device, width, height int, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA) (Plot, error) {\n\tplot := Plot{image.NewRGBA(image.Rect(0, 0, width, height))}\n\terr := plot.DrawFromDevice(dev, zas, cols)\n\treturn plot, err\n}\n\n\/\/ PlotToPng creates a plot of the samples from the device\n\/\/ and saves it as PNG.\nfunc PlotToPng(dev scope.Device, width, height int, zas map[scope.ChanID]ZeroAndScale, cols map[scope.ChanID]color.RGBA, outputFile string) error {\n\tplot, err := CreatePlot(dev, width, height, zas, cols)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(outputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tpng.Encode(f, plot)\n\treturn nil\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc abs(a int) int {\n\tif a < 0 {\n\t\treturn -a\n\t}\n\treturn a\n}\n<|endoftext|>"} {"text":"package gui\n\nimport(\n \"glop\/gin\"\n \"gl\"\n)\n\ntype Point struct {\n X,Y int\n}\nfunc (p Point) Add(q Point) Point {\n return Point{\n X : p.X + q.X,\n Y : p.Y + q.Y,\n }\n}\nfunc (p Point) Inside(r Region) bool {\n if p.X < r.X { return false }\n if p.Y < r.Y { return false }\n if p.X > r.X + r.Dx { return false }\n if p.Y > r.Y + r.Dy { return false }\n return true\n}\ntype Dims struct {\n Dx,Dy int\n}\ntype Region struct {\n Point\n Dims\n}\nfunc (r Region) Add(p Point) Region {\n return Region{\n r.Point.Add(p),\n r.Dims,\n }\n}\n\n\/\/ Need a global stack of regions because opengl only handles pushing\/popping\n\/\/ the state of the enable bits for each clip plane, not the planes themselves\nvar clippers []Region\nfunc (r Region) setClipPlanes() {\n var eqs [][4]float64\n eqs = append(eqs, [4]float64{ 1, 0, 0, -float64(r.X)})\n eqs = append(eqs, [4]float64{-1, 0, 0, float64(r.X + r.Dx)})\n eqs = append(eqs, [4]float64{ 0, 1, 0, -float64(r.Y)})\n eqs = append(eqs, [4]float64{ 0,-1, 0, float64(r.Y + r.Dy)})\n gl.ClipPlane(gl.CLIP_PLANE0, &eqs[0][0])\n gl.ClipPlane(gl.CLIP_PLANE1, &eqs[1][0])\n gl.ClipPlane(gl.CLIP_PLANE2, &eqs[2][0])\n gl.ClipPlane(gl.CLIP_PLANE3, &eqs[3][0])\n}\nfunc (r Region) PushClipPlanes() {\n if len(clippers) == 0 {\n gl.Enable(gl.CLIP_PLANE0)\n gl.Enable(gl.CLIP_PLANE1)\n gl.Enable(gl.CLIP_PLANE2)\n gl.Enable(gl.CLIP_PLANE3)\n }\n r.setClipPlanes()\n clippers = append(clippers, r)\n}\nfunc (r Region) PopClipPlanes() {\n clippers = clippers[0 : len(clippers) - 1]\n if len(clippers) == 0 {\n gl.Disable(gl.CLIP_PLANE0)\n gl.Disable(gl.CLIP_PLANE1)\n gl.Disable(gl.CLIP_PLANE2)\n gl.Disable(gl.CLIP_PLANE3)\n } else {\n clippers[len(clippers) - 1].setClipPlanes()\n }\n}\n\n\n\/\/func (r Region) setViewport() {\n\/\/ gl.Viewport(r.Point.X, r.Point.Y, r.Dims.Dx, r.Dims.Dy)\n\/\/}\n\ntype Zone interface {\n \/\/ Returns the dimensions that this Widget would like available to\n \/\/ render itself. 
A Widget should only update the value it returns from\n \/\/ this method when its Think() method is called.\n Requested() Dims\n\n \/\/ Returns ex,ey, where ex and ey indicate whether this Widget is\n \/\/ capable of expanding along the X and Y axes, respectively.\n Expandable() (bool,bool)\n\n \/\/ Returns the region that this Widget used to render itself the last\n \/\/ time it was rendered. Should be completely contained within the\n \/\/ region that was passed to it on its last call to Render.\n Rendered() Region\n}\n\ntype EventGroup struct {\n gin.EventGroup\n Focus bool\n}\n\ntype Widget interface {\n Zone\n Think(int64)\n\n \/\/ Returns true if this widget or any of its children consumed the\n \/\/ event group\n Respond(*Gui,EventGroup) bool\n\n Draw(Region)\n}\ntype CoreWidget interface {\n DoThink(int64)\n\n \/\/ If take_focus is true, then the EventGroup will be consumed,\n \/\/ regardless of the value of consume\n DoRespond(EventGroup) (consume,take_focus bool)\n Zone\n\n Draw(Region)\n GetChildren() []Widget\n}\ntype EmbeddedWidget interface {\n Think(int64)\n Respond(*Gui, EventGroup) (consume bool)\n}\ntype BasicWidget struct {\n CoreWidget\n}\nfunc (w *BasicWidget) Think(t int64) {\n kids := w.GetChildren()\n for i := range kids {\n kids[i].Think(t)\n }\n w.DoThink(t)\n}\nfunc (w *BasicWidget) Respond(gui *Gui, event_group EventGroup) bool {\n cursor := event_group.Events[0].Key.Cursor()\n if cursor != nil {\n var p Point\n p.X, p.Y = cursor.Point()\n if !p.Inside(w.Rendered()) {\n return false\n }\n }\n consume,take_focus := w.DoRespond(event_group)\n if take_focus {\n gui.TakeFocus(w)\n }\n if take_focus || consume { return true }\n kids := w.GetChildren()\n for i := range kids {\n if kids[i].Respond(gui, event_group) { return true }\n }\n return false\n}\n\ntype BasicZone struct {\n Request_dims Dims\n Render_region Region\n Ex,Ey bool\n}\n\nfunc (bz *BasicZone) Requested() Dims {\n return bz.Request_dims\n}\nfunc (bz *BasicZone) Rendered() Region {\n return bz.Render_region\n}\nfunc (bz *BasicZone) Expandable() (bool,bool) {\n return bz.Ex, bz.Ey\n}\n\ntype NonThinker struct {}\nfunc (n NonThinker) DoThink(int64) {}\n\ntype NonResponder struct {}\nfunc (n NonResponder) DoRespond(EventGroup) (bool,bool) {\n return false,false\n}\n\ntype Childless struct {}\nfunc (c Childless) GetChildren() []Widget { return nil }\n\ntype StandardParent struct {\n Children []Widget\n}\nfunc (s *StandardParent) GetChildren() []Widget {\n return s.Children\n}\nfunc (s *StandardParent) AddChild(w Widget) {\n s.Children = append(s.Children, w)\n}\nfunc (s *StandardParent) RemoveChild(w Widget) {\n for i := range s.Children {\n if s.Children[i] == w {\n s.Children[i] = s.Children[len(s.Children)-1]\n s.Children = s.Children[0 : len(s.Children)-1]\n return\n }\n }\n}\n\n\ntype rootWidget struct {\n EmbeddedWidget\n StandardParent\n BasicZone\n NonResponder\n NonThinker\n}\n\nfunc (r *rootWidget) Draw(region Region) {\n r.Render_region = region\n for i := range r.Children {\n r.Children[i].Draw(region)\n }\n}\n\ntype Gui struct {\n root rootWidget\n\n \/\/ Stack of widgets that have focus\n focus []Widget\n}\n\nfunc Make(dispatcher gin.EventDispatcher, dims Dims) *Gui {\n var g Gui\n g.root.EmbeddedWidget = &BasicWidget{ CoreWidget : &g.root }\n g.root.Request_dims = dims\n g.root.Render_region.Dims = dims\n dispatcher.RegisterEventListener(&g)\n return &g\n}\n\nfunc (g *Gui) Draw() {\n gl.MatrixMode(gl.PROJECTION)\n gl.LoadIdentity();\n region := g.root.Render_region\n 
gl.Ortho(float64(region.X), float64(region.X + region.Dx), float64(region.Y), float64(region.Y + region.Dy), 1000, -1000)\n gl.ClearColor(0, 0, 0, 1)\n gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n gl.MatrixMode(gl.MODELVIEW)\n gl.LoadIdentity();\n g.root.Draw(region)\n}\n\n\/\/ TODO: Shouldn't be exposing this\nfunc (g *Gui) Think(t int64) {\n g.root.Think(t)\n}\n\n\/\/ TODO: Shouldn't be exposing this\nfunc (g *Gui) HandleEventGroup(gin_group gin.EventGroup) {\n event_group := EventGroup{gin_group, false}\n if len(g.focus) > 0 {\n event_group.Focus = true\n consume := g.focus[len(g.focus)-1].Respond(g, event_group)\n if consume { return }\n event_group.Focus = false\n }\n g.root.Respond(g, event_group)\n}\n\nfunc (g *Gui) AddChild(w Widget) {\n g.root.AddChild(w)\n}\n\nfunc (g *Gui) RemoveChild(w Widget) {\n g.root.RemoveChild(w)\n}\n\nfunc (g *Gui) TakeFocus(w Widget) {\n if len(g.focus) == 0 {\n g.focus = append(g.focus, nil)\n }\n g.focus[len(g.focus)-1] = w\n}\nCreated a CollapsableZonepackage gui\n\nimport(\n \"glop\/gin\"\n \"gl\"\n)\n\ntype Point struct {\n X,Y int\n}\nfunc (p Point) Add(q Point) Point {\n return Point{\n X : p.X + q.X,\n Y : p.Y + q.Y,\n }\n}\nfunc (p Point) Inside(r Region) bool {\n if p.X < r.X { return false }\n if p.Y < r.Y { return false }\n if p.X > r.X + r.Dx { return false }\n if p.Y > r.Y + r.Dy { return false }\n return true\n}\ntype Dims struct {\n Dx,Dy int\n}\ntype Region struct {\n Point\n Dims\n}\nfunc (r Region) Add(p Point) Region {\n return Region{\n r.Point.Add(p),\n r.Dims,\n }\n}\n\n\/\/ Need a global stack of regions because opengl only handles pushing\/popping\n\/\/ the state of the enable bits for each clip plane, not the planes themselves\nvar clippers []Region\nfunc (r Region) setClipPlanes() {\n var eqs [][4]float64\n eqs = append(eqs, [4]float64{ 1, 0, 0, -float64(r.X)})\n eqs = append(eqs, [4]float64{-1, 0, 0, float64(r.X + r.Dx)})\n eqs = append(eqs, [4]float64{ 0, 1, 0, -float64(r.Y)})\n eqs = append(eqs, [4]float64{ 0,-1, 0, float64(r.Y + r.Dy)})\n gl.ClipPlane(gl.CLIP_PLANE0, &eqs[0][0])\n gl.ClipPlane(gl.CLIP_PLANE1, &eqs[1][0])\n gl.ClipPlane(gl.CLIP_PLANE2, &eqs[2][0])\n gl.ClipPlane(gl.CLIP_PLANE3, &eqs[3][0])\n}\nfunc (r Region) PushClipPlanes() {\n if len(clippers) == 0 {\n gl.Enable(gl.CLIP_PLANE0)\n gl.Enable(gl.CLIP_PLANE1)\n gl.Enable(gl.CLIP_PLANE2)\n gl.Enable(gl.CLIP_PLANE3)\n }\n r.setClipPlanes()\n clippers = append(clippers, r)\n}\nfunc (r Region) PopClipPlanes() {\n clippers = clippers[0 : len(clippers) - 1]\n if len(clippers) == 0 {\n gl.Disable(gl.CLIP_PLANE0)\n gl.Disable(gl.CLIP_PLANE1)\n gl.Disable(gl.CLIP_PLANE2)\n gl.Disable(gl.CLIP_PLANE3)\n } else {\n clippers[len(clippers) - 1].setClipPlanes()\n }\n}\n\n\n\/\/func (r Region) setViewport() {\n\/\/ gl.Viewport(r.Point.X, r.Point.Y, r.Dims.Dx, r.Dims.Dy)\n\/\/}\n\ntype Zone interface {\n \/\/ Returns the dimensions that this Widget would like available to\n \/\/ render itself. A Widget should only update the value it returns from\n \/\/ this method when its Think() method is called.\n Requested() Dims\n\n \/\/ Returns ex,ey, where ex and ey indicate whether this Widget is\n \/\/ capable of expanding along the X and Y axes, respectively.\n Expandable() (bool,bool)\n\n \/\/ Returns the region that this Widget used to render itself the last\n \/\/ time it was rendered. 
Should be completely contained within the\n \/\/ region that was passed to it on its last call to Render.\n Rendered() Region\n}\n\ntype EventGroup struct {\n gin.EventGroup\n Focus bool\n}\n\ntype Widget interface {\n Zone\n Think(int64)\n\n \/\/ Returns true if this widget or any of its children consumed the\n \/\/ event group\n Respond(*Gui,EventGroup) bool\n\n Draw(Region)\n}\ntype CoreWidget interface {\n DoThink(int64)\n\n \/\/ If take_focus is true, then the EventGroup will be consumed,\n \/\/ regardless of the value of consume\n DoRespond(EventGroup) (consume,take_focus bool)\n Zone\n\n Draw(Region)\n GetChildren() []Widget\n}\ntype EmbeddedWidget interface {\n Think(int64)\n Respond(*Gui, EventGroup) (consume bool)\n}\ntype BasicWidget struct {\n CoreWidget\n}\nfunc (w *BasicWidget) Think(t int64) {\n kids := w.GetChildren()\n for i := range kids {\n kids[i].Think(t)\n }\n w.DoThink(t)\n}\nfunc (w *BasicWidget) Respond(gui *Gui, event_group EventGroup) bool {\n cursor := event_group.Events[0].Key.Cursor()\n if cursor != nil {\n var p Point\n p.X, p.Y = cursor.Point()\n if !p.Inside(w.Rendered()) {\n return false\n }\n }\n consume,take_focus := w.DoRespond(event_group)\n if take_focus {\n gui.TakeFocus(w)\n }\n if take_focus || consume { return true }\n kids := w.GetChildren()\n for i := range kids {\n if kids[i].Respond(gui, event_group) { return true }\n }\n return false\n}\n\ntype BasicZone struct {\n Request_dims Dims\n Render_region Region\n Ex,Ey bool\n}\n\nfunc (bz BasicZone) Requested() Dims {\n return bz.Request_dims\n}\nfunc (bz BasicZone) Rendered() Region {\n return bz.Render_region\n}\nfunc (bz BasicZone) Expandable() (bool,bool) {\n return bz.Ex, bz.Ey\n}\n\ntype CollapsableZone struct {\n Collapsed bool\n Request_dims Dims\n Render_region Region\n Ex,Ey bool\n}\nfunc (cz CollapsableZone) Requested() Dims {\n if cz.Collapsed {\n return Dims{}\n }\n return cz.Request_dims\n}\nfunc (cz CollapsableZone) Rendered() Region {\n if cz.Collapsed {\n return Region{ Point : cz.Render_region.Point }\n }\n return cz.Render_region\n}\nfunc (cz *CollapsableZone) Expandable() (bool,bool) {\n if cz.Collapsed {\n return false, false\n }\n return cz.Ex, cz.Ey\n}\n\ntype NonThinker struct {}\nfunc (n NonThinker) DoThink(int64) {}\n\ntype NonResponder struct {}\nfunc (n NonResponder) DoRespond(EventGroup) (bool,bool) {\n return false,false\n}\n\ntype Childless struct {}\nfunc (c Childless) GetChildren() []Widget { return nil }\n\ntype StandardParent struct {\n Children []Widget\n}\nfunc (s *StandardParent) GetChildren() []Widget {\n return s.Children\n}\nfunc (s *StandardParent) AddChild(w Widget) {\n s.Children = append(s.Children, w)\n}\nfunc (s *StandardParent) RemoveChild(w Widget) {\n for i := range s.Children {\n if s.Children[i] == w {\n s.Children[i] = s.Children[len(s.Children)-1]\n s.Children = s.Children[0 : len(s.Children)-1]\n return\n }\n }\n}\n\n\ntype rootWidget struct {\n EmbeddedWidget\n StandardParent\n BasicZone\n NonResponder\n NonThinker\n}\n\nfunc (r *rootWidget) Draw(region Region) {\n r.Render_region = region\n for i := range r.Children {\n r.Children[i].Draw(region)\n }\n}\n\ntype Gui struct {\n root rootWidget\n\n \/\/ Stack of widgets that have focus\n focus []Widget\n}\n\nfunc Make(dispatcher gin.EventDispatcher, dims Dims) *Gui {\n var g Gui\n g.root.EmbeddedWidget = &BasicWidget{ CoreWidget : &g.root }\n g.root.Request_dims = dims\n g.root.Render_region.Dims = dims\n dispatcher.RegisterEventListener(&g)\n return &g\n}\n\nfunc (g *Gui) Draw() {\n 
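\/\/ Set up an orthographic projection that maps the root region directly to\n \/\/ the viewport; the 1000\/-1000 near\/far range in the gl.Ortho call below\n \/\/ leaves depth headroom for widgets drawn at different Z values.\n 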
gl.MatrixMode(gl.PROJECTION)\n gl.LoadIdentity();\n region := g.root.Render_region\n gl.Ortho(float64(region.X), float64(region.X + region.Dx), float64(region.Y), float64(region.Y + region.Dy), 1000, -1000)\n gl.ClearColor(0, 0, 0, 1)\n gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n gl.MatrixMode(gl.MODELVIEW)\n gl.LoadIdentity();\n g.root.Draw(region)\n}\n\n\/\/ TODO: Shouldn't be exposing this\nfunc (g *Gui) Think(t int64) {\n g.root.Think(t)\n}\n\n\/\/ TODO: Shouldn't be exposing this\nfunc (g *Gui) HandleEventGroup(gin_group gin.EventGroup) {\n event_group := EventGroup{gin_group, false}\n if len(g.focus) > 0 {\n event_group.Focus = true\n consume := g.focus[len(g.focus)-1].Respond(g, event_group)\n if consume { return }\n event_group.Focus = false\n }\n g.root.Respond(g, event_group)\n}\n\nfunc (g *Gui) AddChild(w Widget) {\n g.root.AddChild(w)\n}\n\nfunc (g *Gui) RemoveChild(w Widget) {\n g.root.RemoveChild(w)\n}\n\nfunc (g *Gui) TakeFocus(w Widget) {\n if len(g.focus) == 0 {\n g.focus = append(g.focus, nil)\n }\n g.focus[len(g.focus)-1] = w\n}\n<|endoftext|>"} {"text":"package logging\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tDefaultTimeLayout = \"2006-01-02 15:04:05\"\n\tDefaultFormat = \"[{{.TimeString}}] {{.Level}} {{.Message}}\\n\"\n\tDefaultBufSize = 1024\n)\n\ntype Handler interface {\n\tSetBufSize(int)\n\tSetLevel(LogLevel)\n\tSetLevelString(string)\n\tSetLevelRange(LogLevel, LogLevel)\n\tSetLevelRangeString(string, string)\n\tSetTimeLayout(string)\n\tSetFormat(string) error\n\tSetFilter(func(*Record) bool)\n\tEmit(Record)\n\tPanic(bool)\n}\n\ntype Record struct {\n\tTime time.Time\n\tTimeString string\n\tLevel LogLevel\n\tMessage string\n}\n\ntype BaseHandler struct {\n\tMutex sync.Mutex\n\tWriter io.WriteCloser\n\tLevel LogLevel\n\tLRange *LevelRange\n\tTimeLayout string\n\tTmpl *template.Template\n\tBuffer chan *Record\n\tBufSize int\n\tFilter func(*Record) bool\n\tBefore func(io.ReadWriter)\n\tAfter func(int64)\n\tGotError func(error)\n}\n\nfunc NewBaseHandler(out io.WriteCloser, level LogLevel, layout, format string) (*BaseHandler, error) {\n\th := &BaseHandler{\n\t\tWriter: out,\n\t\tLevel: level,\n\t\tTimeLayout: layout,\n\t}\n\tif err := h.SetFormat(format); err != nil {\n\t\treturn nil, err\n\t}\n\th.Panic(false)\n\th.BufSize = DefaultBufSize\n\th.Buffer = make(chan *Record, h.BufSize)\n\tgo h.WriteRecord()\n\treturn h, nil\n}\n\nfunc (h *BaseHandler) SetBufSize(size int) {\n\th.BufSize = size\n\tclose(h.Buffer)\n}\n\nfunc (h *BaseHandler) SetLevel(level LogLevel) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.Level = level\n}\n\nfunc (h *BaseHandler) SetLevelString(s string) {\n\th.SetLevel(StringToLogLevel(s))\n}\n\nfunc (h *BaseHandler) SetLevelRange(min_level, max_level LogLevel) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.LRange = &LevelRange{min_level, max_level}\n}\n\nfunc (h *BaseHandler) SetLevelRangeString(smin, smax string) {\n\th.SetLevelRange(StringToLogLevel(smin), StringToLogLevel(smax))\n}\n\nfunc (h *BaseHandler) SetTimeLayout(layout string) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.TimeLayout = layout\n}\n\nfunc (h *BaseHandler) SetFormat(format string) error {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\ttmpl, err := template.New(\"tmpl\").Parse(format)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.Tmpl = tmpl\n\treturn nil\n}\n\nfunc (h *BaseHandler) SetFilter(f func(*Record) bool) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.Filter = f\n}\n\nfunc (h 
*BaseHandler) Emit(rd Record) {\n\tif h.LRange != nil {\n\t\tif !h.LRange.Contain(rd.Level) {\n\t\t\treturn\n\t\t}\n\t} else if h.Level > rd.Level {\n\t\treturn\n\t}\n\th.Buffer <- &rd\n}\n\nfunc (h *BaseHandler) PanicError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (h *BaseHandler) IgnoreError(error) {\n}\n\nfunc (h *BaseHandler) Panic(b bool) {\n\tif b {\n\t\th.GotError = h.PanicError\n\t} else {\n\t\th.GotError = h.IgnoreError\n\t}\n}\n\nfunc (h *BaseHandler) WriteRecord() {\n\trd := &Record{}\n\tbuf := bytes.NewBuffer(nil)\n\tfor {\n\t\trd = <-h.Buffer\n\t\tif rd == nil {\n\t\t\th.Buffer = make(chan *Record, h.BufSize)\n\t\t\tgo h.WriteRecord()\n\t\t\tbreak\n\t\t}\n\t\tif h.Filter != nil && h.Filter(rd) {\n\t\t\tcontinue\n\t\t}\n\t\tif h.Writer == nil {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Reset()\n\t\trd.TimeString = rd.Time.Format(h.TimeLayout)\n\t\tif err := h.Tmpl.Execute(buf, rd); err != nil {\n\t\t\th.GotError(err)\n\t\t\tcontinue\n\t\t}\n\t\tif h.Before != nil {\n\t\t\th.Before(buf)\n\t\t}\n\t\tn, err := io.Copy(h.Writer, buf)\n\t\tif err != nil {\n\t\t\th.GotError(err)\n\t\t}\n\t\tif h.After != nil {\n\t\t\th.After(int64(n))\n\t\t}\n\t}\n}\nupdate handler set buffer sizepackage logging\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tDefaultTimeLayout = \"2006-01-02 15:04:05\"\n\tDefaultFormat = \"[{{.TimeString}}] {{.Level}} {{.Message}}\\n\"\n\tDefaultBufSize = 1024\n)\n\ntype Handler interface {\n\tSetBufSize(int)\n\tSetLevel(LogLevel)\n\tSetLevelString(string)\n\tSetLevelRange(LogLevel, LogLevel)\n\tSetLevelRangeString(string, string)\n\tSetTimeLayout(string)\n\tSetFormat(string) error\n\tSetFilter(func(*Record) bool)\n\tEmit(Record)\n\tPanic(bool)\n}\n\ntype Record struct {\n\tTime time.Time\n\tTimeString string\n\tLevel LogLevel\n\tMessage string\n}\n\ntype BaseHandler struct {\n\tMutex sync.Mutex\n\tWriter io.WriteCloser\n\tLevel LogLevel\n\tLRange *LevelRange\n\tTimeLayout string\n\tTmpl *template.Template\n\tBuffer chan *Record\n\tBufSize int\n\tFilter func(*Record) bool\n\tBefore func(io.ReadWriter)\n\tAfter func(int64)\n\tGotError func(error)\n}\n\nfunc NewBaseHandler(out io.WriteCloser, level LogLevel, layout, format string) (*BaseHandler, error) {\n\th := &BaseHandler{\n\t\tWriter: out,\n\t\tLevel: level,\n\t\tTimeLayout: layout,\n\t}\n\tif err := h.SetFormat(format); err != nil {\n\t\treturn nil, err\n\t}\n\th.Panic(false)\n\th.BufSize = DefaultBufSize\n\th.Buffer = make(chan *Record, h.BufSize)\n\tgo h.WriteRecord()\n\treturn h, nil\n}\n\nfunc (h *BaseHandler) SetBufSize(size int) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.BufSize = size\n\th.Buffer <- nil\n}\n\nfunc (h *BaseHandler) SetLevel(level LogLevel) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.Level = level\n}\n\nfunc (h *BaseHandler) SetLevelString(s string) {\n\th.SetLevel(StringToLogLevel(s))\n}\n\nfunc (h *BaseHandler) SetLevelRange(min_level, max_level LogLevel) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.LRange = &LevelRange{min_level, max_level}\n}\n\nfunc (h *BaseHandler) SetLevelRangeString(smin, smax string) {\n\th.SetLevelRange(StringToLogLevel(smin), StringToLogLevel(smax))\n}\n\nfunc (h *BaseHandler) SetTimeLayout(layout string) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.TimeLayout = layout\n}\n\nfunc (h *BaseHandler) SetFormat(format string) error {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\ttmpl, err := template.New(\"tmpl\").Parse(format)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.Tmpl = 
tmpl\n\treturn nil\n}\n\nfunc (h *BaseHandler) SetFilter(f func(*Record) bool) {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.Filter = f\n}\n\nfunc (h *BaseHandler) Emit(rd Record) {\n\tif h.LRange != nil {\n\t\tif !h.LRange.Contain(rd.Level) {\n\t\t\treturn\n\t\t}\n\t} else if h.Level > rd.Level {\n\t\treturn\n\t}\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\th.Buffer <- &rd\n}\n\nfunc (h *BaseHandler) PanicError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (h *BaseHandler) IgnoreError(error) {\n}\n\nfunc (h *BaseHandler) Panic(b bool) {\n\tif b {\n\t\th.GotError = h.PanicError\n\t} else {\n\t\th.GotError = h.IgnoreError\n\t}\n}\n\nfunc (h *BaseHandler) upgrade_buffer() {\n\th.Mutex.Lock()\n\tdefer h.Mutex.Unlock()\n\tbuffer := make(chan *Record, h.BufSize)\n\tfor {\n\t\tremain, ok := <-h.Buffer\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tbuffer <- remain\n\t}\n\tclose(h.Buffer)\n\th.Buffer = buffer\n}\n\nfunc (h *BaseHandler) WriteRecord() {\n\trd := &Record{}\n\tbuf := bytes.NewBuffer(nil)\n\tfor {\n\t\trd = <-h.Buffer\n\t\tif rd == nil {\n\t\t\th.upgrade_buffer()\n\t\t\tgo h.WriteRecord()\n\t\t\tbreak\n\t\t}\n\t\tif h.Filter != nil && h.Filter(rd) {\n\t\t\tcontinue\n\t\t}\n\t\tif h.Writer == nil {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Reset()\n\t\trd.TimeString = rd.Time.Format(h.TimeLayout)\n\t\tif err := h.Tmpl.Execute(buf, rd); err != nil {\n\t\t\th.GotError(err)\n\t\t\tcontinue\n\t\t}\n\t\tif h.Before != nil {\n\t\t\th.Before(buf)\n\t\t}\n\t\tn, err := io.Copy(h.Writer, buf)\n\t\tif err != nil {\n\t\t\th.GotError(err)\n\t\t}\n\t\tif h.After != nil {\n\t\t\th.After(int64(n))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package compress\n\nimport (\n\t\"compress\/gzip\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/volatile\/core\"\n)\n\n\/\/ Use tells the core to use this handler.\nfunc Use() {\n\tcore.Use(func(c *core.Context) {\n\t\tif strings.Contains(c.Request.Header.Get(\"Accept-Encoding\"), \"gzip\") && len(c.Request.Header.Get(\"Sec-WebSocket-Key\")) == 0 {\n\t\t\tc.ResponseWriter.Header().Set(\"Content-Encoding\", \"gzip\")\n\n\t\t\tgzw := gzip.NewWriter(c.ResponseWriter)\n\t\t\tdefer gzw.Close()\n\n\t\t\t\/\/ Pass a new ResponseWriter\n\t\t\tc.NextWriter(core.ResponseWriterBinder{\n\t\t\t\tWriter: gzw,\n\t\t\t\tResponseWriter: c.ResponseWriter,\n\t\t\t\tBeforeWrite: func(b []byte) {\n\t\t\t\t\tif len(c.ResponseWriter.Header().Get(\"Content-Type\")) == 0 {\n\t\t\t\t\t\tc.ResponseWriter.Header().Set(\"Content-Type\", http.DetectContentType(b))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t})\n\t\t} else {\n\t\t\tc.Next()\n\t\t}\n\t})\n}\nUse coreutil.SetContentType functionpackage compress\n\nimport (\n\t\"compress\/gzip\"\n\t\"strings\"\n\n\t\"github.com\/volatile\/core\"\n\t\"github.com\/volatile\/core\/coreutil\"\n)\n\n\/\/ Use tells the core to use this handler.\nfunc Use() {\n\tcore.Use(func(c *core.Context) {\n\t\tif strings.Contains(c.Request.Header.Get(\"Accept-Encoding\"), \"gzip\") && len(c.Request.Header.Get(\"Sec-WebSocket-Key\")) == 0 {\n\t\t\tc.ResponseWriter.Header().Set(\"Content-Encoding\", \"gzip\")\n\n\t\t\tgzw := gzip.NewWriter(c.ResponseWriter)\n\t\t\tdefer gzw.Close()\n\n\t\t\t\/\/ Pass a new ResponseWriter\n\t\t\tc.NextWriter(core.ResponseWriterBinder{\n\t\t\t\tWriter: gzw,\n\t\t\t\tResponseWriter: c.ResponseWriter,\n\t\t\t\tBeforeWrite: func(b []byte) {\n\t\t\t\t\tcoreutil.SetContentType(c.ResponseWriter, b)\n\t\t\t\t},\n\t\t\t})\n\t\t} else {\n\t\t\tc.Next()\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"\/\/ PoolHandler is handler which may be useful at 
the projects where\n\/\/ websocket connections are divided into a groups (pools) with access\n\/\/ to common data\n\/\/\n\/\/ Author: Pushkin Ivan \npackage pwshandler\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Environment is a common data for ws connections in one pool(group)\ntype Environment interface{}\n\n\/\/ PoolManager is common interface for a structures which merge\n\/\/ websocket connections in a pools (groups) with access to common\n\/\/ data\ntype PoolManager interface {\n\t\/\/ AddConn creates connection to a pool and returns environment\n\t\/\/ data\n\tAddConn(ws *websocket.Conn) (Environment, error)\n\t\/\/ DelConn removes passed connection from a pool if it exists\n\t\/\/ in pool\n\tDelConn(ws *websocket.Conn) error\n}\n\n\/\/ ConnManager contains methods for processing websocket connections\n\/\/ with passed common group data\ntype ConnManager interface {\n\t\/\/ Handle handles connections using passed common environment data\n\tHandle(ws *websocket.Conn, data Environment) error\n\t\/\/ HandleError processes an errors\n\tHandleError(ws *websocket.Conn, err error)\n}\n\n\/\/ RequestVerifier verifies requests. It has to verify a request data\n\/\/ such a passed hashes, certificates, remote addr, token, passed headers\n\/\/ or something else\ntype RequestVerifier interface {\n\tVerify(ws *websocket.Conn) error\n}\n\nconst _ERR_FORMAT = \"%s: connection handling error: %s\"\n\n\/\/ PoolHandler returns WS handler which receives websocket requests and\n\/\/ merges connection goroutines in a pools (groups) with common data.\n\/\/ poolMgr is a storage of groups and connections. poolMgr divides handled\n\/\/ connections into groups, stores common group data and passes common\n\/\/ data to goroutines for processing ws connections. connMgr contains\n\/\/ handler for processing of ws connection. connMgr gets common group and\n\/\/ ws connection. verifier must verify connections. 
If nil is passed\n\/\/ instead of a verifier, connections will not be verified\nfunc PoolHandler(poolMgr PoolManager, connMgr ConnManager,\n\tverifier RequestVerifier) http.Handler {\n\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tvar err error\n\n\t\t\/\/ Verify request\n\t\tif verifier != nil {\n\t\t\tif err = verifier.Verify(ws); err != nil {\n\t\t\t\tconnMgr.HandleError(ws,\n\t\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Create connection to a pool and take environment data\n\t\tvar data Environment\n\t\tif data, err = poolMgr.AddConn(ws); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Handle connection\n\t\tif err = connMgr.Handle(ws, data); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t}\n\n\t\t\/\/ Delete connection from a pool (group)\n\t\tif err = poolMgr.DelConn(ws); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t}\n\t})\n}\nsmall fix\/\/ PoolHandler is a handler which may be useful in projects where\n\/\/ websocket connections are divided into groups (pools) with access\n\/\/ to common data\npackage pwshandler\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ Environment is common data for ws connections in one pool (group)\ntype Environment interface{}\n\n\/\/ PoolManager is a common interface for structures which merge\n\/\/ websocket connections into pools (groups) with access to common\n\/\/ data\ntype PoolManager interface {\n\t\/\/ AddConn adds a connection to a pool and returns environment\n\t\/\/ data\n\tAddConn(ws *websocket.Conn) (Environment, error)\n\t\/\/ DelConn removes the passed connection from a pool if it exists\n\t\/\/ in the pool\n\tDelConn(ws *websocket.Conn) error\n}\n\n\/\/ ConnManager contains methods for processing websocket connections\n\/\/ with passed common group data\ntype ConnManager interface {\n\t\/\/ Handle handles connections using passed common environment data\n\tHandle(ws *websocket.Conn, data Environment) error\n\t\/\/ HandleError processes errors\n\tHandleError(ws *websocket.Conn, err error)\n}\n\n\/\/ RequestVerifier verifies requests. It has to verify request data\n\/\/ such as passed hashes, certificates, remote addr, tokens, passed headers\n\/\/ or something else\ntype RequestVerifier interface {\n\tVerify(ws *websocket.Conn) error\n}\n\nconst _ERR_FORMAT = \"%s: connection handling error: %s\"\n\n\/\/ PoolHandler returns a WS handler which receives websocket requests and\n\/\/ merges connection goroutines into pools (groups) with common data.\n\/\/ poolMgr is a storage of groups and connections. poolMgr divides handled\n\/\/ connections into groups, stores common group data and passes common\n\/\/ data to goroutines for processing ws connections. connMgr contains the\n\/\/ handler for processing a ws connection. connMgr gets the common group data\n\/\/ and the ws connection. verifier must verify connections. If nil is passed\n\/\/ instead of a verifier, connections will not be verified\nfunc PoolHandler(poolMgr PoolManager, connMgr ConnManager,\n\tverifier RequestVerifier) http.Handler {\n\n\treturn websocket.Handler(func(ws *websocket.Conn) {\n\t\tvar err error\n\n\t\t\/\/ Verify request\n\t\tif verifier != nil {\n\t\t\tif err = verifier.Verify(ws); err != nil {\n\t\t\t\tconnMgr.HandleError(ws,\n\t\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Create connection to a pool and take environment data\n\t\tvar data Environment\n\t\tif data, err = poolMgr.AddConn(ws); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Handle connection\n\t\tif err = connMgr.Handle(ws, data); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t}\n\n\t\t\/\/ Delete connection from a pool (group)\n\t\tif err = poolMgr.DelConn(ws); err != nil {\n\t\t\tconnMgr.HandleError(ws,\n\t\t\t\tfmt.Errorf(_ERR_FORMAT, ws.Request().RemoteAddr, err))\n\t\t}\n\t})\n}\n<|endoftext|>"}
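A minimal sketch of mounting PoolHandler on net/http, assumed to sit in the same package as the types above; the trivial pool and echo manager are placeholders, and the log and net/http imports are implied.

type onePool struct{}

func (onePool) AddConn(ws *websocket.Conn) (Environment, error) { return "shared state", nil }
func (onePool) DelConn(ws *websocket.Conn) error                { return nil }

type echoMgr struct{}

func (echoMgr) Handle(ws *websocket.Conn, data Environment) error {
	var msg string
	if err := websocket.Message.Receive(ws, &msg); err != nil {
		return err
	}
	return websocket.Message.Send(ws, msg) // echo a single message back
}

func (echoMgr) HandleError(ws *websocket.Conn, err error) { log.Println(err) }

func serve() error {
	// nil verifier: requests are accepted without verification
	http.Handle("/pool", PoolHandler(onePool{}, echoMgr{}, nil))
	return http.ListenAndServe(":8080", nil)
}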
 {"text":"package log\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/volatile\/core\"\n\t\"github.com\/volatile\/core\/coreutil\"\n\t\"github.com\/whitedevops\/colors\"\n)\n\n\/\/ Use tells the core to use this handler.\nfunc Use() {\n\tcore.Use(func(c *core.Context) {\n\t\tstart := time.Now()\n\t\t\/\/ Keep original request path in case of http.StripPrefix.\n\t\tpath := c.Request.URL.Path\n\n\t\tc.Next()\n\n\t\tlog.Printf(colors.ResetAll+\" %s %s %s %s\", fmtDuration(start), fmtStatus(c), fmtMethod(c), fmtPath(path))\n\t})\n}\n\nfunc fmtDuration(start time.Time) string {\n\treturn fmt.Sprintf(\"%s%s%13s%s\", colors.ResetAll, colors.ResetAll+colors.Dim, time.Since(start), colors.ResetAll)\n}\n\nfunc fmtStatus(c *core.Context) string {\n\tcode := coreutil.ResponseStatus(c.ResponseWriter)\n\n\tcolor := colors.White\n\n\tswitch {\n\tcase code >= 200 && code <= 299:\n\t\tcolor += colors.BackgroundGreen\n\tcase code >= 300 && code <= 399:\n\t\tcolor += colors.BackgroundCyan\n\tcase code >= 400 && code <= 499:\n\t\tcolor += colors.BackgroundYellow\n\tdefault:\n\t\tcolor += colors.BackgroundRed\n\t}\n\n\treturn fmt.Sprintf(\"%s%s %3d %s\", colors.ResetAll, color, code, colors.ResetAll)\n}\n\nfunc fmtMethod(c *core.Context) string {\n\tvar color string\n\n\tswitch c.Request.Method {\n\tcase \"GET\":\n\t\tcolor += colors.Green\n\tcase \"POST\":\n\t\tcolor += colors.Cyan\n\tcase \"PUT\", \"PATCH\":\n\t\tcolor += colors.Blue\n\tcase \"DELETE\":\n\t\tcolor += colors.Red\n\t}\n\n\treturn fmt.Sprintf(\"%s%s%s%s\", colors.ResetAll, color, c.Request.Method, colors.ResetAll)\n}\n\nfunc fmtPath(path string) string {\n\treturn fmt.Sprintf(\"%s%s%s%s\", colors.ResetAll, colors.Dim, path, colors.ResetAll)\n}\nFix comment typopackage log\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/volatile\/core\"\n\t\"github.com\/volatile\/core\/coreutil\"\n\t\"github.com\/whitedevops\/colors\"\n)\n\n\/\/ Use tells the core to use this handler.\nfunc Use() {\n\tcore.Use(func(c *core.Context) {\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path \/\/ Keep original request path in case of http.StripPrefix.\n\n\t\tc.Next()\n\n\t\tlog.Printf(colors.ResetAll+\" %s %s %s %s\", fmtDuration(start), fmtStatus(c), fmtMethod(c), fmtPath(path))\n\t})\n}\n\nfunc fmtDuration(start time.Time) string {\n\treturn fmt.Sprintf(\"%s%s%13s%s\", colors.ResetAll, 
colors.ResetAll+colors.Dim, time.Since(start), colors.ResetAll)\n}\n\nfunc fmtStatus(c *core.Context) string {\n\tcode := coreutil.ResponseStatus(c.ResponseWriter)\n\n\tcolor := colors.White\n\n\tswitch {\n\tcase code >= 200 && code <= 299:\n\t\tcolor += colors.BackgroundGreen\n\tcase code >= 300 && code <= 399:\n\t\tcolor += colors.BackgroundCyan\n\tcase code >= 400 && code <= 499:\n\t\tcolor += colors.BackgroundYellow\n\tdefault:\n\t\tcolor += colors.BackgroundRed\n\t}\n\n\treturn fmt.Sprintf(\"%s%s %3d %s\", colors.ResetAll, color, code, colors.ResetAll)\n}\n\nfunc fmtMethod(c *core.Context) string {\n\tvar color string\n\n\tswitch c.Request.Method {\n\tcase \"GET\":\n\t\tcolor += colors.Green\n\tcase \"POST\":\n\t\tcolor += colors.Cyan\n\tcase \"PUT\", \"PATCH\":\n\t\tcolor += colors.Blue\n\tcase \"DELETE\":\n\t\tcolor += colors.Red\n\t}\n\n\treturn fmt.Sprintf(\"%s%s%s%s\", colors.ResetAll, color, c.Request.Method, colors.ResetAll)\n}\n\nfunc fmtPath(path string) string {\n\treturn fmt.Sprintf(\"%s%s%s%s\", colors.ResetAll, colors.Dim, path, colors.ResetAll)\n}\n<|endoftext|>"} {"text":"package rest\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype HttpTransport struct {\n\tBaseURL *url.URL\n\tHTTPClient *http.Client\n\thttpDo func(c *http.Client, req *http.Request) (*http.Response, error)\n}\n\nfunc (h HttpTransport) Request(req Request) ([]interface{}, error) {\n\tvar raw []interface{}\n\n\trel, err := url.Parse(req.RefURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif req.Params != nil {\n\t\trel.RawQuery = req.Params.Encode()\n\t}\n\tif req.Data == nil {\n\t\treq.Data = map[string]interface{}{}\n\t}\n\n\tb, err := json.Marshal(req.Data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody := bytes.NewReader(b)\n\n\tu := h.BaseURL.ResolveReference(rel)\n\thttpReq, err := http.NewRequest(req.Method, u.String(), body)\n\tfor k, v := range req.Headers {\n\t\thttpReq.Header.Add(k, v)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := h.do(httpReq, &raw)\n\tif err != nil {\n\t\tif resp != nil {\n\t\t\treturn nil, fmt.Errorf(\"%v\", err)\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"could not parse response: %s\", resp.Response.Status)\n\t\t}\n\n\t}\n\n\treturn raw, nil\n}\n\n\/\/ Do executes API request created by NewRequest method or custom *http.Request.\nfunc (h HttpTransport) do(req *http.Request, v interface{}) (*Response, error) {\n\tresp, err := h.httpDo(h.HTTPClient, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tresponse := newResponse(resp)\n\terr = checkResponse(response)\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\tif v != nil {\n\t\terr = json.Unmarshal(response.Body, v)\n\t\tif err != nil {\n\t\t\treturn response, err\n\t\t}\n\t}\n\n\treturn response, nil\n}\nnil dereference in Request fixedpackage rest\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype HttpTransport struct {\n\tBaseURL *url.URL\n\tHTTPClient *http.Client\n\thttpDo func(c *http.Client, req *http.Request) (*http.Response, error)\n}\n\nfunc (h HttpTransport) Request(req Request) ([]interface{}, error) {\n\tvar raw []interface{}\n\n\trel, err := url.Parse(req.RefURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif req.Params != nil {\n\t\trel.RawQuery = req.Params.Encode()\n\t}\n\tif req.Data == nil {\n\t\treq.Data = map[string]interface{}{}\n\t}\n\n\tb, err := json.Marshal(req.Data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody 
:= bytes.NewReader(b)\n\n\tu := h.BaseURL.ResolveReference(rel)\n\thttpReq, err := http.NewRequest(req.Method, u.String(), body)\n\tfor k, v := range req.Headers {\n\t\thttpReq.Header.Add(k, v)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := h.do(httpReq, &raw)\n\tif err != nil {\n\t\tif resp != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not parse response: %s\", resp.Response.Status)\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%v\", err)\n\t\t}\n\n\t}\n\n\treturn raw, nil\n}\n\n\/\/ Do executes API request created by NewRequest method or custom *http.Request.\nfunc (h HttpTransport) do(req *http.Request, v interface{}) (*Response, error) {\n\tresp, err := h.httpDo(h.HTTPClient, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tresponse := newResponse(resp)\n\terr = checkResponse(response)\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\tif v != nil {\n\t\terr = json.Unmarshal(response.Body, v)\n\t\tif err != nil {\n\t\t\treturn response, err\n\t\t}\n\t}\n\n\treturn response, nil\n}\n<|endoftext|>"} {"text":"\/\/----------------------------------------\n\/\/\n\/\/ Copyright © ying32. All Rights Reserved.\n\/\/\n\/\/ Licensed under Apache License 2.0\n\/\/\n\/\/----------------------------------------\n\n\/\/ Helpers for converting Go image.Image values to VCL\/LCL images\npackage bitmap\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"unsafe\"\n\n\t\"github.com\/ying32\/govcl\/vcl\/types\"\n\n\t\"github.com\/ying32\/govcl\/vcl\"\n)\n\nvar (\n\tErrPixelDataEmpty = errors.New(\"The pixel data is empty\")\n\tErrUnsupportedDataFormat = errors.New(\"Unsupported pixel data format\")\n)\n\n\/\/ ToPngImage converts a Go Image to a VCL\/LCL TPngImage.\n\/\/ Remember to Free the returned Png object when done.\nfunc ToPngImage(img image.Image) (*vcl.TPngImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := png.Encode(buff, img); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewPngImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\n\/\/ 32bit bmp, transparency is lost.\n\/\/ Remember to Free the returned Bmp object when done.\n\/\/ LCL seemingly does not distinguish transparency loss, but VCL does....\nfunc ToBitmap(img image.Image) (*vcl.TBitmap, error) {\n\tswitch img.(type) {\n\tcase *image.RGBA:\n\t\tdata, _ := img.(*image.RGBA)\n\t\treturn toBitmap(img.Bounds().Size().X, img.Bounds().Size().Y, data.Pix)\n\n\tcase *image.NRGBA:\n\t\tdata, _ := img.(*image.NRGBA)\n\t\treturn toBitmap(img.Bounds().Size().X, img.Bounds().Size().Y, data.Pix)\n\n\tdefault:\n\t\treturn nil, ErrUnsupportedDataFormat\n\t}\n}\n\n\/\/ ToJPEGImage converts a Go Image to a VCL\/LCL TJPEGImage.\n\/\/ Remember to Free the returned jpg object when done.\nfunc ToJPEGImage(img image.Image, quality int) (*vcl.TJPEGImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := jpeg.Encode(buff, img, &jpeg.Options{quality}); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewJPEGImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\n\/\/ ToGIFImage converts a Go Image to a VCL\/LCL TGIFImage.\n\/\/ Remember to Free the returned gif object when done.\nfunc ToGIFImage(img image.Image, quality int) (*vcl.TGIFImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := gif.Encode(buff, img, &gif.Options{NumColors: 256}); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewGIFImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\nfunc toBitmap(width, height int, pix []uint8) (*vcl.TBitmap, error) {\n\tif len(pix) == 0 {\n\t\treturn nil, ErrPixelDataEmpty\n\t}\n\tbmp := vcl.NewBitmap()\n\tbmp.SetPixelFormat(types.Pf32bit)\n\tbmp.SetSize(int32(width), int32(height))\n\t\/\/ Fill pixels; the bottom-left corner is the origin\n\tfor h := height - 1; h >= 0; h-- {\n\t\tptr := bmp.ScanLine(int32(h))\n\t\tfor w := 0; w < width; w++ {\n\t\t\tindex := (h*width + w) * 4\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4))) = pix[index+pixIndex[0]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+1))) = pix[index+pixIndex[1]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+2))) = pix[index+pixIndex[2]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+3))) = pix[index+pixIndex[3]]\n\t\t}\n\t}\n\treturn bmp, nil\n}\n
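\n\/\/ Note: pixIndex is not defined in this file; it presumably lives elsewhere in\n\/\/ the package and encodes the destination byte order. A plausible (hypothetical)\n\/\/ mapping for RGBA source bytes written out as BGRA would be:\n\/\/\n\/\/\tvar pixIndex = [4]int{2, 1, 0, 3} \/\/ dest B, G, R, A taken from src R, G, B, A\n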
Update\/\/----------------------------------------\n\/\/\n\/\/ Copyright © ying32. All Rights Reserved.\n\/\/\n\/\/ Licensed under Apache License 2.0\n\/\/\n\/\/----------------------------------------\n\n\/\/ Helpers for converting Go image.Image values to VCL\/LCL images\npackage bitmap\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"unsafe\"\n\n\t\"github.com\/ying32\/govcl\/vcl\/types\"\n\n\t\"github.com\/ying32\/govcl\/vcl\"\n)\n\nvar (\n\tErrPixelDataEmpty = errors.New(\"The pixel data is empty\")\n\tErrUnsupportedDataFormat = errors.New(\"Unsupported pixel data format\")\n)\n\n\/\/ ToPngImage converts a Go Image to a VCL\/LCL TPngImage.\n\/\/ Remember to Free the returned Png object when done.\nfunc ToPngImage(img image.Image) (*vcl.TPngImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := png.Encode(buff, img); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewPngImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\n\/\/ 32bit bmp, transparency is lost.\n\/\/ Remember to Free the returned Bmp object when done.\n\/\/ LCL does not seem to lose transparency, but VCL does....\nfunc ToBitmap(img image.Image) (*vcl.TBitmap, error) {\n\tswitch img.(type) {\n\tcase *image.RGBA:\n\t\tdata, _ := img.(*image.RGBA)\n\t\treturn toBitmap(img.Bounds().Size().X, img.Bounds().Size().Y, data.Pix)\n\n\tcase *image.NRGBA:\n\t\tdata, _ := img.(*image.NRGBA)\n\t\treturn toBitmap(img.Bounds().Size().X, img.Bounds().Size().Y, data.Pix)\n\n\tdefault:\n\t\treturn nil, ErrUnsupportedDataFormat\n\t}\n}\n\n\/\/ ToJPEGImage converts a Go Image to a VCL\/LCL TJPEGImage.\n\/\/ Remember to Free the returned jpg object when done.\nfunc ToJPEGImage(img image.Image, quality int) (*vcl.TJPEGImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := jpeg.Encode(buff, img, &jpeg.Options{quality}); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewJPEGImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\n\/\/ ToGIFImage converts a Go Image to a VCL\/LCL TGIFImage.\n\/\/ Remember to Free the returned gif object when done.\nfunc ToGIFImage(img image.Image, quality int) (*vcl.TGIFImage, error) {\n\tbuff := bytes.NewBuffer([]byte{})\n\tif err := gif.Encode(buff, img, &gif.Options{NumColors: 256}); err != nil {\n\t\treturn nil, err\n\t}\n\tmem := vcl.NewMemoryStreamFromBytes(buff.Bytes())\n\tdefer mem.Free()\n\tmem.SetPosition(0)\n\tobj := vcl.NewGIFImage()\n\tobj.LoadFromStream(mem)\n\treturn obj, nil\n}\n\nfunc toBitmap(width, height int, pix []uint8) (*vcl.TBitmap, error) {\n\tif len(pix) == 0 {\n\t\treturn nil, ErrPixelDataEmpty\n\t}\n\tbmp := vcl.NewBitmap()\n\tbmp.SetPixelFormat(types.Pf32bit)\n\tbmp.SetSize(int32(width), int32(height))\n\t\/\/ Fill pixels; the bottom-left corner is the origin\n\tfor h := height - 1; h >= 0; h-- {\n\t\tptr := bmp.ScanLine(int32(h))\n\t\tfor w := 0; w < width; w++ {\n\t\t\tindex := (h*width + w) * 4\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4))) = 
pix[index+pixIndex[0]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+1))) = pix[index+pixIndex[1]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+2))) = pix[index+pixIndex[2]]\n\t\t\t*(*byte)(unsafe.Pointer(ptr + uintptr(w*4+3))) = pix[index+pixIndex[3]]\n\t\t}\n\t}\n\treturn bmp, nil\n}\n<|endoftext|>"} {"text":"package hamaprs\n\n\/\/ #cgo LDFLAGS: -lfap\n\/*\n#include <stdlib.h>\n#include <fap.h>\n\n\/\/ type is a reserved keyword in Go, we need something to reach p->type\nfap_packet_type_t getPacketType(fap_packet_t* p) {\n\tif (!p) return -1;\n    if (p->type != NULL) return *p->type;\n    return -1;\n}\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\t\"unsafe\"\n)\n\ntype PacketType int\n\nconst (\n\tLocationPacketType PacketType = iota\n\tObjectPacketType\n\tItemPacketType\n\tMicePacketType\n\tNMEAPacketType\n\tWXPacketType\n\tMessagePacketType\n\tCapabilitiesPacketType\n\tStatusPacketType\n\tTelemetryPacketType\n\tTelemetryMessagePacketType\n\tDXSpotPacketType\n\tExperimentalPacketType\n\tInvalidPacketType\n)\n\nconst InvalidCoordinate float64 = 360\n\n\/\/ Packet describes an APRS packet\ntype Packet struct {\n\tPacketType\n\tTimestamp int\n\tSourceCallsign string\n\tDestinationCallsign string\n\tPath []string\n\tStatus string\n\tSymbol string\n\tLatitude float64\n\tLongitude float64\n\tAltitude float64\n\tSpeed float64\n\tCourse uint8\n\tWeather *WeatherReport\n\tRawMessage string\n\tMicE string\n\tMessage string\n\tComment string\n}\n\n\/\/ WeatherReport describes the weather related part of an APRS packet\ntype WeatherReport struct {\n\tTemperature float64\n\tInsideTemperature float64\n\tHumidity uint8\n\tInsideHumidity uint8\n\tWindGust float64\n\tWindDirection uint8\n\tWindSpeed float64\n\tPressure float64\n}\n\n\/\/ Telemetry describes the telemetry related part of an APRS packet\ntype Telemetry struct {\n\tVal1, Val2, Val3, Val4, Val5 float64\n}\n\n\/\/ Parser is an APRS Parser\ntype Parser struct{}\n\n\/\/ NewParser returns a new APRS Parser.\nfunc NewParser() *Parser {\n\tC.fap_init()\n\tp := &Parser{}\n\truntime.SetFinalizer(p, func(p *Parser) {\n\t\tC.fap_cleanup()\n\t})\n\treturn p\n}\n\n\/\/ ParsePacket parses a raw packet string and returns a new Packet.\nfunc (p *Parser) ParsePacket(raw string, isAX25 bool) (*Packet, error) {\n\tpacket := &Packet{Latitude: InvalidCoordinate, Longitude: InvalidCoordinate}\n\treturn p.FillAprsPacket(raw, isAX25, packet)\n}\n\nfunc (p *Parser) FillAprsPacket(raw string, isAX25 bool, packet *Packet) (*Packet, error) {\n\tmessage_cstring := C.CString(raw)\n\tmessage_length := C.uint(len(raw))\n\tdefer C.free(unsafe.Pointer(message_cstring))\n\n\tcpacket := C.fap_parseaprs(message_cstring, message_length, C.short(boolToInt(isAX25)))\n\n\tdefer C.fap_free(cpacket)\n\n\tif cpacket.error_code != nil {\n\t\treturn nil, errors.New(\"Unable to parse APRS message\")\n\t}\n\n\tpacket.Timestamp = int(time.Now().Unix())\n\tpacket.SourceCallsign = strings.ToUpper(C.GoString(cpacket.src_callsign))\n\tpacket.DestinationCallsign = strings.ToUpper(C.GoString(cpacket.dst_callsign))\n\tpacket.Latitude = parseNilableCoordinate(cpacket.latitude)\n\tpacket.Longitude = parseNilableCoordinate(cpacket.longitude)\n\tpacket.Speed = parseNilableFloat(cpacket.speed)\n\tpacket.Course = parseNilableUInt(cpacket.course)\n\tpacket.Altitude = parseNilableFloat(cpacket.altitude)\n\tpacket.Message = C.GoString(cpacket.message)\n\tpacket.Status = C.GoStringN(cpacket.status, C.int(cpacket.status_len))\n\tpacket.Comment = C.GoStringN(cpacket.comment, 
C.int(cpacket.comment_len))\n\tpacket.RawMessage = raw\n\n\tif C.int(cpacket.path_len) > 0 {\n\t\tvar CPath **C.char = cpacket.path\n\t\tlength := int(cpacket.path_len)\n\t\thdr := reflect.SliceHeader{\n\t\t\tData: uintptr(unsafe.Pointer(CPath)),\n\t\t\tLen: length,\n\t\t\tCap: length,\n\t\t}\n\t\tptrSlice := *(*[]*C.char)(unsafe.Pointer(&hdr))\n\t\tpacket.Path = make([]string, int(cpacket.path_len))\n\t\tfor i, v := range ptrSlice {\n\t\t\tpacket.Path[i] = C.GoString(v)\n\t\t}\n\t}\n\tswitch C.getPacketType(cpacket) {\n\tcase C.fapLOCATION:\n\t\tpacket.PacketType = LocationPacketType\n\tcase C.fapOBJECT:\n\t\tpacket.PacketType = ObjectPacketType\n\tcase C.fapITEM:\n\t\tpacket.PacketType = ItemPacketType\n\tcase C.fapMICE:\n\t\tpacket.PacketType = MicePacketType\n\tcase C.fapNMEA:\n\t\tpacket.PacketType = NMEAPacketType\n\tcase C.fapWX:\n\t\tpacket.PacketType = WXPacketType\n\tcase C.fapMESSAGE:\n\t\tpacket.PacketType = MessagePacketType\n\tcase C.fapCAPABILITIES:\n\t\tpacket.PacketType = CapabilitiesPacketType\n\tcase C.fapSTATUS:\n\t\tpacket.PacketType = StatusPacketType\n\tcase C.fapTELEMETRY:\n\t\tpacket.PacketType = TelemetryPacketType\n\tcase C.fapTELEMETRY_MESSAGE:\n\t\tpacket.PacketType = TelemetryMessagePacketType\n\tcase C.fapDX_SPOT:\n\t\tpacket.PacketType = DXSpotPacketType\n\tcase C.fapEXPERIMENTAL:\n\t\tpacket.PacketType = ExperimentalPacketType\n\tdefault:\n\t\tpacket.PacketType = InvalidPacketType\n\t}\n\n\tif cpacket.wx_report != nil {\n\t\tw := WeatherReport{\n\t\t\tTemperature: parseNilableFloat(cpacket.wx_report.temp),\n\t\t\tInsideTemperature: parseNilableFloat(cpacket.wx_report.temp_in),\n\t\t\tHumidity: parseNilableUInt(cpacket.wx_report.humidity),\n\t\t\tInsideHumidity: parseNilableUInt(cpacket.wx_report.humidity_in),\n\t\t\tWindGust: parseNilableFloat(cpacket.wx_report.wind_gust),\n\t\t\tWindDirection: parseNilableUInt(cpacket.wx_report.wind_dir),\n\t\t\tWindSpeed: parseNilableFloat(cpacket.wx_report.wind_speed),\n\t\t\tPressure: parseNilableFloat(cpacket.wx_report.pressure),\n\t\t}\n\t\tpacket.Weather = &w\n\t}\n\n\t\/\/ MicE: allocate a buffer of 20 bytes for the fap_mice_mbits_to_message C func\n\tcbuffer := (*C.char)(C.malloc(C.size_t(20)))\n\tdefer C.free(unsafe.Pointer(cbuffer))\n\n\tif cpacket.messagebits != nil {\n\t\tC.fap_mice_mbits_to_message(cpacket.messagebits, cbuffer)\n\t\tpacket.MicE = C.GoString(cbuffer)\n\t}\n\n\treturn packet, nil\n}\n\n\/\/ IncludePosition returns true if the packet contains a Position.\nfunc (p *Packet) IncludePosition() bool {\n\tif p.Latitude != InvalidCoordinate && p.Longitude != InvalidCoordinate {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ ShortCallsign returns a short version of the callsign, e.g. KK6NXK for KK6NXK-7.\nfunc ShortCallsign(c string) string {\n\ts := strings.Split(c, \"-\")\n\treturn s[0]\n}\n\nfunc boolToInt(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc parseNilableFloat(d *C.double) float64 {\n\tif d != nil {\n\t\treturn float64(C.double(*d))\n\t}\n\treturn 0\n}\n\nfunc parseNilableCoordinate(d *C.double) float64 {\n\tif d != nil {\n\t\treturn float64(C.double(*d))\n\t}\n\treturn InvalidCoordinate\n}\n\nfunc parseNilableUInt(d *C.uint) uint8 {\n\tif d != nil {\n\t\treturn uint8(C.uint(*d))\n\t}\n\treturn 0\n}\nadded telemetrypackage hamaprs\n\n\/\/ #cgo LDFLAGS: -lfap\n\/*\n#include <stdlib.h>\n#include <fap.h>\n\n\/\/ type is a reserved keyword in Go, we need something to reach p->type\nfap_packet_type_t getPacketType(fap_packet_t* p) {\n\tif (!p) return -1;\n    if (p->type != NULL) return *p->type;\n    return 
-1;\n}\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\t\"unsafe\"\n)\n\ntype PacketType int\n\nconst (\n\tLocationPacketType PacketType = iota\n\tObjectPacketType\n\tItemPacketType\n\tMicePacketType\n\tNMEAPacketType\n\tWXPacketType\n\tMessagePacketType\n\tCapabilitiesPacketType\n\tStatusPacketType\n\tTelemetryPacketType\n\tTelemetryMessagePacketType\n\tDXSpotPacketType\n\tExperimentalPacketType\n\tInvalidPacketType\n)\n\n\/\/ InvalidCoordinate is a marker for an unset position\nconst InvalidCoordinate float64 = 360\n\n\/\/ Packet describes an APRS packet\ntype Packet struct {\n\tPacketType\n\tTimestamp int\n\tSourceCallsign string\n\tDestinationCallsign string\n\tPath []string\n\tStatus string\n\tSymbol string\n\tLatitude float64\n\tLongitude float64\n\tAltitude float64\n\tSpeed float64\n\tCourse uint8\n\tWeather *WeatherReport\n\tTelemetry *Telemetry\n\tRawMessage string\n\tMicE string\n\tMessage string\n\tComment string\n}\n\n\/\/ WeatherReport describes the weather related part of an APRS packet\ntype WeatherReport struct {\n\tTemperature float64\n\tInsideTemperature float64\n\tHumidity uint8\n\tInsideHumidity uint8\n\tWindGust float64\n\tWindDirection uint8\n\tWindSpeed float64\n\tPressure float64\n}\n\n\/\/ Telemetry describes the telemetry related part of an APRS packet\ntype Telemetry struct {\n\tVal1, Val2, Val3, Val4, Val5 float64\n}\n\n\/\/ Parser is an APRS Parser\ntype Parser struct{}\n\n\/\/ NewParser returns a new APRS Parser.\nfunc NewParser() *Parser {\n\tC.fap_init()\n\tp := &Parser{}\n\truntime.SetFinalizer(p, func(p *Parser) {\n\t\tC.fap_cleanup()\n\t})\n\treturn p\n}\n\n\/\/ ParsePacket parses a raw packet string and returns a new Packet.\nfunc (p *Parser) ParsePacket(raw string, isAX25 bool) (*Packet, error) {\n\tpacket := &Packet{Latitude: InvalidCoordinate, Longitude: InvalidCoordinate}\n\treturn p.FillAprsPacket(raw, isAX25, packet)\n}\n\nfunc (p *Parser) FillAprsPacket(raw string, isAX25 bool, packet *Packet) (*Packet, error) {\n\tmessage_cstring := C.CString(raw)\n\tmessage_length := C.uint(len(raw))\n\tdefer C.free(unsafe.Pointer(message_cstring))\n\n\tcpacket := C.fap_parseaprs(message_cstring, message_length, C.short(boolToInt(isAX25)))\n\n\tdefer C.fap_free(cpacket)\n\n\tif cpacket.error_code != nil {\n\t\treturn nil, errors.New(\"Unable to parse APRS message\")\n\t}\n\n\tpacket.Timestamp = int(time.Now().Unix())\n\tpacket.SourceCallsign = strings.ToUpper(C.GoString(cpacket.src_callsign))\n\tpacket.DestinationCallsign = strings.ToUpper(C.GoString(cpacket.dst_callsign))\n\tpacket.Latitude = parseNilableCoordinate(cpacket.latitude)\n\tpacket.Longitude = parseNilableCoordinate(cpacket.longitude)\n\tpacket.Speed = parseNilableFloat(cpacket.speed)\n\tpacket.Course = parseNilableUInt(cpacket.course)\n\tpacket.Altitude = parseNilableFloat(cpacket.altitude)\n\tpacket.Message = C.GoString(cpacket.message)\n\tpacket.Status = C.GoStringN(cpacket.status, C.int(cpacket.status_len))\n\tpacket.Comment = C.GoStringN(cpacket.comment, C.int(cpacket.comment_len))\n\tpacket.RawMessage = raw\n\n\tif C.int(cpacket.path_len) > 0 {\n\t\tvar CPath **C.char = cpacket.path\n\t\tlength := int(cpacket.path_len)\n\t\thdr := reflect.SliceHeader{\n\t\t\tData: uintptr(unsafe.Pointer(CPath)),\n\t\t\tLen: length,\n\t\t\tCap: length,\n\t\t}\n\t\tptrSlice := *(*[]*C.char)(unsafe.Pointer(&hdr))\n\t\tpacket.Path = make([]string, int(cpacket.path_len))\n\t\tfor i, v := range ptrSlice {\n\t\t\tpacket.Path[i] = C.GoString(v)\n\t\t}\n\t}\n
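\t\/\/ Map libfap's packet type constant onto this package's PacketType values.\n\tswitch C.getPacketType(cpacket) 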
{\n\tcase C.fapLOCATION:\n\t\tpacket.PacketType = LocationPacketType\n\tcase C.fapOBJECT:\n\t\tpacket.PacketType = ObjectPacketType\n\tcase C.fapITEM:\n\t\tpacket.PacketType = ItemPacketType\n\tcase C.fapMICE:\n\t\tpacket.PacketType = MicePacketType\n\tcase C.fapNMEA:\n\t\tpacket.PacketType = NMEAPacketType\n\tcase C.fapWX:\n\t\tpacket.PacketType = WXPacketType\n\tcase C.fapMESSAGE:\n\t\tpacket.PacketType = MessagePacketType\n\tcase C.fapCAPABILITIES:\n\t\tpacket.PacketType = CapabilitiesPacketType\n\tcase C.fapSTATUS:\n\t\tpacket.PacketType = StatusPacketType\n\tcase C.fapTELEMETRY:\n\t\tpacket.PacketType = TelemetryPacketType\n\tcase C.fapTELEMETRY_MESSAGE:\n\t\tpacket.PacketType = TelemetryMessagePacketType\n\tcase C.fapDX_SPOT:\n\t\tpacket.PacketType = DXSpotPacketType\n\tcase C.fapEXPERIMENTAL:\n\t\tpacket.PacketType = ExperimentalPacketType\n\tdefault:\n\t\tpacket.PacketType = InvalidPacketType\n\t}\n\n\tif cpacket.wx_report != nil {\n\t\tw := WeatherReport{\n\t\t\tTemperature: parseNilableFloat(cpacket.wx_report.temp),\n\t\t\tInsideTemperature: parseNilableFloat(cpacket.wx_report.temp_in),\n\t\t\tHumidity: parseNilableUInt(cpacket.wx_report.humidity),\n\t\t\tInsideHumidity: parseNilableUInt(cpacket.wx_report.humidity_in),\n\t\t\tWindGust: parseNilableFloat(cpacket.wx_report.wind_gust),\n\t\t\tWindDirection: parseNilableUInt(cpacket.wx_report.wind_dir),\n\t\t\tWindSpeed: parseNilableFloat(cpacket.wx_report.wind_speed),\n\t\t\tPressure: parseNilableFloat(cpacket.wx_report.pressure),\n\t\t}\n\t\tpacket.Weather = &w\n\t}\n\n\tif cpacket.telemetry != nil {\n\t\tt := Telemetry{\n\t\t\tVal1: parseNilableFloat(cpacket.telemetry.val1),\n\t\t\tVal2: parseNilableFloat(cpacket.telemetry.val2),\n\t\t\tVal3: parseNilableFloat(cpacket.telemetry.val3),\n\t\t\tVal4: parseNilableFloat(cpacket.telemetry.val4),\n\t\t\tVal5: parseNilableFloat(cpacket.telemetry.val5),\n\t\t}\n\t\tpacket.Telemetry = &t\n\t}\n\n\t\/\/ MicE alloc a buffer of 20 bytes for fap_mice_mbits_to_message C func\n\tcbuffer := (*C.char)(C.malloc(C.size_t(20)))\n\tdefer C.free(unsafe.Pointer(cbuffer))\n\n\tif cpacket.messagebits != nil {\n\t\tC.fap_mice_mbits_to_message(cpacket.messagebits, cbuffer)\n\t\tpacket.MicE = C.GoString(cbuffer)\n\t}\n\n\treturn packet, nil\n}\n\n\/\/ IncludePosition return true if the packet contains a Position\nfunc (p *Packet) IncludePosition() bool {\n\tif p.Latitude != InvalidCoordinate && p.Longitude != InvalidCoordinate {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ return a short version of the callsign as KK6NXK for KK6NXK-7\nfunc ShortCallsign(c string) string {\n\ts := strings.Split(c, \"-\")\n\treturn s[0]\n}\n\nfunc boolToInt(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc parseNilableFloat(d *C.double) float64 {\n\tif d != nil {\n\t\treturn float64(C.double(*d))\n\t}\n\treturn 0\n}\n\nfunc parseNilableCoordinate(d *C.double) float64 {\n\tif d != nil {\n\t\treturn float64(C.double(*d))\n\t}\n\treturn InvalidCoordinate\n}\n\nfunc parseNilableUInt(d *C.uint) uint8 {\n\tif d != nil {\n\t\treturn uint8(C.uint(*d))\n\t}\n\treturn 0\n}\n<|endoftext|>"} {"text":"package gincrud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/mgutz\/logxi\/v1\"\n\t\"github.com\/osiloke\/gostore\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar logger = log.New(\"gincrud\")\n\ntype ErrorList struct {\n\tMsg string `json:\"msg\"`\n\tError map[string]interface{} `json:\"error\"`\n}\n\ntype 
ErrorCtx struct {\n\tBucket string\n\tkey string\n\tGinCtx *gin.Context\n}\n\ntype SuccessCtx struct {\n\tBucket string\n\tKey string\n\tResult map[string]interface{}\n\tGinCtx *gin.Context\n}\n\ntype MarshalError struct {\n\tData map[string]interface{}\n}\n\ntype UnknownContent struct {\n\tS string `json:\"msg\"`\n}\n\nfunc (e *UnknownContent) Error() string {\n\treturn e.S\n}\n\ntype JSONError interface {\n\tSerialize() map[string]interface{} \/\/serialize error to json\n}\n\ntype ParsedContent map[string]interface{}\n\n\/\/Convert request json data to data and map, you can handle validation here\ntype MarshalFn func(ctx *gin.Context) (map[string]interface{}, error)\ntype UnMarshalFn func(*gin.Context, []byte) (map[string]interface{}, error)\n\n\/\/Get unique key from object and request\ntype GetKey func(interface{}, *gin.Context) string\n\n\/\/Called when a crud operation is successful\ntype OnSuccess func(ctx SuccessCtx) (string, error)\n\n\/\/Called when a crud operation fails\ntype OnError func(ctx interface{}, err error) error\n\ntype Results struct {\n\tData []map[string]interface{} `json:\"data\"`\n\tCount int `json:\"count,omitempty\"`\n\tTotalCount int `json:\"total_count,omitempty\"`\n}\n\nconst (\n\tFORM_CONTENT = \"form\"\n\tJSON_CONTENT = \"json\"\n\tXML_CONTENT = \"xml\"\n)\n\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tlog.Debug(fmt.Sprintf(\"%s took %s\", name, elapsed))\n}\n\nfunc GetFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}\n\nfunc filterFlags(content string) string {\n\tfor i, a := range content {\n\t\tif a == ' ' || a == ';' {\n\t\t\treturn content[:i]\n\t\t}\n\t}\n\treturn content\n}\nfunc Decode(c *gin.Context, obj interface{}) error {\n\tctype := filterFlags(c.Request.Header.Get(\"Content-Type\"))\n\tswitch {\n\tcase c.Request.Method == \"GET\" || ctype == gin.MIMEPOSTForm:\n\t\treturn &UnknownContent{\"unimplemented content-type: \" + ctype}\n\tcase ctype == gin.MIMEJSON:\n\t\tdecoder := json.NewDecoder(c.Request.Body)\n\t\tif err := decoder.Decode(&obj); err == nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\tcase ctype == gin.MIMEXML || ctype == gin.MIMEXML2:\n\t\treturn &UnknownContent{\"unimplemented content-type: \" + ctype}\n\tdefault:\n\t\terr := &UnknownContent{\"unknown content-type: \" + ctype}\n\t\treturn err\n\t}\n}\n\nfunc requestContent(c *gin.Context) (ParsedContent, error) {\n\tctype := filterFlags(c.Request.Header.Get(\"Content-Type\"))\n\tswitch {\n\tcase c.Request.Method == \"GET\" || ctype == gin.MIMEPOSTForm:\n\t\treturn nil, errors.New(\"Unimplemented content-type: \" + ctype)\n\tcase ctype == gin.MIMEJSON:\n\t\tvar obj ParsedContent\n\t\tdecoder := json.NewDecoder(c.Request.Body)\n\t\tif err := decoder.Decode(obj); err == nil {\n\t\t\treturn obj, err\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\tcase ctype == gin.MIMEXML || ctype == gin.MIMEXML2:\n\t\treturn nil, errors.New(\"Unimplemented content-type: \" + ctype)\n\tdefault:\n\t\terr := errors.New(\"unknown content-type: \" + ctype)\n\t\tc.Fail(400, err)\n\t\treturn nil, err\n\t}\n}\nfunc doUnmarshal(key, bucket string, data [][]byte, c *gin.Context, unMarshalFn UnMarshalFn, onSuccess OnSuccess, onError OnError) {\n\n\tdefer timeTrack(time.Now(), \"Do Unmarshal \"+key+\" from \"+bucket)\n\tm, err := unMarshalFn(c, data[1])\n\tif m == nil {\n\t\tm = make(map[string]interface{})\n\t}\n\tm[\"key\"] = string(data[0])\n\tif err != nil {\n\t\tc.JSON(500, err)\n\t} else {\n\t\tkk 
:= string(data[0])\n\t\tif onSuccess != nil {\n\t\t\tctx := SuccessCtx{bucket, kk, m, c}\n\t\t\tonSuccess(ctx)\n\t\t}\n\t\tc.JSON(200, m)\n\t}\n}\nfunc Get(key, bucket string, store gostore.Store, c *gin.Context, record interface{},\n\tunMarshalFn UnMarshalFn, onSuccess OnSuccess, onError OnError) {\n\tdata, err := store.Get([]byte(key), bucket)\n\tif err != nil {\n\t\t\/\/TODO: Does not exist error for store\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{bucket, key, c}, err)\n\t\t}\n\t\tc.JSON(404, gin.H{\"msg\": fmt.Sprintf(\"%s Not found\", key)})\n\t} else {\n\t\tif unMarshalFn != nil {\n\t\t\tdoUnmarshal(key, bucket, data, c, unMarshalFn, onSuccess, onError)\n\t\t} else {\n\t\t\t_ = json.Unmarshal(data[1], record)\n\t\t\tm := structs.Map(record)\n\t\t\tkk := string(data[0])\n\t\t\tm[\"key\"] = kk\n\t\t\tif onSuccess != nil {\n\t\t\t\tctx := SuccessCtx{bucket, kk, m, c}\n\t\t\t\tonSuccess(ctx)\n\t\t\t}\n\t\t\tc.JSON(200, m)\n\t\t}\n\t}\n}\n\n\/\/TODO: Extract core logic from each crud function i.e make doGetAll, doGet, ... they return data, err\nfunc GetAll(bucket string, store gostore.Store, c *gin.Context, onSuccess OnSuccess, onError OnError) {\n\tvar results []map[string]interface{}\n\tvar err error\n\n\tcount := 10\n\tq := c.Request.URL.Query()\n\tif val, ok := q[\"_perPage\"]; ok {\n\t\tcount, _ = strconv.Atoi(val[0])\n\t}\n\tvar data [][][]byte\n\n\tif val, ok := q[\"afterKey\"]; ok {\n\t\tdata, err = store.GetAllAfter([]byte(val[0]), count+1, 0, bucket)\n\t} else if val, ok := q[\"beforeKey\"]; ok {\n\t\tdata, err = store.GetAllBefore([]byte(val[0]), count+1, 0, bucket)\n\t} else {\n\t\tdata, err = store.GetAll(count+1, 0, bucket)\n\t}\n\tif err != nil {\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{Bucket: bucket, GinCtx: c}, err)\n\t\t}\n\t\tc.JSON(200, []string{})\n\t} else {\n\t\tfor _, element := range data {\n\t\t\tvar result map[string]interface{}\n\t\t\tif err := json.Unmarshal(element[1], &result); err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket, GinCtx: c}, err)\n\t\t\t\tc.JSON(500, gin.H{\"msg\": err})\n\t\t\t} else {\n\t\t\t\tif result == nil {\n\t\t\t\t\tresult = make(map[string]interface{})\n\t\t\t\t}\n\n\t\t\t\tresult[\"key\"] = string(element[0])\n\t\t\t\tresults = append(results, result)\n\t\t\t}\n\t\t}\n\t\tif len(results) == 0 {\n\t\t\tc.JSON(200, []string{})\n\t\t} else {\n\t\t\tif onSuccess != nil {\n\t\t\t}\n\t\t\tstats, _ := store.Stats(bucket)\n\t\t\ttotal_count := stats[\"KeyN\"].(int)\n\t\t\tc.Writer.Header().Set(\"X-Total-Count\", fmt.Sprintf(\"%d\", total_count))\n\t\t\tc.JSON(200, Results{results, count, total_count})\n\t\t\t\/\/ c.JSON(200, results)\n\t\t}\n\t}\n}\n\nfunc Post(bucket string, store gostore.Store, c *gin.Context,\n\trecord interface{}, fn GetKey, marshalFn MarshalFn, onSuccess OnSuccess, onError OnError) {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttrace := make([]byte, 1024)\n\t\t\truntime.Stack(trace, true)\n\t\t\tfmt.Printf(\"Stack: %s\", trace)\n\t\t\t\/\/\t\t\t\tlog.Error(\"Stack of %d bytes: %s\", count, trace)\n\t\t\t\/\/\t\t\t\tfmt.Println(\"Defer Panic in auth middleware:\", r)\n\t\t\tlogger.Error(\"POST:\", \"err\", string(trace))\n\t\t\tc.JSON(500, gin.H{\"message\": \"Unable to edit item \"})\n\t\t\tc.Abort()\n\t\t}\n\t}()\n\tif marshalFn != nil {\n\t\tlogger.Debug(\"Post\", \"bucket\", bucket, \"marshalfn\", GetFunctionName(marshalFn))\n\t\tobj, err := marshalFn(c)\n\t\tif err != nil {\n\t\t\tif onError != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t}\n\t\t\tif e, ok := 
err.(JSONError); ok {\n\t\t\t\tresult := ErrorList{\"Malformed data\", e.Serialize()}\n\t\t\t\tc.JSON(400, result)\n\t\t\t} else {\n\t\t\t\tc.JSON(400, gin.H{\"msg\": err})\n\t\t\t}\n\n\t\t} else {\n\t\t\tkey := fn(obj, c)\n\t\t\tif key == \"\" {\n\t\t\t\tc.JSON(500, err)\n\t\t\t} else {\n\t\t\t\tdata, err := json.Marshal(obj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\t} else {\n\t\t\t\t\tstore.Save([]byte(key), data, bucket)\n\t\t\t\t\tif onSuccess != nil {\n\n\t\t\t\t\t\tlogger.Debug(\"onSuccess\", \"bucket\", bucket, \"key\", key, \"onSuccess\", GetFunctionName(onSuccess))\n\t\t\t\t\t\tctx := SuccessCtx{bucket, key, obj, c}\n\t\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t\t}\n\t\t\t\t\tobj[\"key\"] = key\n\t\t\t\t\tc.JSON(200, obj)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tif b := c.Bind(record); b != false {\n\t\t\tm := structs.Map(record)\n\t\t\tdata, err := json.Marshal(&record)\n\t\t\tkey := fn(m, c)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\tc.JSON(500, gin.H{\"msg\": \"An error occured and this item could not be saved\"})\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tm[\"key\"] = key\n\t\t\t\tlogger.Debug(\"Successfully saved object\", \"bucket\", bucket, \"key\", key)\n\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, m, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, m)\n\t\t\t}\n\t\t} else {\n\t\t\tc.JSON(400, gin.H{\"msg\": \"Seems like the data submitted is not formatted properly\"})\n\t\t}\n\t}\n}\n\nfunc Put(key, bucket string, store gostore.Store, c *gin.Context, record interface{},\n\tmarshalFn MarshalFn, onSuccess OnSuccess, onError OnError) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttrace := make([]byte, 1024)\n\t\t\truntime.Stack(trace, true)\n\t\t\tfmt.Printf(\"Stack: %s\", trace)\n\t\t\t\/\/\t\t\t\tlog.Error(\"Stack of %d bytes: %s\", count, trace)\n\t\t\t\/\/\t\t\t\tfmt.Println(\"Defer Panic in auth middleware:\", r)\n\t\t\tlogger.Error(\"Defer Panic in Gincrud PUT:\", \"err\", string(trace))\n\t\t\tc.JSON(500, gin.H{\"message\": \"Unable to edit item \"})\n\t\t\tc.Abort()\n\t\t}\n\t}()\n\tif marshalFn != nil {\n\t\tobj, err := marshalFn(c)\n\t\tif err != nil {\n\t\t\tif onError != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t}\n\t\t\tif e, ok := err.(JSONError); ok {\n\t\t\t\tresult := ErrorList{\"Malformed data\", e.Serialize()}\n\t\t\t\tc.JSON(400, result)\n\t\t\t} else {\n\t\t\t\tc.JSON(400, gin.H{\"msg\": err.Error()})\n\t\t\t}\n\n\t\t} else {\n\t\t\tdata, err := json.Marshal(obj)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, obj, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, obj)\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tif b := c.Bind(record); b != false {\n\t\t\tm := structs.Map(record)\n\t\t\tdata, err := json.Marshal(&record)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\tc.JSON(500, gin.H{\"msg\": \"An error occured and this item could not be saved\"})\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tm[\"key\"] = key\n\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, m, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, m)\n\t\t\t}\n\t\t} else {\n\t\t\tc.JSON(400, gin.H{\"msg\": \"Seems like the data submitted 
is not formatted properly\"})\n\t\t}\n\t}\n}\n\nfunc Delete(key, bucket string, store gostore.Store, c *gin.Context, onSuccess OnSuccess, onError OnError) {\n\terr := store.Delete([]byte(key), bucket)\n\tif err != nil {\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{bucket, key, c}, err)\n\t\t}\n\t\tc.JSON(500, gin.H{\"msg\": \"The item [\" + key + \"] was not deleted\"})\n\t} else {\n\t\tif onSuccess != nil {\n\t\t\tctx := SuccessCtx{Bucket: bucket, Key: key, GinCtx: c}\n\t\t\tonSuccess(ctx)\n\t\t}\n\t\tc.JSON(200, gin.H{\"msg\": \"The item [\" + key + \"] was deleted\"})\n\t}\n}\nGetAll now accepts an UnMarshalFnpackage gincrud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/mgutz\/logxi\/v1\"\n\t\"github.com\/osiloke\/gostore\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar logger = log.New(\"gincrud\")\n\ntype ErrorList struct {\n\tMsg string `json:\"msg\"`\n\tError map[string]interface{} `json:\"error\"`\n}\n\ntype ErrorCtx struct {\n\tBucket string\n\tkey string\n\tGinCtx *gin.Context\n}\n\ntype SuccessCtx struct {\n\tBucket string\n\tKey string\n\tResult map[string]interface{}\n\tGinCtx *gin.Context\n}\n\ntype MarshalError struct {\n\tData map[string]interface{}\n}\n\ntype UnknownContent struct {\n\tS string `json:\"msg\"`\n}\n\nfunc (e *UnknownContent) Error() string {\n\treturn e.S\n}\n\ntype JSONError interface {\n\tSerialize() map[string]interface{} \/\/serialize error to json\n}\n\ntype ParsedContent map[string]interface{}\n\n\/\/Convert request json data to data and map, you can handle validation here\ntype MarshalFn func(ctx *gin.Context) (map[string]interface{}, error)\ntype UnMarshalFn func(*gin.Context, [][]byte) (map[string]interface{}, error)\n\n\/\/Get unique key from object and request\ntype GetKey func(interface{}, *gin.Context) string\n\n\/\/Called when a crud operation is successful\ntype OnSuccess func(ctx SuccessCtx) (string, error)\n\n\/\/Called when a crud operation fails\ntype OnError func(ctx interface{}, err error) error\n\ntype Results struct {\n\tData []map[string]interface{} `json:\"data\"`\n\tCount int `json:\"count,omitempty\"`\n\tTotalCount int `json:\"total_count,omitempty\"`\n}\n\nconst (\n\tFORM_CONTENT = \"form\"\n\tJSON_CONTENT = \"json\"\n\tXML_CONTENT = \"xml\"\n)\n\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tlog.Debug(fmt.Sprintf(\"%s took %s\", name, elapsed))\n}\n\nfunc GetFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}\n\nfunc filterFlags(content string) string {\n\tfor i, a := range content {\n\t\tif a == ' ' || a == ';' {\n\t\t\treturn content[:i]\n\t\t}\n\t}\n\treturn content\n}\nfunc Decode(c *gin.Context, obj interface{}) error {\n\tctype := filterFlags(c.Request.Header.Get(\"Content-Type\"))\n\tswitch {\n\tcase c.Request.Method == \"GET\" || ctype == gin.MIMEPOSTForm:\n\t\treturn &UnknownContent{\"unimplemented content-type: \" + ctype}\n\tcase ctype == gin.MIMEJSON:\n\t\tdecoder := json.NewDecoder(c.Request.Body)\n\t\tif err := decoder.Decode(&obj); err == nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\tcase ctype == gin.MIMEXML || ctype == gin.MIMEXML2:\n\t\treturn &UnknownContent{\"unimplemented content-type: \" + ctype}\n\tdefault:\n\t\terr := &UnknownContent{\"unknown content-type: \" + ctype}\n\t\treturn err\n\t}\n}\n\nfunc requestContent(c *gin.Context) (ParsedContent, error) {\n\tctype := 
filterFlags(c.Request.Header.Get(\"Content-Type\"))\n\tswitch {\n\tcase c.Request.Method == \"GET\" || ctype == gin.MIMEPOSTForm:\n\t\treturn nil, errors.New(\"Unimplemented content-type: \" + ctype)\n\tcase ctype == gin.MIMEJSON:\n\t\tvar obj ParsedContent\n\t\tdecoder := json.NewDecoder(c.Request.Body)\n\t\tif err := decoder.Decode(obj); err == nil {\n\t\t\treturn obj, err\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\tcase ctype == gin.MIMEXML || ctype == gin.MIMEXML2:\n\t\treturn nil, errors.New(\"Unimplemented content-type: \" + ctype)\n\tdefault:\n\t\terr := errors.New(\"unknown content-type: \" + ctype)\n\t\tc.Fail(400, err)\n\t\treturn nil, err\n\t}\n}\nfunc doSingleUnmarshal(bucket string, item [][]byte, c *gin.Context, unMarshalFn UnMarshalFn) (data map[string]interface{}, err error) {\n\tkey := string(item[0])\n\tdefer timeTrack(time.Now(), \"Do Single Unmarshal \"+key+\" from \"+bucket)\n\tdata, err = unMarshalFn(c, item)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata[\"key\"] = string(key)\n\treturn\n}\nfunc doUnmarshal(key, bucket string, data [][]byte, c *gin.Context, unMarshalFn UnMarshalFn, onSuccess OnSuccess, onError OnError) {\n\n\tdefer timeTrack(time.Now(), \"Do Unmarshal \"+key+\" from \"+bucket)\n\tm, err := unMarshalFn(c, data)\n\tif m == nil {\n\t\tm = make(map[string]interface{})\n\t}\n\tm[\"key\"] = string(data[0])\n\tif err != nil {\n\t\tc.JSON(500, err)\n\t} else {\n\t\tkk := string(data[0])\n\t\tif onSuccess != nil {\n\t\t\tctx := SuccessCtx{bucket, kk, m, c}\n\t\t\tonSuccess(ctx)\n\t\t}\n\t\tc.JSON(200, m)\n\t}\n}\nfunc Get(key, bucket string, store gostore.Store, c *gin.Context, record interface{},\n\tunMarshalFn UnMarshalFn, onSuccess OnSuccess, onError OnError) {\n\tdata, err := store.Get([]byte(key), bucket)\n\tif err != nil {\n\t\t\/\/TODO: Does not exist error for store\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{bucket, key, c}, err)\n\t\t}\n\t\tc.JSON(404, gin.H{\"msg\": fmt.Sprintf(\"%s Not found\", key)})\n\t} else {\n\t\tif unMarshalFn != nil {\n\t\t\tdoUnmarshal(key, bucket, data, c, unMarshalFn, onSuccess, onError)\n\t\t} else {\n\t\t\t_ = json.Unmarshal(data[1], record)\n\t\t\tm := structs.Map(record)\n\t\t\tkk := string(data[0])\n\t\t\tm[\"key\"] = kk\n\t\t\tif onSuccess != nil {\n\t\t\t\tctx := SuccessCtx{bucket, kk, m, c}\n\t\t\t\tonSuccess(ctx)\n\t\t\t}\n\t\t\tc.JSON(200, m)\n\t\t}\n\t}\n}\n\n\/\/TODO: Extract core logic from each crud function i.e make doGetAll, doGet, ... 
they return data, err\nfunc GetAll(bucket string, store gostore.Store, c *gin.Context, unMarshalFn UnMarshalFn, onSuccess OnSuccess, onError OnError) {\n\tvar results []map[string]interface{}\n\tvar err error\n\n\tcount := 10\n\tq := c.Request.URL.Query()\n\tif val, ok := q[\"_perPage\"]; ok {\n\t\tcount, _ = strconv.Atoi(val[0])\n\t}\n\tvar data [][][]byte\n\n\tif val, ok := q[\"afterKey\"]; ok {\n\t\tdata, err = store.GetAllAfter([]byte(val[0]), count+1, 0, bucket)\n\t} else if val, ok := q[\"beforeKey\"]; ok {\n\t\tdata, err = store.GetAllBefore([]byte(val[0]), count+1, 0, bucket)\n\t} else {\n\t\tdata, err = store.GetAll(count+1, 0, bucket)\n\t}\n\tif err != nil {\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{Bucket: bucket, GinCtx: c}, err)\n\t\t}\n\t\tc.JSON(200, []string{})\n\t} else {\n\t\tif unMarshalFn != nil {\n\t\t\tfor _, element := range data {\n\t\t\t\tdata, err := doSingleUnmarshal(bucket, element, c, unMarshalFn)\n\t\t\t\tif err == nil {\n\t\t\t\t\tresults = append(results, data)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, element := range data {\n\t\t\t\tvar result map[string]interface{}\n\t\t\t\tif err := json.Unmarshal(element[1], &result); err != nil {\n\t\t\t\t\tonError(ErrorCtx{Bucket: bucket, GinCtx: c}, err)\n\t\t\t\t\tc.JSON(500, gin.H{\"msg\": err})\n\t\t\t\t} else {\n\t\t\t\t\tif result == nil {\n\t\t\t\t\t\tresult = make(map[string]interface{})\n\t\t\t\t\t}\n\n\t\t\t\t\tresult[\"key\"] = string(element[0])\n\t\t\t\t\tresults = append(results, result)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(results) == 0 {\n\t\t\tc.JSON(200, []string{})\n\t\t} else {\n\t\t\tif onSuccess != nil {\n\t\t\t}\n\t\t\tstats, _ := store.Stats(bucket)\n\t\t\ttotal_count := stats[\"KeyN\"].(int)\n\t\t\tc.Writer.Header().Set(\"X-Total-Count\", fmt.Sprintf(\"%d\", total_count))\n\t\t\tc.JSON(200, Results{results, count, total_count})\n\t\t\t\/\/ c.JSON(200, results)\n\t\t}\n\t}\n}\n\nfunc Post(bucket string, store gostore.Store, c *gin.Context,\n\trecord interface{}, fn GetKey, marshalFn MarshalFn, onSuccess OnSuccess, onError OnError) {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttrace := make([]byte, 1024)\n\t\t\truntime.Stack(trace, true)\n\t\t\tfmt.Printf(\"Stack: %s\", trace)\n\t\t\t\/\/\t\t\t\tlog.Error(\"Stack of %d bytes: %s\", count, trace)\n\t\t\t\/\/\t\t\t\tfmt.Println(\"Defer Panic in auth middleware:\", r)\n\t\t\tlogger.Error(\"POST:\", \"err\", string(trace))\n\t\t\tc.JSON(500, gin.H{\"message\": \"Unable to edit item \"})\n\t\t\tc.Abort()\n\t\t}\n\t}()\n\tif marshalFn != nil {\n\t\tlogger.Debug(\"Post\", \"bucket\", bucket, \"marshalfn\", GetFunctionName(marshalFn))\n\t\tobj, err := marshalFn(c)\n\t\tif err != nil {\n\t\t\tif onError != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t}\n\t\t\tif e, ok := err.(JSONError); ok {\n\t\t\t\tresult := ErrorList{\"Malformed data\", e.Serialize()}\n\t\t\t\tc.JSON(400, result)\n\t\t\t} else {\n\t\t\t\tc.JSON(400, gin.H{\"msg\": err})\n\t\t\t}\n\n\t\t} else {\n\t\t\tkey := fn(obj, c)\n\t\t\tif key == \"\" {\n\t\t\t\tc.JSON(500, err)\n\t\t\t} else {\n\t\t\t\tdata, err := json.Marshal(obj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\t} else {\n\t\t\t\t\tstore.Save([]byte(key), data, bucket)\n\t\t\t\t\tif onSuccess != nil {\n\n\t\t\t\t\t\tlogger.Debug(\"onSuccess\", \"bucket\", bucket, \"key\", key, \"onSuccess\", GetFunctionName(onSuccess))\n\t\t\t\t\t\tctx := SuccessCtx{bucket, key, obj, c}\n\t\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t\t}\n\t\t\t\t\tobj[\"key\"] = 
key\n\t\t\t\t\tc.JSON(200, obj)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tif b := c.Bind(record); b != false {\n\t\t\tm := structs.Map(record)\n\t\t\tdata, err := json.Marshal(&record)\n\t\t\tkey := fn(m, c)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\tc.JSON(500, gin.H{\"msg\": \"An error occured and this item could not be saved\"})\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tm[\"key\"] = key\n\t\t\t\tlogger.Debug(\"Successfully saved object\", \"bucket\", bucket, \"key\", key)\n\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, m, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, m)\n\t\t\t}\n\t\t} else {\n\t\t\tc.JSON(400, gin.H{\"msg\": \"Seems like the data submitted is not formatted properly\"})\n\t\t}\n\t}\n}\n\nfunc Put(key, bucket string, store gostore.Store, c *gin.Context, record interface{},\n\tmarshalFn MarshalFn, onSuccess OnSuccess, onError OnError) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttrace := make([]byte, 1024)\n\t\t\truntime.Stack(trace, true)\n\t\t\tfmt.Printf(\"Stack: %s\", trace)\n\t\t\t\/\/\t\t\t\tlog.Error(\"Stack of %d bytes: %s\", count, trace)\n\t\t\t\/\/\t\t\t\tfmt.Println(\"Defer Panic in auth middleware:\", r)\n\t\t\tlogger.Error(\"Defer Panic in Gincrud PUT:\", \"err\", string(trace))\n\t\t\tc.JSON(500, gin.H{\"message\": \"Unable to edit item \"})\n\t\t\tc.Abort()\n\t\t}\n\t}()\n\tif marshalFn != nil {\n\t\tobj, err := marshalFn(c)\n\t\tif err != nil {\n\t\t\tif onError != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t}\n\t\t\tif e, ok := err.(JSONError); ok {\n\t\t\t\tresult := ErrorList{\"Malformed data\", e.Serialize()}\n\t\t\t\tc.JSON(400, result)\n\t\t\t} else {\n\t\t\t\tc.JSON(400, gin.H{\"msg\": err.Error()})\n\t\t\t}\n\n\t\t} else {\n\t\t\tdata, err := json.Marshal(obj)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, obj, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, obj)\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tif b := c.Bind(record); b != false {\n\t\t\tm := structs.Map(record)\n\t\t\tdata, err := json.Marshal(&record)\n\t\t\tif err != nil {\n\t\t\t\tonError(ErrorCtx{Bucket: bucket}, err)\n\t\t\t\tc.JSON(500, gin.H{\"msg\": \"An error occured and this item could not be saved\"})\n\t\t\t} else {\n\t\t\t\tstore.Save([]byte([]byte(key)), data, bucket)\n\t\t\t\tm[\"key\"] = key\n\n\t\t\t\tif onSuccess != nil {\n\t\t\t\t\tctx := SuccessCtx{bucket, key, m, c}\n\t\t\t\t\tonSuccess(ctx)\n\t\t\t\t}\n\t\t\t\tc.JSON(200, m)\n\t\t\t}\n\t\t} else {\n\t\t\tc.JSON(400, gin.H{\"msg\": \"Seems like the data submitted is not formatted properly\"})\n\t\t}\n\t}\n}\n\nfunc Delete(key, bucket string, store gostore.Store, c *gin.Context, onSuccess OnSuccess, onError OnError) {\n\terr := store.Delete([]byte(key), bucket)\n\tif err != nil {\n\t\tif onError != nil {\n\t\t\tonError(ErrorCtx{bucket, key, c}, err)\n\t\t}\n\t\tc.JSON(500, gin.H{\"msg\": \"The item [\" + key + \"] was not deleted\"})\n\t} else {\n\t\tif onSuccess != nil {\n\t\t\tctx := SuccessCtx{Bucket: bucket, Key: key, GinCtx: c}\n\t\t\tonSuccess(ctx)\n\t\t}\n\t\tc.JSON(200, gin.H{\"msg\": \"The item [\" + key + \"] was deleted\"})\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport 
(\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\t\"vip\/fetch\"\n)\n\ntype UploadResponse struct {\n\tUrl string `json:\"url\"`\n}\n\ntype verifyAuth func(http.ResponseWriter, *http.Request)\n\nfunc (h verifyAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Enable cross-origin requests\n\tif domain := os.Getenv(\"ALLOWED_ORIGIN\"); domain != \"\" {\n\t\tif origin := r.Header.Get(\"Origin\"); origin == domain {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS, PUT, DELETE\")\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\",\n\t\t\t\t\"Accept, Content-Type, Content-Length, Accept-Encoding, X-Vip-Token, Authorization\")\n\t\t}\n\t} else {\n\t\tauth := r.Header.Get(\"X-Vip-Token\")\n\t\tif auth != authToken {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\t\n\t}\n\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\th(w, r)\n}\n\nfunc fileKey(bucket string) string {\n\tseed := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tkey := fmt.Sprintf(\"%d-%s-%d\", seed.Int63(), bucket, time.Now().UnixNano())\n\n\thash := md5.New()\n\tio.WriteString(hash, key)\n\treturn fmt.Sprintf(\"%x\", hash.Sum(nil))\n}\n\nfunc handleImageRequest(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t}\n\n\tw.Header().Set(\"Cache-Control\", \"public, max-age=31536000\")\n\n\t\/\/ Client is checking for a cached URI, assume it is valid\n\t\/\/ and return a 304\n\tif r.Header.Get(\"If-Modified-Since\") != \"\" {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tgc := fetch.RequestContext(r)\n\n\tvar data []byte\n\terr := cache.Get(gc, gc.CacheKey(), groupcache.AllocatingByteSliceSink(&data))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tw.Header().Set(\"Content-Type\", http.DetectContentType(data))\n\thttp.ServeContent(w, r, gc.ImageId, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), bytes.NewReader(data))\n}\n\nfunc handleUpload(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tbucket := vars[\"bucket_id\"]\n\n\t\/\/ Set a hard 5mb limit on files\n\tif r.ContentLength > 5<<20 {\n\t\tw.WriteHeader(http.StatusRequestEntityTooLarge)\n\t\treturn\n\t}\n\n\tkey := fileKey(bucket)\n\terr := storage.PutReader(bucket, key, r.Body,\n\t\tr.ContentLength, r.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\turi := r.URL\n\n\tif r.URL.Host == \"\" {\n\t\turi.Host = os.Getenv(\"URI_HOSTNAME\")\n\t\tif *secure {\n\t\t\turi.Scheme = \"https\"\n\t\t} else {\n\t\t\turi.Scheme = \"http\"\n\t\t}\n\t}\n\n\turi.Path = fmt.Sprintf(\"%s\/%s\", bucket, key)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(UploadResponse{\n\t\tUrl: uri.String(),\n\t})\n}\n\nfunc handlePing(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"pong\")\n}\nAdd error message for 413 responsepackage 
main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/golang\/groupcache\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\t\"vip\/fetch\"\n)\n\ntype UploadResponse struct {\n\tUrl string `json:\"url\"`\n}\n\ntype verifyAuth func(http.ResponseWriter, *http.Request)\n\nfunc (h verifyAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Enable cross-origin requests\n\tif domain := os.Getenv(\"ALLOWED_ORIGIN\"); domain != \"\" {\n\t\tif origin := r.Header.Get(\"Origin\"); origin == domain {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS, PUT, DELETE\")\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\",\n\t\t\t\t\"Accept, Content-Type, Content-Length, Accept-Encoding, X-Vip-Token, Authorization\")\n\t\t}\n\t} else {\n\t\tauth := r.Header.Get(\"X-Vip-Token\")\n\t\tif auth != authToken {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\th(w, r)\n}\n\nfunc fileKey(bucket string) string {\n\tseed := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tkey := fmt.Sprintf(\"%d-%s-%d\", seed.Int63(), bucket, time.Now().UnixNano())\n\n\thash := md5.New()\n\tio.WriteString(hash, key)\n\treturn fmt.Sprintf(\"%x\", hash.Sum(nil))\n}\n\nfunc handleImageRequest(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Cache-Control\", \"public, max-age=31536000\")\n\n\t\/\/ Client is checking for a cached URI, assume it is valid\n\t\/\/ and return a 304\n\tif r.Header.Get(\"If-Modified-Since\") != \"\" {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tgc := fetch.RequestContext(r)\n\n\tvar data []byte\n\terr := cache.Get(gc, gc.CacheKey(), groupcache.AllocatingByteSliceSink(&data))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", http.DetectContentType(data))\n\thttp.ServeContent(w, r, gc.ImageId, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), bytes.NewReader(data))\n}\n\nfunc handleUpload(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tbucket := vars[\"bucket_id\"]\n\n\t\/\/ Set a hard 5MB limit on files (limit << 20 converts megabytes to bytes)\n\tlimit := int64(5)\n\tif r.ContentLength > limit<<20 {\n\t\terrMsg := fmt.Sprintf(\"The file size limit is %dMB.\", limit)\n\t\thttp.Error(w, errMsg, http.StatusRequestEntityTooLarge)\n\t\treturn\n\t}\n\n\tkey := fileKey(bucket)\n\terr := storage.PutReader(bucket, key, r.Body,\n\t\tr.ContentLength, r.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\turi := r.URL\n\n\tif r.URL.Host == \"\" {\n\t\turi.Host = os.Getenv(\"URI_HOSTNAME\")\n\t\tif *secure {\n\t\t\turi.Scheme = \"https\"\n\t\t} else {\n\t\t\turi.Scheme = \"http\"\n\t\t}\n\t}\n\n\turi.Path = fmt.Sprintf(\"%s\/%s\", bucket, key)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(UploadResponse{\n\t\tUrl: uri.String(),\n\t})\n}\n\n
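\/\/ Illustrative client-side sketch (not part of the original file): posting an\n\/\/ image to this handler and decoding the response. The host, route and token\n\/\/ value are assumptions; route registration lives elsewhere in the package.\n\/\/\n\/\/\tdata, _ := ioutil.ReadFile(\"photo.jpg\")\n\/\/\treq, _ := http.NewRequest(\"POST\", \"http:\/\/localhost:8080\/mybucket\", bytes.NewReader(data))\n\/\/\treq.Header.Set(\"Content-Type\", \"image\/jpeg\")\n\/\/\treq.Header.Set(\"X-Vip-Token\", \"secret\") \/\/ checked by verifyAuth\n\/\/\tresp, _ := http.DefaultClient.Do(req)\n\/\/\tdefer resp.Body.Close()\n\/\/\tvar ur UploadResponse\n\/\/\tjson.NewDecoder(resp.Body).Decode(&ur) \/\/ ur.Url holds the stored image URL\nfunc handlePing(w http.ResponseWriter, r *http.Request) 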
{\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"pong\")\n}\n<|endoftext|>"} {"text":"\/*\nPackage log provides a handler that logs each request\/response (time, duration, status, method, path).\n\nThe log formatting can either be coloured or not.\n\nMake sure to include this handler above any other handler to get accurate performance logs.\n*\/\npackage log\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tcReset = \"\\033[0m\"\n\tcDim = \"\\033[2m\"\n\tcRed = \"\\033[31m\"\n\tcGreen = \"\\033[32m\"\n\tcBlue = \"\\033[34m\"\n\tcCyan = \"\\033[36m\"\n\tcWhite = \"\\033[97m\"\n\tcBgRed = \"\\033[41m\"\n\tcBgGreen = \"\\033[42m\"\n\tcBgYellow = \"\\033[43m\"\n\tcBgCyan = \"\\033[46m\"\n)\n\n\/\/ A handler provides a request\/response logging handler.\ntype handler struct {\n\toptions *Options\n\tnext http.Handler\n}\n\n\/\/ Options provides the handler options.\ntype Options struct {\n\tColor bool \/\/ Colors triggers a coloured formatting compatible with Unix-based terminals.\n}\n\n\/\/ Handle returns a Handler wrapping another http.Handler.\nfunc Handle(h http.Handler, o *Options) http.Handler {\n\treturn &handler{o, h}\n}\n\n\/\/ HandleFunc returns a Handler wrapping an http.HandlerFunc.\nfunc HandleFunc(f http.HandlerFunc, o *Options) http.Handler {\n\treturn Handle(f, o)\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tlw := &logWriter{\n\t\tResponseWriter: w,\n\t}\n\t\/\/ Keep originals in case the response will be altered.\n\tmethod := r.Method\n\tpath := r.URL.Path\n\n\tdefer func() {\n\t\tif lw.status == 0 {\n\t\t\tlw.status = http.StatusOK\n\t\t}\n\n\t\tif h.options == nil || !h.options.Color {\n\t\t\tlog.Printf(\"%s %s ▶︎ %d @ %s\", method, path, lw.status, time.Since(start))\n\t\t\treturn\n\t\t}\n\n\t\tvar cBgStatus string\n\t\tswitch {\n\t\tcase lw.status >= 200 && lw.status <= 299:\n\t\t\tcBgStatus += cBgGreen\n\t\tcase lw.status >= 300 && lw.status <= 399:\n\t\t\tcBgStatus += cBgCyan\n\t\tcase lw.status >= 400 && lw.status <= 499:\n\t\t\tcBgStatus += cBgYellow\n\t\tdefault:\n\t\t\tcBgStatus += cBgRed\n\t\t}\n\n\t\tvar cMethod string\n\t\tswitch method {\n\t\tcase \"GET\":\n\t\t\tcMethod += cGreen\n\t\tcase \"POST\":\n\t\t\tcMethod += cCyan\n\t\tcase \"PUT\", \"PATCH\":\n\t\t\tcMethod += cBlue\n\t\tcase \"DELETE\":\n\t\t\tcMethod += cRed\n\t\t}\n\n\t\tlog.Printf(\"%s %s%13s%s %s%s %3d %s %s%s%s %s%s%s\", cReset, cDim, time.Since(start), cReset, cWhite, cBgStatus, lw.status, cReset, cMethod, method, cReset, cDim, path, cReset)\n\t}()\n\n\th.next.ServeHTTP(lw, r)\n}\n\n\/\/ logWriter catches the status code from WriteHeader.\ntype logWriter struct {\n\thttp.ResponseWriter\n\tstatus int\n}\n\nfunc (lw *logWriter) WriteHeader(status int) {\n\tif lw.status == 0 {\n\t\tlw.status = status\n\t}\n\tlw.ResponseWriter.WriteHeader(status)\n}\nImplement CloseNotify, Flush, Hijack and Push\/*\nPackage log provides a handler that logs each request\/response (time, duration, status, method, path).\n\nThe log formatting can either be coloured or not.\n\nMake sure to include this handler above any other handler to get accurate performance logs.\n*\/\npackage log\n\nimport (\n\t\"bufio\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst (\n\tcReset = \"\\033[0m\"\n\tcDim = \"\\033[2m\"\n\tcRed = \"\\033[31m\"\n\tcGreen = \"\\033[32m\"\n\tcBlue = \"\\033[34m\"\n\tcCyan = \"\\033[36m\"\n\tcWhite = \"\\033[97m\"\n\tcBgRed = \"\\033[41m\"\n\tcBgGreen = \"\\033[42m\"\n\tcBgYellow = \"\\033[43m\"\n\tcBgCyan = 
\"\\033[46m\"\n)\n\n\/\/ A handler provides a request\/response logging handler.\ntype handler struct {\n\toptions *Options\n\tnext http.Handler\n}\n\n\/\/ Options provides the handler options.\ntype Options struct {\n\tColor bool \/\/ Colors triggers a coloured formatting compatible with Unix-based terminals.\n}\n\n\/\/ Handle returns a Handler wrapping another http.Handler.\nfunc Handle(h http.Handler, o *Options) http.Handler {\n\treturn &handler{o, h}\n}\n\n\/\/ HandleFunc returns a Handler wrapping an http.HandlerFunc.\nfunc HandleFunc(f http.HandlerFunc, o *Options) http.Handler {\n\treturn Handle(f, o)\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tlw := &logWriter{\n\t\tResponseWriter: w,\n\t}\n\t\/\/ Keep originals in case the response will be altered.\n\tmethod := r.Method\n\tpath := r.URL.Path\n\n\tdefer func() {\n\t\tif lw.status == 0 {\n\t\t\tlw.status = http.StatusOK\n\t\t}\n\n\t\tif h.options == nil || !h.options.Color {\n\t\t\tlog.Printf(\"%s %s ▶︎ %d @ %s\", method, path, lw.status, time.Since(start))\n\t\t\treturn\n\t\t}\n\n\t\tvar cBgStatus string\n\t\tswitch {\n\t\tcase lw.status >= 200 && lw.status <= 299:\n\t\t\tcBgStatus += cBgGreen\n\t\tcase lw.status >= 300 && lw.status <= 399:\n\t\t\tcBgStatus += cBgCyan\n\t\tcase lw.status >= 400 && lw.status <= 499:\n\t\t\tcBgStatus += cBgYellow\n\t\tdefault:\n\t\t\tcBgStatus += cBgRed\n\t\t}\n\n\t\tvar cMethod string\n\t\tswitch method {\n\t\tcase \"GET\":\n\t\t\tcMethod += cGreen\n\t\tcase \"POST\":\n\t\t\tcMethod += cCyan\n\t\tcase \"PUT\", \"PATCH\":\n\t\t\tcMethod += cBlue\n\t\tcase \"DELETE\":\n\t\t\tcMethod += cRed\n\t\t}\n\n\t\tlog.Printf(\"%s %s%13s%s %s%s %3d %s %s%s%s %s%s%s\", cReset, cDim, time.Since(start), cReset, cWhite, cBgStatus, lw.status, cReset, cMethod, method, cReset, cDim, path, cReset)\n\t}()\n\n\th.next.ServeHTTP(lw, r)\n}\n\n\/\/ logWriter catches the status code from WriteHeader.\ntype logWriter struct {\n\thttp.ResponseWriter\n\tstatus int\n}\n\nfunc (lw *logWriter) WriteHeader(status int) {\n\tif lw.status == 0 {\n\t\tlw.status = status\n\t}\n\tlw.ResponseWriter.WriteHeader(status)\n}\n\n\/\/ CloseNotify implements the http.CloseNotifier interface.\n\/\/ No channel is returned if CloseNotify is not implemented by an upstream response writer.\nfunc (lw *logWriter) CloseNotify() <-chan bool {\n\tn, ok := lw.ResponseWriter.(http.CloseNotifier)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn n.CloseNotify()\n}\n\n\/\/ Flush implements the http.Flusher interface.\n\/\/ Nothing is done if Flush is not implemented by an upstream response writer.\nfunc (lw *logWriter) Flush() {\n\tf, ok := lw.ResponseWriter.(http.Flusher)\n\tif ok {\n\t\tf.Flush()\n\t}\n}\n\n\/\/ Hijack implements the http.Hijacker interface.\n\/\/ Error http.ErrNotSupported is returned if Hijack is not implemented by an upstream response writer.\nfunc (lw *logWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\th, ok := lw.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, http.ErrNotSupported\n\t}\n\treturn h.Hijack()\n}\n\n\/\/ Push implements the http.Pusher interface.\n\/\/ http.ErrNotSupported is returned if Push is not implemented by an upstream response writer or not supported by the client.\nfunc (lw *logWriter) Push(target string, opts *http.PushOptions) error {\n\tp, ok := lw.ResponseWriter.(http.Pusher)\n\tif !ok {\n\t\treturn http.ErrNotSupported\n\t}\n\treturn p.Push(target, opts)\n}\n<|endoftext|>"} {"text":"package main\n\nimport 
(\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"go\/model\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype reqHandler struct {\n\t*Context\n\tFn func(*Context, http.ResponseWriter, *http.Request) (int, error)\n}\n\n\/\/ ServeHTTP is called on a reqHandler by net\/http; Satisfies http.Handler\nfunc (h reqHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstatus, err := h.Fn(h.Context, w, r)\n\tif err != nil {\n\t\tswitch status {\n\t\tcase http.StatusNotFound:\n\t\t\thttp.NotFound(w, r)\n\t\tcase http.StatusBadRequest:\n\t\t\thttp.Error(w, err.Error(), status)\n\t\tdefault:\n\t\t\tstatus = http.StatusInternalServerError\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t}\n\tlog.Printf(\"%s %s %s %d\", strings.Split(r.RemoteAddr, \":\")[0], r.Method, r.URL.Path, status)\n}\n\n\/\/ Renders the home and about templates\nfunc rootHandler(c *Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tswitch r.URL.Path {\n\tcase \"\/\":\n\t\treturn http.StatusOK, renderTemplate(c, w, \"home\", nil)\n\tcase \"\/about\":\n\t\treturn http.StatusOK, renderTemplate(c, w, \"about\", nil)\n\tdefault:\n\t\treturn http.StatusNotFound, errors.New(\"handler: page not found\")\n\t}\n}\n\n\/\/ Renders the game template\nfunc gameHandler(c *Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif r.Method == \"POST\" {\n\t\tsize, _ := strconv.Atoi(r.FormValue(\"size\"))\n\t\tvar black, white string\n\t\tif r.FormValue(\"color\") == \"black\" {\n\t\t\tblack = r.FormValue(\"player_1\")\n\t\t\twhite = r.FormValue(\"player_2\")\n\t\t} else {\n\t\t\tblack = r.FormValue(\"player_2\")\n\t\t\twhite = r.FormValue(\"player_1\")\n\t\t}\n\t\tgame, err := model.New(black, white, size)\n\t\tif err != nil {\n\t\t\treturn http.StatusBadRequest, err\n\t\t}\n\t\thttp.Redirect(w, r, \"\/game\/\"+game.Id, 303)\n\t\treturn http.StatusSeeOther, nil\n\t} else {\n\t\tid := r.URL.Path[6:]\n\t\tgame, err := model.Load(id)\n\t\tif err != nil {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\tif r.Method == \"PATCH\" {\n\t\t\tx, _ := strconv.Atoi(r.FormValue(\"x\"))\n\t\t\ty, _ := strconv.Atoi(r.FormValue(\"y\"))\n\t\t\terr = game.Move(x, y)\n\t\t\tif err != nil {\n\t\t\t\treturn http.StatusBadRequest, err\n\t\t\t}\n\t\t\treturn http.StatusOK, nil\n\t\t} else {\n\t\t\treturn http.StatusOK, renderTemplate(c, w, \"game\", game)\n\t\t}\n\t}\n}\n\n\/\/ Sends game updates to a WebSocket connection\nfunc liveHandler(ws *websocket.Conn) {\n\tr := ws.Request()\n\tlog.Printf(\"%s %s %s websocket\", strings.Split(r.RemoteAddr, \":\")[0], r.Method, r.URL.Path)\n\n\tid := r.URL.Path[11:]\n\n\tmodel.Subscribe(id, func(g *model.Game) {\n\t\tlog.Printf(\"Sending WebSocket message for game %s\", g.Id)\n\t\terr := json.NewEncoder(ws).Encode(g)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(err.Error())\n\t\t}\n\t})\n}\nUse full import path for internal packagespackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/waits\/go\/model\"\n\t\"golang.org\/x\/net\/websocket\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype reqHandler struct {\n\t*Context\n\tFn func(*Context, http.ResponseWriter, *http.Request) (int, error)\n}\n\n\/\/ ServeHTTP is called on a reqHandler by net\/http; Satisfies http.Handler\nfunc (h reqHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstatus, err := h.Fn(h.Context, w, r)\n\tif err != nil {\n\t\tswitch status {\n\t\tcase 
http.StatusNotFound:\n\t\t\thttp.NotFound(w, r)\n\t\tcase http.StatusBadRequest:\n\t\t\thttp.Error(w, err.Error(), status)\n\t\tdefault:\n\t\t\tstatus = http.StatusInternalServerError\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t}\n\tlog.Printf(\"%s %s %s %d\", strings.Split(r.RemoteAddr, \":\")[0], r.Method, r.URL.Path, status)\n}\n\n\/\/ Renders the home and about templates\nfunc rootHandler(c *Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tswitch r.URL.Path {\n\tcase \"\/\":\n\t\treturn http.StatusOK, renderTemplate(c, w, \"home\", nil)\n\tcase \"\/about\":\n\t\treturn http.StatusOK, renderTemplate(c, w, \"about\", nil)\n\tdefault:\n\t\treturn http.StatusNotFound, errors.New(\"handler: page not found\")\n\t}\n}\n\n\/\/ Renders the game template\nfunc gameHandler(c *Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif r.Method == \"POST\" {\n\t\tsize, _ := strconv.Atoi(r.FormValue(\"size\"))\n\t\tvar black, white string\n\t\tif r.FormValue(\"color\") == \"black\" {\n\t\t\tblack = r.FormValue(\"player_1\")\n\t\t\twhite = r.FormValue(\"player_2\")\n\t\t} else {\n\t\t\tblack = r.FormValue(\"player_2\")\n\t\t\twhite = r.FormValue(\"player_1\")\n\t\t}\n\t\tgame, err := model.New(black, white, size)\n\t\tif err != nil {\n\t\t\treturn http.StatusBadRequest, err\n\t\t}\n\t\thttp.Redirect(w, r, \"\/game\/\"+game.Id, 303)\n\t\treturn http.StatusSeeOther, nil\n\t} else {\n\t\tid := r.URL.Path[6:]\n\t\tgame, err := model.Load(id)\n\t\tif err != nil {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\tif r.Method == \"PATCH\" {\n\t\t\tx, _ := strconv.Atoi(r.FormValue(\"x\"))\n\t\t\ty, _ := strconv.Atoi(r.FormValue(\"y\"))\n\t\t\terr = game.Move(x, y)\n\t\t\tif err != nil {\n\t\t\t\treturn http.StatusBadRequest, err\n\t\t\t}\n\t\t\treturn http.StatusOK, nil\n\t\t} else {\n\t\t\treturn http.StatusOK, renderTemplate(c, w, \"game\", game)\n\t\t}\n\t}\n}\n\n\/\/ Sends game updates to a WebSocket connection\nfunc liveHandler(ws *websocket.Conn) {\n\tr := ws.Request()\n\tlog.Printf(\"%s %s %s websocket\", strings.Split(r.RemoteAddr, \":\")[0], r.Method, r.URL.Path)\n\n\tid := r.URL.Path[11:]\n\n\tmodel.Subscribe(id, func(g *model.Game) {\n\t\tlog.Printf(\"Sending WebSocket message for game %s\", g.Id)\n\t\terr := json.NewEncoder(ws).Encode(g)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(err.Error())\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"\/\/ Package nosurf implements an HTTP handler that\n\/\/ mitigates Cross-Site Request Forgery Attacks.\npackage nosurf\n\nimport (\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nconst (\n\t\/\/ the name of CSRF cookie\n\tCookieName = \"csrf_token\"\n\t\/\/ the name of the form field\n\tFormFieldName = \"csrf_token\"\n\t\/\/ the name of CSRF header\n\tHeaderName = \"X-CSRF-Token\"\n\t\/\/ the HTTP status code for the default failure handler\n\tFailureCode = 400\n\n\t\/\/ Max-Age for the default base cookie. 
365 days.\n\tDefaultMaxAge = 365 * 24 * 60 * 60\n)\n\nvar safeMethods = []string{\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"}\n\ntype CSRFHandler struct {\n\t\/\/ Handlers that CSRFHandler wraps.\n\tsuccessHandler http.Handler\n\tfailureHandler http.Handler\n\n\t\/\/ The base cookie that CSRF cookies will be built upon.\n\t\/\/ This should be a better solution of customizing the options\n\t\/\/ than a bunch of methods SetCookieExpiration(), etc.\n\tbaseCookie http.Cookie\n\n\t\/\/ Slices of URLs that are exempt from CSRF checks.\n\t\/\/ They can be specified by...\n\t\/\/ ...an exact URL\n\texemptPaths []string\n\t\/\/ ...a glob (as used by path.Match())\n\texemptGlobs []string\n\t\/\/ ...a regexp.\n\texemptRegexps []*regexp.Regexp\n\n\t\/\/ All of those will be matched against Request.URL.Path,\n\t\/\/ So they should take the leading slash into account\n}\n\nfunc defaultFailureHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(FailureCode)\n}\n\n\/\/ Constructs a new CSRFHandler that calls\n\/\/ the specified handler if the CSRF check succeeds.\nfunc New(handler http.Handler) *CSRFHandler {\n\tbaseCookie := http.Cookie{}\n\tbaseCookie.MaxAge = DefaultMaxAge\n\n\tcsrf := &CSRFHandler{successHandler: handler,\n\t\tfailureHandler: http.HandlerFunc(defaultFailureHandler),\n\t\texemptPaths: make([]string, 0),\n\t\texemptGlobs: make([]string, 0),\n\t\texemptRegexps: make([]*regexp.Regexp, 0),\n\t\tbaseCookie: baseCookie,\n\t}\n\n\treturn csrf\n}\n\nfunc (h *CSRFHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Prefer the header over form value\n\tsent_token := r.Header.Get(HeaderName)\n\tif sent_token == \"\" {\n\t\tsent_token = r.PostFormValue(FormFieldName)\n\t}\n\n\ttoken_cookie, err := r.Cookie(CookieName)\n\treal_token := \"\"\n\tif err == http.ErrNoCookie {\n\t\treal_token = h.RegenerateToken(w, r)\n\t} else {\n\t\treal_token = token_cookie.Value\n\t}\n\t\/\/ If the length of the real token isn't what it should be,\n\t\/\/ it has either been tampered with,\n\t\/\/ or we're migrating onto a new algorithm for generating tokens.\n\t\/\/ In any case of those, we should regenerate it.\n\tif len(real_token) != tokenLength {\n\t\treal_token = h.RegenerateToken(w, r)\n\t}\n\n\t\/\/ clear the context after the request is served\n\tdefer ctxClear(r)\n\tctxSetToken(r, real_token)\n\n\tif sContains(safeMethods, r.Method) {\n\t\t\/\/ short-circuit with a success for safe methods\n\t\th.handleSuccess(w, r)\n\t\treturn\n\t}\n}\n\nfunc (h *CSRFHandler) handleSuccess(w http.ResponseWriter, r *http.Request) {\n\th.successHandler.ServeHTTP(w, r)\n}\n\n\/\/ Generates a new token, sets it on the given request and returns it\nfunc (h *CSRFHandler) RegenerateToken(w http.ResponseWriter, r *http.Request) string {\n\ttoken := generateToken()\n\n\tcookie := h.baseCookie\n\tcookie.Name = CookieName\n\tcookie.Value = token\n\n\thttp.SetCookie(w, &cookie)\n\n\tctxSetToken(r, token)\n\n\treturn token\n}\n\n\/\/ Sets the handler to call in case the CSRF check\n\/\/ fails. 
By default it's defaultFailureHandler.\nfunc (h *CSRFHandler) SetFailureHandler(handler http.Handler) {\n\th.failureHandler = handler\n}\n\n\/\/ Sets the base cookie to use when building a CSRF token cookie\n\/\/ This way you can specify the Domain, Path, HttpOnly, Secure, etc.\nfunc (h *CSRFHandler) SetBaseCookie(cookie http.Cookie) {\n\th.baseCookie = cookie\n}\nhandleFailure()\/\/ Package nosurf implements an HTTP handler that\n\/\/ mitigates Cross-Site Request Forgery Attacks.\npackage nosurf\n\nimport (\n\t\"net\/http\"\n\t\"regexp\"\n)\n\nconst (\n\t\/\/ the name of CSRF cookie\n\tCookieName = \"csrf_token\"\n\t\/\/ the name of the form field\n\tFormFieldName = \"csrf_token\"\n\t\/\/ the name of CSRF header\n\tHeaderName = \"X-CSRF-Token\"\n\t\/\/ the HTTP status code for the default failure handler\n\tFailureCode = 400\n\n\t\/\/ Max-Age for the default base cookie. 365 days.\n\tDefaultMaxAge = 365 * 24 * 60 * 60\n)\n\nvar safeMethods = []string{\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"}\n\ntype CSRFHandler struct {\n\t\/\/ Handlers that CSRFHandler wraps.\n\tsuccessHandler http.Handler\n\tfailureHandler http.Handler\n\n\t\/\/ The base cookie that CSRF cookies will be built upon.\n\t\/\/ This should be a better solution of customizing the options\n\t\/\/ than a bunch of methods SetCookieExpiration(), etc.\n\tbaseCookie http.Cookie\n\n\t\/\/ Slices of URLs that are exempt from CSRF checks.\n\t\/\/ They can be specified by...\n\t\/\/ ...an exact URL\n\texemptPaths []string\n\t\/\/ ...a glob (as used by path.Match())\n\texemptGlobs []string\n\t\/\/ ...a regexp.\n\texemptRegexps []*regexp.Regexp\n\n\t\/\/ All of those will be matched against Request.URL.Path,\n\t\/\/ So they should take the leading slash into account\n}\n\nfunc defaultFailureHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(FailureCode)\n}\n\n\/\/ Constructs a new CSRFHandler that calls\n\/\/ the specified handler if the CSRF check succeeds.\nfunc New(handler http.Handler) *CSRFHandler {\n\tbaseCookie := http.Cookie{}\n\tbaseCookie.MaxAge = DefaultMaxAge\n\n\tcsrf := &CSRFHandler{successHandler: handler,\n\t\tfailureHandler: http.HandlerFunc(defaultFailureHandler),\n\t\texemptPaths: make([]string, 0),\n\t\texemptGlobs: make([]string, 0),\n\t\texemptRegexps: make([]*regexp.Regexp, 0),\n\t\tbaseCookie: baseCookie,\n\t}\n\n\treturn csrf\n}\n\nfunc (h *CSRFHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Prefer the header over form value\n\tsent_token := r.Header.Get(HeaderName)\n\tif sent_token == \"\" {\n\t\tsent_token = r.PostFormValue(FormFieldName)\n\t}\n\n\ttoken_cookie, err := r.Cookie(CookieName)\n\treal_token := \"\"\n\tif err == http.ErrNoCookie {\n\t\treal_token = h.RegenerateToken(w, r)\n\t} else {\n\t\treal_token = token_cookie.Value\n\t}\n\t\/\/ If the length of the real token isn't what it should be,\n\t\/\/ it has either been tampered with,\n\t\/\/ or we're migrating onto a new algorithm for generating tokens.\n\t\/\/ In any case of those, we should regenerate it.\n\tif len(real_token) != tokenLength {\n\t\treal_token = h.RegenerateToken(w, r)\n\t}\n\n\t\/\/ clear the context after the request is served\n\tdefer ctxClear(r)\n\tctxSetToken(r, real_token)\n\n\tif sContains(safeMethods, r.Method) {\n\t\t\/\/ short-circuit with a success for safe methods\n\t\th.handleSuccess(w, r)\n\t\treturn\n\t}\n}\n\n\/\/ handleSuccess simply calls the successHandler\n\/\/ everything else, like setting a token in the context\n\/\/ is taken care of by h.ServeHTTP()\nfunc (h *CSRFHandler) 
handleSuccess(w http.ResponseWriter, r *http.Request) {\n\th.successHandler.ServeHTTP(w, r)\n}\n\n\/\/ Same applies here: h.ServeHTTP() sets the failure reason, the token,\n\/\/ and only then calls handleFailure()\nfunc (h *CSRFHandler) handleFailure(w http.ResponseWriter, r *http.Request) {\n\th.failureHandler.ServeHTTP(w, r)\n}\n\n\/\/ Generates a new token, sets it on the given request and returns it\nfunc (h *CSRFHandler) RegenerateToken(w http.ResponseWriter, r *http.Request) string {\n\ttoken := generateToken()\n\n\tcookie := h.baseCookie\n\tcookie.Name = CookieName\n\tcookie.Value = token\n\n\thttp.SetCookie(w, &cookie)\n\n\tctxSetToken(r, token)\n\n\treturn token\n}\n\n\/\/ Sets the handler to call in case the CSRF check\n\/\/ fails. By default it's defaultFailureHandler.\nfunc (h *CSRFHandler) SetFailureHandler(handler http.Handler) {\n\th.failureHandler = handler\n}\n\n\/\/ Sets the base cookie to use when building a CSRF token cookie\n\/\/ This way you can specify the Domain, Path, HttpOnly, Secure, etc.\nfunc (h *CSRFHandler) SetBaseCookie(cookie http.Cookie) {\n\th.baseCookie = cookie\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\tgoPath := os.Getenv(\"GOPATH\")\n\thugoPath := filepath.Join(goPath, \"src\/github.com\/spf13\/hugo\")\n\n\tif found, err := exists(hugoPath); !found || err != nil {\n\t\tlog.Fatalf(\"Aborting. Can't find Hugo source on %s.\", hugoPath)\n\t}\n\n\t\/\/ NOTE: I assume that 'go get -u' was run before of this and that\n\t\/\/ every package and dependency is up to date.\n\n\t\/\/ Get new tags from remote\n\trun(\"git\", []string{\"fetch\", \"--tags\"}, hugoPath)\n\n\t\/\/ Get the revision for the latest tag\n\tcommit := run(\"git\", []string{\"rev-list\", \"--tags\", \"--max-count=1\"}, hugoPath)\n\n\t\/\/ Get the latest tag\n\ttag := run(\"git\", []string{\"describe\", \"--tags\", commit}, hugoPath)\n\n\t\/\/ Checkout the latest tag\n\trun(\"git\", []string{\"checkout\", tag}, hugoPath)\n\n\t\/\/ Build hugo binary\n\tpluginPath := filepath.Join(goPath, \"src\/github.com\/hacdias\/caddy-hugo\")\n\trun(\"go\", []string{\"build\", \"-o\", \"assets\/hugo\", \"github.com\/spf13\/hugo\"}, pluginPath)\n\n\tupdateVersion(pluginPath, tag)\n}\n\nfunc run(command string, args []string, path string) string {\n\tcmd := exec.Command(command, args...)\n\tcmd.Dir = path\n\tout, err := cmd.Output()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn strings.TrimSpace(string(out))\n}\n\n\/\/ exists returns whether the given file or directory exists or not\nfunc exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\n\treturn true, err\n}\n\nfunc updateVersion(path string, version string) {\n\tpath = filepath.Join(path, \"installer.go\")\n\n\tinput, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tlines := strings.Split(string(input), \"\\n\")\n\n\tfor i, line := range lines {\n\t\tif strings.Contains(line, \"const version\") {\n\t\t\tlines[i] = \"const version = \\\"\" + version + \"\\\"\"\n\t\t}\n\t}\n\n\toutput := strings.Join(lines, \"\\n\")\n\terr = ioutil.WriteFile(path, []byte(output), 0644)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\ntravis updatepackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc main() {\n\tif 
len(os.Getenv(\"TRAVIS\")) > 0 || len(os.Getenv(\"CI\")) > 0 {\n\t\treturn\n\t}\n\n\tgoPath := os.Getenv(\"GOPATH\")\n\thugoPath := filepath.Join(goPath, \"src\/github.com\/spf13\/hugo\")\n\n\tif found, err := exists(hugoPath); !found || err != nil {\n\t\tlog.Fatalf(\"Aborting. Can't find Hugo source on %s.\", hugoPath)\n\t}\n\n\t\/\/ NOTE: I assume that 'go get -u' was run before of this and that\n\t\/\/ every package and dependency is up to date.\n\n\t\/\/ Get new tags from remote\n\trun(\"git\", []string{\"fetch\", \"--tags\"}, hugoPath)\n\n\t\/\/ Get the revision for the latest tag\n\tcommit := run(\"git\", []string{\"rev-list\", \"--tags\", \"--max-count=1\"}, hugoPath)\n\n\t\/\/ Get the latest tag\n\ttag := run(\"git\", []string{\"describe\", \"--tags\", commit}, hugoPath)\n\n\t\/\/ Checkout the latest tag\n\trun(\"git\", []string{\"checkout\", tag}, hugoPath)\n\n\t\/\/ Build hugo binary\n\tpluginPath := filepath.Join(goPath, \"src\/github.com\/hacdias\/caddy-hugo\")\n\trun(\"go\", []string{\"build\", \"-o\", \"assets\/hugo\", \"github.com\/spf13\/hugo\"}, pluginPath)\n\n\tupdateVersion(pluginPath, tag)\n}\n\nfunc run(command string, args []string, path string) string {\n\tcmd := exec.Command(command, args...)\n\tcmd.Dir = path\n\tout, err := cmd.Output()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn strings.TrimSpace(string(out))\n}\n\n\/\/ exists returns whether the given file or directory exists or not\nfunc exists(path string) (bool, error) {\n\t_, err := os.Stat(path)\n\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\n\treturn true, err\n}\n\nfunc updateVersion(path string, version string) {\n\tpath = filepath.Join(path, \"installer.go\")\n\n\tinput, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tlines := strings.Split(string(input), \"\\n\")\n\n\tfor i, line := range lines {\n\t\tif strings.Contains(line, \"const version\") {\n\t\t\tlines[i] = \"const version = \\\"\" + version + \"\\\"\"\n\t\t}\n\t}\n\n\toutput := strings.Join(lines, \"\\n\")\n\terr = ioutil.WriteFile(path, []byte(output), 0644)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2017, Daniel Martí \n\/\/ See LICENSE for licensing information\n\npackage pattern\n\nimport (\n\t\"fmt\"\n\t\"regexp\/syntax\"\n\t\"testing\"\n)\n\nvar translateTests = []struct {\n\tpattern string\n\tgreedy bool\n\twant string\n\twantErr bool\n}{\n\t{``, false, ``, false},\n\t{`foo`, false, `foo`, false},\n\t{`.`, false, `\\.`, false},\n\t{`foo*`, false, `foo.*?`, false},\n\t{`foo*`, true, `foo.*`, false},\n\t{`\\*`, false, `\\*`, false},\n\t{`\\`, false, \"\", true},\n\t{`?`, false, `.`, false},\n\t{`\\a`, false, `a`, false},\n\t{`(`, false, `\\(`, false},\n\t{`a|b`, false, `a\\|b`, false},\n\t{`x{3}`, false, `x\\{3\\}`, false},\n\t{`[a]`, false, `[a]`, false},\n\t{`[abc]`, false, `[abc]`, false},\n\t{`[^bc]`, false, `[^bc]`, false},\n\t{`[!bc]`, false, `[^bc]`, false},\n\t{`[[]`, false, `[[]`, false},\n\t{`[]]`, false, `[]]`, false},\n\t{`[^]]`, false, `[^]]`, false},\n\t{`[`, false, \"\", true},\n\t{`[]`, false, \"\", true},\n\t{`[^]`, false, \"\", true},\n\t{`[ab`, false, \"\", true},\n\t{`[a-]`, false, `[a-]`, false},\n\t{`[z-a]`, false, \"\", true},\n\t{`[a-a]`, false, \"[a-a]\", false},\n\t{`[aa]`, false, `[aa]`, false},\n\t{`[0-4A-Z]`, false, `[0-4A-Z]`, false},\n\t{`[-a]`, false, \"[-a]\", false},\n\t{`[^-a]`, false, \"[^-a]\", false},\n\t{`[a-]`, false, \"[a-]\", 
false},\n\t{`[[:digit:]]`, false, `[[:digit:]]`, false},\n\t{`[[:`, false, \"\", true},\n\t{`[[:digit`, false, \"\", true},\n\t{`[[:wrong:]]`, false, \"\", true},\n\t{`[[=x=]]`, false, \"\", true},\n\t{`[[.x.]]`, false, \"\", true},\n}\n\nfunc TestRegexp(t *testing.T) {\n\tt.Parallel()\n\tfor i, tc := range translateTests {\n\t\tt.Run(fmt.Sprintf(\"%02d\", i), func(t *testing.T) {\n\t\t\tgot, gotErr := Regexp(tc.pattern, tc.greedy)\n\t\t\tif tc.wantErr && gotErr == nil {\n\t\t\t\tt.Fatalf(\"(%q, %v) did not error\",\n\t\t\t\t\ttc.pattern, tc.greedy)\n\t\t\t}\n\t\t\tif !tc.wantErr && gotErr != nil {\n\t\t\t\tt.Fatalf(\"(%q, %v) errored with %q\",\n\t\t\t\t\ttc.pattern, tc.greedy, gotErr)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"(%q, %v) got %q, wanted %q\",\n\t\t\t\t\ttc.pattern, tc.greedy, got, tc.want)\n\t\t\t}\n\t\t\t_, rxErr := syntax.Parse(got, syntax.Perl)\n\t\t\tif gotErr == nil && rxErr != nil {\n\t\t\t\tt.Fatalf(\"regexp\/syntax.Parse(%q) failed with %q\",\n\t\t\t\t\tgot, rxErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nvar quoteTests = []struct {\n\tpattern string\n\twant string\n}{\n\t{``, ``},\n\t{`foo`, `foo`},\n\t{`.`, `.`},\n\t{`*`, `\\*`},\n\t{`foo?`, `foo\\?`},\n\t{`\\[`, `\\\\\\[`},\n}\n\nfunc TestQuoteMeta(t *testing.T) {\n\tt.Parallel()\n\tfor _, tc := range quoteTests {\n\t\tgot := QuoteMeta(tc.pattern)\n\t\tif got != tc.want {\n\t\t\tt.Errorf(\"(%q) got %q, wanted %q\",\n\t\t\t\ttc.pattern, got, tc.want)\n\t\t}\n\t}\n}\npattern: cover HasMeta in its tests\/\/ Copyright (c) 2017, Daniel Martí \n\/\/ See LICENSE for licensing information\n\npackage pattern\n\nimport (\n\t\"fmt\"\n\t\"regexp\/syntax\"\n\t\"testing\"\n)\n\nvar translateTests = []struct {\n\tpattern string\n\tgreedy bool\n\twant string\n\twantErr bool\n}{\n\t{``, false, ``, false},\n\t{`foo`, false, `foo`, false},\n\t{`.`, false, `\\.`, false},\n\t{`foo*`, false, `foo.*?`, false},\n\t{`foo*`, true, `foo.*`, false},\n\t{`\\*`, false, `\\*`, false},\n\t{`\\`, false, \"\", true},\n\t{`?`, false, `.`, false},\n\t{`\\a`, false, `a`, false},\n\t{`(`, false, `\\(`, false},\n\t{`a|b`, false, `a\\|b`, false},\n\t{`x{3}`, false, `x\\{3\\}`, false},\n\t{`[a]`, false, `[a]`, false},\n\t{`[abc]`, false, `[abc]`, false},\n\t{`[^bc]`, false, `[^bc]`, false},\n\t{`[!bc]`, false, `[^bc]`, false},\n\t{`[[]`, false, `[[]`, false},\n\t{`[]]`, false, `[]]`, false},\n\t{`[^]]`, false, `[^]]`, false},\n\t{`[`, false, \"\", true},\n\t{`[]`, false, \"\", true},\n\t{`[^]`, false, \"\", true},\n\t{`[ab`, false, \"\", true},\n\t{`[a-]`, false, `[a-]`, false},\n\t{`[z-a]`, false, \"\", true},\n\t{`[a-a]`, false, \"[a-a]\", false},\n\t{`[aa]`, false, `[aa]`, false},\n\t{`[0-4A-Z]`, false, `[0-4A-Z]`, false},\n\t{`[-a]`, false, \"[-a]\", false},\n\t{`[^-a]`, false, \"[^-a]\", false},\n\t{`[a-]`, false, \"[a-]\", false},\n\t{`[[:digit:]]`, false, `[[:digit:]]`, false},\n\t{`[[:`, false, \"\", true},\n\t{`[[:digit`, false, \"\", true},\n\t{`[[:wrong:]]`, false, \"\", true},\n\t{`[[=x=]]`, false, \"\", true},\n\t{`[[.x.]]`, false, \"\", true},\n}\n\nfunc TestRegexp(t *testing.T) {\n\tt.Parallel()\n\tfor i, tc := range translateTests {\n\t\tt.Run(fmt.Sprintf(\"%02d\", i), func(t *testing.T) {\n\t\t\tgot, gotErr := Regexp(tc.pattern, tc.greedy)\n\t\t\tif tc.wantErr && gotErr == nil {\n\t\t\t\tt.Fatalf(\"(%q, %v) did not error\", tc.pattern, tc.greedy)\n\t\t\t}\n\t\t\tif !tc.wantErr && gotErr != nil {\n\t\t\t\tt.Fatalf(\"(%q, %v) errored with %q\", tc.pattern, tc.greedy, gotErr)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"(%q, %v) 
got %q, wanted %q\", tc.pattern, tc.greedy, got, tc.want)\n\t\t\t}\n\t\t\t_, rxErr := syntax.Parse(got, syntax.Perl)\n\t\t\tif gotErr == nil && rxErr != nil {\n\t\t\t\tt.Fatalf(\"regexp\/syntax.Parse(%q) failed with %q\", got, rxErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nvar metaTests = []struct {\n\tpat string\n\twantHas bool\n\twantQuote string\n}{\n\t{``, false, ``},\n\t{`foo`, false, `foo`},\n\t{`.`, false, `.`},\n\t{`*`, true, `\\*`},\n\t{`foo?`, true, `foo\\?`},\n\t{`\\[`, false, `\\\\\\[`},\n}\n\nfunc TestMeta(t *testing.T) {\n\tt.Parallel()\n\tfor _, tc := range metaTests {\n\t\tif got := HasMeta(tc.pat); got != tc.wantHas {\n\t\t\tt.Errorf(\"HasMeta(%q) got %t, wanted %t\",\n\t\t\t\ttc.pat, got, tc.wantHas)\n\t\t}\n\t\tif got := QuoteMeta(tc.pat); got != tc.wantQuote {\n\t\t\tt.Errorf(\"QuoteMeta(%q) got %q, wanted %q\",\n\t\t\t\ttc.pat, got, tc.wantQuote)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED \"AS IS\"\n\/\/ WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.\n\npackage sim\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"my\/itto\/verify\/packet\"\n\t\"my\/itto\/verify\/packet\/itto\"\n\n\t\"code.google.com\/p\/gopacket\"\n)\n\nvar _ = log.Ldate\n\ntype IttoDbStats struct {\n\tnumOrders int\n\tnumOptions int\n\tnumSessions int\n}\n\ntype IttoDbMessage struct {\n\tPam packet.ApplicationMessage\n}\n\ntype IttoDb interface {\n\tStats() IttoDbStats\n\tMessageOperations(*IttoDbMessage) []IttoOperation\n\tApplyOperation(operation IttoOperation)\n}\n\nfunc NewIttoDb() IttoDb {\n\treturn &db{\n\t\torders: make(map[orderIndex]order),\n\t}\n}\n\ntype db struct {\n\tsessions []session\n\torders map[orderIndex]order\n}\n\ntype orderIndex uint64\n\nfunc NewOrderIndex(d *db, flow gopacket.Flow, refNumD itto.RefNumDelta) orderIndex {\n\ts := d.getSession(flow)\n\treturn orderIndex(uint64(s.index)<<32 + uint64(refNumD.Delta()))\n}\n\ntype order struct {\n\tOId itto.OptionId\n\titto.OrderSide\n}\n\ntype session struct {\n\tflow gopacket.Flow\n\tindex int\n}\n\nfunc (d *db) findOrder(flow gopacket.Flow, refNumD itto.RefNumDelta) (order order, err error) {\n\torder, ok := d.orders[NewOrderIndex(d, flow, refNumD)]\n\tif !ok {\n\t\terr = errors.New(\"order not found\")\n\t}\n\treturn\n}\n\nfunc (d *db) getSession(flow gopacket.Flow) session {\n\tfor _, s := range d.sessions {\n\t\tif s.flow == flow {\n\t\t\treturn s\n\t\t}\n\t}\n\ts := session{\n\t\tflow: flow,\n\t\tindex: len(d.sessions),\n\t}\n\td.sessions = append(d.sessions, s)\n\treturn s\n}\n\nfunc (d *db) Stats() IttoDbStats {\n\ts := IttoDbStats{\n\t\tnumOrders: len(d.orders),\n\t\tnumSessions: len(d.sessions),\n\t}\n\treturn s\n}\n\nfunc (d *db) ApplyOperation(operation IttoOperation) {\n\toperation.getOperation().populate()\n\toid := operation.GetOptionId()\n\tif oid.Invalid() {\n\t\treturn\n\t}\n\tswitch op := operation.(type) {\n\tcase *OperationAdd:\n\t\tnewOrder := order{OId: op.optionId, OrderSide: op.OrderSide}\n\t\tif op.origOrder != nil {\n\t\t\tif op.optionId.Valid() {\n\t\t\t\tlog.Fatalf(\"bad option id for add operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t\t}\n\t\t\tif op.Side != itto.MarketSideUnknown && op.Side != op.origOrder.Side {\n\t\t\t\tlog.Fatalf(\"bad side for add operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t\t}\n\t\t\tnewOrder.OId = op.origOrder.OId\n\t\t\tnewOrder.Side = op.origOrder.Side\n\t\t}\n\t\td.orders[op.orderIndex()] = newOrder\n\tcase *OperationRemove:\n\t\tdelete(d.orders, op.origOrderIndex())\n\tcase 
*OperationUpdate:\n\t\to := *op.origOrder\n\t\to.Size -= op.sizeChange\n\t\tswitch {\n\t\tcase o.Size > 0:\n\t\t\td.orders[op.origOrderIndex()] = o\n\t\tcase o.Size == 0:\n\t\t\tdelete(d.orders, op.origOrderIndex())\n\t\tcase o.Size < 0:\n\t\t\tlog.Fatalf(\"negative size after operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t}\n\tdefault:\n\t\tlog.Fatal(\"unknown operation \", operation)\n\t}\n}\n\ntype IttoOperation interface {\n\tGetOptionId() itto.OptionId\n\tgetOperation() *Operation\n}\n\ntype Operation struct {\n\tm *IttoDbMessage\n\td *db\n\torigRefNumD itto.RefNumDelta\n\torigOrder *order\n\tsibling IttoOperation\n}\n\nfunc (op *Operation) populate() {\n\tif op.origOrder != nil {\n\t\treturn\n\t}\n\tif op.sibling != nil {\n\t\top.sibling.getOperation().populate()\n\t\top.origOrder = op.sibling.getOperation().origOrder\n\t} else if op.origRefNumD != (itto.RefNumDelta{}) {\n\t\tif ord, err := op.d.findOrder(op.m.Pam.Flow(), op.origRefNumD); err == nil {\n\t\t\top.origOrder = &ord\n\t\t}\n\t}\n}\n\nfunc (op *Operation) origOrderIndex() orderIndex {\n\treturn NewOrderIndex(op.d, op.m.Pam.Flow(), op.origRefNumD)\n}\n\nfunc (o *Operation) getOptionId() (oid itto.OptionId) {\n\to.populate()\n\tif o.origOrder != nil {\n\t\treturn o.origOrder.OId\n\t} else {\n\t\treturn itto.OptionId(0)\n\t}\n}\n\ntype OperationAdd struct {\n\tOperation\n\toptionId itto.OptionId\n\titto.OrderSide\n}\n\nfunc (o *OperationAdd) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationAdd) GetOptionId() itto.OptionId {\n\tif o.optionId.Valid() {\n\t\treturn o.optionId\n\t} else {\n\t\treturn o.Operation.getOptionId()\n\t}\n}\nfunc (op *OperationAdd) orderIndex() orderIndex {\n\treturn NewOrderIndex(op.d, op.m.Pam.Flow(), op.RefNumD)\n}\n\ntype OperationRemove struct {\n\tOperation\n}\n\nfunc (o *OperationRemove) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationRemove) GetOptionId() itto.OptionId {\n\treturn o.Operation.getOptionId()\n}\n\ntype OperationUpdate struct {\n\tOperation\n\tsizeChange int\n}\n\nfunc (o *OperationUpdate) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationUpdate) GetOptionId() itto.OptionId {\n\treturn o.Operation.getOptionId()\n}\n\nfunc (d *db) MessageOperations(m *IttoDbMessage) []IttoOperation {\n\tvar ops []IttoOperation\n\taddOperation := func(origRefNumD itto.RefNumDelta, operation IttoOperation) {\n\t\topop := operation.getOperation()\n\t\topop.m = m\n\t\topop.d = d\n\t\topop.origRefNumD = origRefNumD\n\t\tops = append(ops, operation)\n\t}\n\taddOperationReplace := func(origRefNumD itto.RefNumDelta, orderSide itto.OrderSide) {\n\t\topRemove := &OperationRemove{}\n\t\topAdd := &OperationAdd{\n\t\t\t\/\/ unknown: optionId; maybe unknown: OrderSide.Side\n\t\t\tOrderSide: orderSide,\n\t\t\tOperation: Operation{sibling: opRemove},\n\t\t}\n\t\taddOperation(origRefNumD, opRemove)\n\t\taddOperation(itto.RefNumDelta{}, opAdd)\n\t}\n\tswitch im := m.Pam.Layer().(type) {\n\tcase *itto.IttoMessageAddOrder:\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.OrderSide})\n\tcase *itto.IttoMessageAddQuote:\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.Bid})\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.Ask})\n\tcase *itto.IttoMessageSingleSideExecuted:\n\t\taddOperation(im.OrigRefNumD, &OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageSingleSideExecutedWithPrice:\n\t\taddOperation(im.OrigRefNumD, 
&OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageOrderCancel:\n\t\taddOperation(im.OrigRefNumD, &OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageSingleSideReplace:\n\t\taddOperationReplace(im.OrigRefNumD, im.OrderSide)\n\tcase *itto.IttoMessageSingleSideDelete:\n\t\taddOperation(im.OrigRefNumD, &OperationRemove{})\n\tcase *itto.IttoMessageSingleSideUpdate:\n\t\taddOperationReplace(im.RefNumD, im.OrderSide)\n\tcase *itto.IttoMessageQuoteReplace:\n\t\taddOperationReplace(im.Bid.OrigRefNumD, im.Bid.OrderSide)\n\t\taddOperationReplace(im.Ask.OrigRefNumD, im.Ask.OrderSide)\n\tcase *itto.IttoMessageQuoteDelete:\n\t\taddOperation(im.BidOrigRefNumD, &OperationRemove{})\n\t\taddOperation(im.AskOrigRefNumD, &OperationRemove{})\n\tcase *itto.IttoMessageBlockSingleSideDelete:\n\t\tfor _, r := range im.RefNumDs {\n\t\t\taddOperation(r, &OperationRemove{})\n\t\t}\n\t}\n\treturn ops\n}\nadd sim.IttoOperation methods\/\/ Copyright (c) Ilia Kravets, 2015. All rights reserved. PROVIDED \"AS IS\"\n\/\/ WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. See LICENSE file for details.\n\npackage sim\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"my\/itto\/verify\/packet\"\n\t\"my\/itto\/verify\/packet\/itto\"\n\n\t\"code.google.com\/p\/gopacket\"\n)\n\nvar _ = log.Ldate\n\ntype IttoDbStats struct {\n\tnumOrders int\n\tnumOptions int\n\tnumSessions int\n}\n\ntype IttoDbMessage struct {\n\tPam packet.ApplicationMessage\n}\n\ntype IttoDb interface {\n\tStats() IttoDbStats\n\tMessageOperations(*IttoDbMessage) []IttoOperation\n\tApplyOperation(operation IttoOperation)\n}\n\nfunc NewIttoDb() IttoDb {\n\treturn &db{\n\t\torders: make(map[orderIndex]order),\n\t}\n}\n\ntype db struct {\n\tsessions []session\n\torders map[orderIndex]order\n}\n\ntype orderIndex uint64\n\nfunc NewOrderIndex(d *db, flow gopacket.Flow, refNumD itto.RefNumDelta) orderIndex {\n\ts := d.getSession(flow)\n\treturn orderIndex(uint64(s.index)<<32 + uint64(refNumD.Delta()))\n}\n\ntype order struct {\n\tOId itto.OptionId\n\titto.OrderSide\n}\n\ntype session struct {\n\tflow gopacket.Flow\n\tindex int\n}\n\nfunc (d *db) findOrder(flow gopacket.Flow, refNumD itto.RefNumDelta) (order order, err error) {\n\torder, ok := d.orders[NewOrderIndex(d, flow, refNumD)]\n\tif !ok {\n\t\terr = errors.New(\"order not found\")\n\t}\n\treturn\n}\n\nfunc (d *db) getSession(flow gopacket.Flow) session {\n\tfor _, s := range d.sessions {\n\t\tif s.flow == flow {\n\t\t\treturn s\n\t\t}\n\t}\n\ts := session{\n\t\tflow: flow,\n\t\tindex: len(d.sessions),\n\t}\n\td.sessions = append(d.sessions, s)\n\treturn s\n}\n\nfunc (d *db) Stats() IttoDbStats {\n\ts := IttoDbStats{\n\t\tnumOrders: len(d.orders),\n\t\tnumSessions: len(d.sessions),\n\t}\n\treturn s\n}\n\nfunc (d *db) ApplyOperation(operation IttoOperation) {\n\toperation.getOperation().populate()\n\toid := operation.GetOptionId()\n\tif oid.Invalid() {\n\t\treturn\n\t}\n\tswitch op := operation.(type) {\n\tcase *OperationAdd:\n\t\tnewOrder := order{OId: op.optionId, OrderSide: op.OrderSide}\n\t\tif op.origOrder != nil {\n\t\t\tif op.optionId.Valid() {\n\t\t\t\tlog.Fatalf(\"bad option id for add operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t\t}\n\t\t\tif op.Side != itto.MarketSideUnknown && op.Side != op.origOrder.Side {\n\t\t\t\tlog.Fatalf(\"bad side for add operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t\t}\n\t\t\tnewOrder.OId = op.origOrder.OId\n\t\t\tnewOrder.Side = op.origOrder.Side\n\t\t}\n\t\td.orders[op.orderIndex()] = newOrder\n\tcase *OperationRemove:\n\t\tdelete(d.orders, 
op.origOrderIndex())\n\tcase *OperationUpdate:\n\t\to := *op.origOrder\n\t\to.Size -= op.sizeChange\n\t\tswitch {\n\t\tcase o.Size > 0:\n\t\t\td.orders[op.origOrderIndex()] = o\n\t\tcase o.Size == 0:\n\t\t\tdelete(d.orders, op.origOrderIndex())\n\t\tcase o.Size < 0:\n\t\t\tlog.Fatalf(\"negative size after operation %#v origOrder=%#v\\n\", op, *op.origOrder)\n\t\t}\n\tdefault:\n\t\tlog.Fatal(\"unknown operation \", operation)\n\t}\n}\n\ntype IttoOperation interface {\n\tGetOptionId() itto.OptionId\n\tGetSide() itto.MarketSide\n\tGetSizeDelta() int\n\tGetPrice() int\n\tgetOperation() *Operation\n}\n\ntype Operation struct {\n\tm *IttoDbMessage\n\td *db\n\torigRefNumD itto.RefNumDelta\n\torigOrder *order\n\tsibling IttoOperation\n}\n\nfunc (op *Operation) populate() {\n\tif op.origOrder != nil {\n\t\treturn\n\t}\n\tif op.sibling != nil {\n\t\top.sibling.getOperation().populate()\n\t\top.origOrder = op.sibling.getOperation().origOrder\n\t} else if op.origRefNumD != (itto.RefNumDelta{}) {\n\t\tif ord, err := op.d.findOrder(op.m.Pam.Flow(), op.origRefNumD); err == nil {\n\t\t\top.origOrder = &ord\n\t\t}\n\t}\n}\nfunc (op *Operation) origOrderIndex() orderIndex {\n\treturn NewOrderIndex(op.d, op.m.Pam.Flow(), op.origRefNumD)\n}\nfunc (o *Operation) getOptionId() (oid itto.OptionId) {\n\to.populate()\n\tif o.origOrder != nil {\n\t\treturn o.origOrder.OId\n\t} else {\n\t\treturn itto.OptionId(0)\n\t}\n}\nfunc (o *Operation) getSide() (side itto.MarketSide) {\n\to.populate()\n\tif o.origOrder != nil {\n\t\tside = o.origOrder.Side\n\t}\n\treturn\n}\n\ntype OperationAdd struct {\n\tOperation\n\toptionId itto.OptionId\n\titto.OrderSide\n}\n\nfunc (o *OperationAdd) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationAdd) GetOptionId() itto.OptionId {\n\tif o.optionId.Valid() {\n\t\treturn o.optionId\n\t} else {\n\t\treturn o.Operation.getOptionId()\n\t}\n}\nfunc (o *OperationAdd) GetSide() (side itto.MarketSide) {\n\tif o.Side != itto.MarketSideUnknown {\n\t\treturn o.Side\n\t} else {\n\t\treturn o.Operation.getSide()\n\t}\n}\nfunc (o *OperationAdd) GetPrice() int {\n\treturn o.Price\n}\nfunc (o *OperationAdd) GetSizeDelta() int {\n\treturn o.Size\n}\nfunc (op *OperationAdd) orderIndex() orderIndex {\n\treturn NewOrderIndex(op.d, op.m.Pam.Flow(), op.RefNumD)\n}\n\ntype OperationRemove struct {\n\tOperation\n}\n\nfunc (o *OperationRemove) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationRemove) GetOptionId() itto.OptionId {\n\treturn o.Operation.getOptionId()\n}\nfunc (o *OperationRemove) GetSide() (side itto.MarketSide) {\n\treturn o.Operation.getSide()\n}\nfunc (o *OperationRemove) GetSizeDelta() int {\n\to.Operation.populate()\n\tif o.origOrder == nil {\n\t\tlog.Fatal(\"no origOrder\")\n\t}\n\treturn -o.origOrder.Size\n}\nfunc (o *OperationRemove) GetPrice() int {\n\to.Operation.populate()\n\tif o.origOrder == nil {\n\t\tlog.Fatal(\"no origOrder\")\n\t}\n\treturn o.origOrder.Price\n}\n\ntype OperationUpdate struct {\n\tOperation\n\tsizeChange int\n}\n\nfunc (o *OperationUpdate) getOperation() *Operation {\n\treturn &o.Operation\n}\nfunc (o *OperationUpdate) GetOptionId() itto.OptionId {\n\treturn o.Operation.getOptionId()\n}\nfunc (o *OperationUpdate) GetSide() (side itto.MarketSide) {\n\treturn o.Operation.getSide()\n}\nfunc (o *OperationUpdate) GetSizeDelta() int {\n\treturn -o.sizeChange\n}\nfunc (o *OperationUpdate) GetPrice() int {\n\to.Operation.populate()\n\tif o.origOrder == nil {\n\t\tlog.Fatal(\"no origOrder\")\n\t}\n\treturn 
o.origOrder.Price\n}\n\nfunc (d *db) MessageOperations(m *IttoDbMessage) []IttoOperation {\n\tvar ops []IttoOperation\n\taddOperation := func(origRefNumD itto.RefNumDelta, operation IttoOperation) {\n\t\topop := operation.getOperation()\n\t\topop.m = m\n\t\topop.d = d\n\t\topop.origRefNumD = origRefNumD\n\t\tops = append(ops, operation)\n\t}\n\taddOperationReplace := func(origRefNumD itto.RefNumDelta, orderSide itto.OrderSide) {\n\t\topRemove := &OperationRemove{}\n\t\topAdd := &OperationAdd{\n\t\t\t\/\/ unknown: optionId; maybe unknown: OrderSide.Side\n\t\t\tOrderSide: orderSide,\n\t\t\tOperation: Operation{sibling: opRemove},\n\t\t}\n\t\taddOperation(origRefNumD, opRemove)\n\t\taddOperation(itto.RefNumDelta{}, opAdd)\n\t}\n\tswitch im := m.Pam.Layer().(type) {\n\tcase *itto.IttoMessageAddOrder:\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.OrderSide})\n\tcase *itto.IttoMessageAddQuote:\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.Bid})\n\t\taddOperation(itto.RefNumDelta{}, &OperationAdd{optionId: im.OId, OrderSide: im.Ask})\n\tcase *itto.IttoMessageSingleSideExecuted:\n\t\taddOperation(im.OrigRefNumD, &OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageSingleSideExecutedWithPrice:\n\t\taddOperation(im.OrigRefNumD, &OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageOrderCancel:\n\t\taddOperation(im.OrigRefNumD, &OperationUpdate{sizeChange: im.Size})\n\tcase *itto.IttoMessageSingleSideReplace:\n\t\taddOperationReplace(im.OrigRefNumD, im.OrderSide)\n\tcase *itto.IttoMessageSingleSideDelete:\n\t\taddOperation(im.OrigRefNumD, &OperationRemove{})\n\tcase *itto.IttoMessageSingleSideUpdate:\n\t\taddOperationReplace(im.RefNumD, im.OrderSide)\n\tcase *itto.IttoMessageQuoteReplace:\n\t\taddOperationReplace(im.Bid.OrigRefNumD, im.Bid.OrderSide)\n\t\taddOperationReplace(im.Ask.OrigRefNumD, im.Ask.OrderSide)\n\tcase *itto.IttoMessageQuoteDelete:\n\t\taddOperation(im.BidOrigRefNumD, &OperationRemove{})\n\t\taddOperation(im.AskOrigRefNumD, &OperationRemove{})\n\tcase *itto.IttoMessageBlockSingleSideDelete:\n\t\tfor _, r := range im.RefNumDs {\n\t\t\taddOperation(r, &OperationRemove{})\n\t\t}\n\t}\n\treturn ops\n}\n<|endoftext|>"} {"text":"package cors\n\nconst (\n\tAccessControlAllowOrigin = \"Access-Control-Allow-Origin\"\n\tAccessControlAllowMethods = \"Access-Control-Allow-Methods\"\n\tAccessControlRequestMethod = \"Access-Control-Request-Method\"\n\tOrigin = \"Origin\"\n)\nFix testspackage cors\n\nconst (\n\tAccessControlAllowOrigin string = \"Access-Control-Allow-Origin\"\n\tAccessControlAllowMethods = \"Access-Control-Allow-Methods\"\n\tAccessControlRequestMethod = \"Access-Control-Request-Method\"\n\tOrigin = \"Origin\"\n)\n<|endoftext|>"} {"text":"package dexcom\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\nconst (\n\tuserTimeLayout = \"2006-01-02 15:04:05\"\n)\n\nfunc (cgm *CGM) ReadHistory(pageType PageType, since time.Time) []Record {\n\tfirst, last := cgm.ReadPageRange(pageType)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tvar results []Record\n\tproc := func(r Record) (bool, error) {\n\t\tt := r.Time()\n\t\tif t.Before(since) {\n\t\t\tlog.Printf(\"stopping %v scan at %s\", pageType, t.Format(userTimeLayout))\n\t\t\treturn true, nil\n\t\t}\n\t\tresults = append(results, r)\n\t\treturn false, nil\n\t}\n\tcgm.IterRecords(pageType, first, last, proc)\n\treturn results\n}\n\nfunc (cgm *CGM) ReadCount(pageType PageType, count int) []Record {\n\tfirst, last := cgm.ReadPageRange(pageType)\n\tif 
cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tvar results []Record\n\tproc := func(r Record) (bool, error) {\n\t\tresults = append(results, r)\n\t\treturn len(results) == count, nil\n\t}\n\tcgm.IterRecords(pageType, first, last, proc)\n\treturn results\n}\n\n\/\/ Merge slices of records that are already in reverse chronological order\n\/\/ into a single ordered slice.\nfunc MergeHistory(slices ...[]Record) []Record {\n\tn := len(slices)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tif n == 1 {\n\t\treturn slices[0]\n\t}\n\tlength := make([]int, n)\n\ttotal := 0\n\tfor i, v := range slices {\n\t\tlength[i] = len(v)\n\t\ttotal += len(v)\n\t}\n\tresults := make([]Record, total)\n\tindex := make([]int, n)\n\tfor next, _ := range results {\n\t\t\/\/ Find slice with latest current value.\n\t\twhich := -1\n\t\tmax := time.Time{}\n\t\tfor i, v := range slices {\n\t\t\tif index[i] < len(v) {\n\t\t\t\tt := v[index[i]].Time()\n\t\t\t\tif t.After(max) {\n\t\t\t\t\twhich = i\n\t\t\t\t\tmax = t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresults[next] = slices[which][index[which]]\n\t\tindex[which]++\n\t}\n\treturn results\n}\n\nconst (\n\t\/\/ Time window within which EGV and sensor readings will be merged.\n\tglucoseReadingWindow = 10 * time.Second\n)\n\nfunc (cgm *CGM) GlucoseReadings(since time.Time) []Record {\n\tsensor := cgm.ReadHistory(SENSOR_DATA, since)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tnumSensor := len(sensor)\n\tegv := cgm.ReadHistory(EGV_DATA, since)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tnumEGV := len(egv)\n\tvar readings []Record\n\ti, j := 0, 0\n\tfor {\n\t\tvar r Record\n\t\tif i < numSensor && j < numEGV {\n\t\t\tsensorTime := sensor[i].Time()\n\t\t\tegvTime := egv[j].Time()\n\t\t\tdelta := egvTime.Sub(sensorTime)\n\t\t\tif 0 <= delta && delta < glucoseReadingWindow {\n\t\t\t\t\/\/ Merge using sensor[i]'s slightly earlier time.\n\t\t\t\tr = sensor[i]\n\t\t\t\tr.EGV = egv[j].EGV\n\t\t\t\ti++\n\t\t\t\tj++\n\t\t\t} else if 0 <= -delta && -delta < glucoseReadingWindow {\n\t\t\t\t\/\/ Merge using egv[j]'s slightly earlier time.\n\t\t\t\tr = egv[j]\n\t\t\t\tr.Sensor = sensor[i].Sensor\n\t\t\t\ti++\n\t\t\t\tj++\n\t\t\t} else if sensorTime.After(egvTime) {\n\t\t\t\tr = sensor[i]\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\tr = egv[j]\n\t\t\t\tj++\n\t\t\t}\n\t\t} else if i < numSensor {\n\t\t\tr = sensor[i]\n\t\t\ti++\n\t\t} else if j < numEGV {\n\t\t\tr = egv[j]\n\t\t\tj++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t\treadings = append(readings, r)\n\t}\n\treturn readings\n}\nSimplify GlucoseReadings functionpackage dexcom\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\nconst (\n\tuserTimeLayout = \"2006-01-02 15:04:05\"\n)\n\nfunc (cgm *CGM) ReadHistory(pageType PageType, since time.Time) []Record {\n\tfirst, last := cgm.ReadPageRange(pageType)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tvar results []Record\n\tproc := func(r Record) (bool, error) {\n\t\tt := r.Time()\n\t\tif t.Before(since) {\n\t\t\tlog.Printf(\"stopping %v scan at %s\", pageType, t.Format(userTimeLayout))\n\t\t\treturn true, nil\n\t\t}\n\t\tresults = append(results, r)\n\t\treturn false, nil\n\t}\n\tcgm.IterRecords(pageType, first, last, proc)\n\treturn results\n}\n\nfunc (cgm *CGM) ReadCount(pageType PageType, count int) []Record {\n\tfirst, last := cgm.ReadPageRange(pageType)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tvar results []Record\n\tproc := func(r Record) (bool, error) {\n\t\tresults = append(results, r)\n\t\treturn len(results) == count, nil\n\t}\n\tcgm.IterRecords(pageType, first, last, proc)\n\treturn 
results\n}\n\n\/\/ Merge slices of records that are already in reverse chronological order\n\/\/ into a single ordered slice.\nfunc MergeHistory(slices ...[]Record) []Record {\n\tn := len(slices)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tif n == 1 {\n\t\treturn slices[0]\n\t}\n\tlength := make([]int, n)\n\ttotal := 0\n\tfor i, v := range slices {\n\t\tlength[i] = len(v)\n\t\ttotal += len(v)\n\t}\n\tresults := make([]Record, total)\n\tindex := make([]int, n)\n\tfor next, _ := range results {\n\t\t\/\/ Find slice with latest current value.\n\t\twhich := -1\n\t\tmax := time.Time{}\n\t\tfor i, v := range slices {\n\t\t\tif index[i] < len(v) {\n\t\t\t\tt := v[index[i]].Time()\n\t\t\t\tif t.After(max) {\n\t\t\t\t\twhich = i\n\t\t\t\t\tmax = t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresults[next] = slices[which][index[which]]\n\t\tindex[which]++\n\t}\n\treturn results\n}\n\nconst (\n\t\/\/ Time window within which EGV and sensor readings will be merged.\n\tglucoseReadingWindow = 10 * time.Second\n)\n\nfunc (cgm *CGM) GlucoseReadings(since time.Time) []Record {\n\tsensor := cgm.ReadHistory(SENSOR_DATA, since)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\tegv := cgm.ReadHistory(EGV_DATA, since)\n\tif cgm.Error() != nil {\n\t\treturn nil\n\t}\n\treadings := make([]Record, 0, len(sensor))\n\ti, j := 0, 0\n\tfor {\n\t\tvar r Record\n\t\tif i < len(sensor) && j < len(egv) {\n\t\t\tr = chooseRecord(sensor, egv, &i, &j)\n\t\t} else if i < len(sensor) {\n\t\t\tr = sensor[i]\n\t\t\ti++\n\t\t} else if j < len(egv) {\n\t\t\tr = egv[j]\n\t\t\tj++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t\treadings = append(readings, r)\n\t}\n\treturn readings\n}\n\nfunc chooseRecord(sensor, egv []Record, ip, jp *int) Record {\n\ti := *ip\n\tj := *jp\n\tsensorTime := sensor[i].Time()\n\tegvTime := egv[j].Time()\n\tdelta := egvTime.Sub(sensorTime)\n\tvar r Record\n\tif 0 <= delta && delta < glucoseReadingWindow {\n\t\t\/\/ Merge using sensor[i]'s slightly earlier time.\n\t\tr = sensor[i]\n\t\tr.EGV = egv[j].EGV\n\t\ti++\n\t\tj++\n\t} else if 0 <= -delta && -delta < glucoseReadingWindow {\n\t\t\/\/ Merge using egv[j]'s slightly earlier time.\n\t\tr = egv[j]\n\t\tr.Sensor = sensor[i].Sensor\n\t\ti++\n\t\tj++\n\t} else if sensorTime.After(egvTime) {\n\t\tr = sensor[i]\n\t\ti++\n\t} else {\n\t\tr = egv[j]\n\t\tj++\n\t}\n\t*ip = i\n\t*jp = j\n\treturn r\n}\n<|endoftext|>"} {"text":"package channeldb\n\nimport (\n\t\"encoding\/binary\"\n\n\t\"io\"\n\n\t\"bytes\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n)\n\nvar (\n\t\/\/ waitingProofsBucketKey byte string name of the waiting proofs store.\n\twaitingProofsBucketKey = []byte(\"waitingproofs\")\n\n\t\/\/ ErrWaitingProofNotFound is returned if waiting proofs haven't been\n\t\/\/ found by db.\n\tErrWaitingProofNotFound = errors.New(\"waiting proofs haven't been \" +\n\t\t\"found\")\n\n\t\/\/ ErrWaitingProofAlreadyExist is returned if waiting proofs haven't been\n\t\/\/ found by db.\n\tErrWaitingProofAlreadyExist = errors.New(\"waiting proof with such \" +\n\t\t\"key already exist\")\n)\n\n\/\/ WaitingProofStore is the bold db map-like storage for half announcement\n\/\/ signatures. 
The one responsibility of this storage is to be able to\n\/\/ retrieve waiting proofs after client restart.\ntype WaitingProofStore struct {\n\t\/\/ cache is used in order to reduce the number of redundant get\n\t\/\/ calls, when object isn't stored in it.\n\tcache map[WaitingProofKey]struct{}\n\tdb *DB\n}\n\n\/\/ NewWaitingProofStore creates new instance of proofs storage.\nfunc NewWaitingProofStore(db *DB) (*WaitingProofStore, error) {\n\ts := &WaitingProofStore{\n\t\tdb: db,\n\t\tcache: make(map[WaitingProofKey]struct{}),\n\t}\n\n\tif err := s.ForAll(func(proof *WaitingProof) error {\n\t\ts.cache[proof.Key()] = struct{}{}\n\t\treturn nil\n\t}); err != nil && err != ErrWaitingProofNotFound {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Add adds new waiting proof in the storage.\nfunc (s *WaitingProofStore) Add(proof *WaitingProof) error {\n\tif _, ok := s.cache[proof.Key()]; ok {\n\t\treturn ErrWaitingProofAlreadyExist\n\t}\n\n\treturn s.db.Batch(func(tx *bolt.Tx) error {\n\t\tvar err error\n\t\tvar b bytes.Buffer\n\n\t\t\/\/ Get or create the bucket.\n\t\tbucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Encode the objects and place it in the bucket.\n\t\tif err := proof.Encode(&b); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkey := proof.Key()\n\t\tif err := bucket.Put(key[:], b.Bytes()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.cache[proof.Key()] = struct{}{}\n\t\treturn nil\n\t})\n}\n\n\/\/ Remove removes the proof from storage by its key.\nfunc (s *WaitingProofStore) Remove(key WaitingProofKey) error {\n\tif _, ok := s.cache[key]; !ok {\n\t\treturn ErrWaitingProofNotFound\n\t}\n\n\treturn s.db.Batch(func(tx *bolt.Tx) error {\n\t\t\/\/ Get or create the top bucket.\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\tif err := bucket.Delete(key[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(s.cache, key)\n\t\treturn nil\n\t})\n}\n\n\/\/ ForAll iterates thought all waiting proofs and passing the waiting proof\n\/\/ in the given callback.\nfunc (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {\n\treturn s.db.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\t\/\/ Iterate over objects buckets.\n\t\treturn bucket.ForEach(func(k, v []byte) error {\n\t\t\t\/\/ Skip buckets fields.\n\t\t\tif v == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tr := bytes.NewReader(v)\n\t\t\tproof := &WaitingProof{}\n\t\t\tif err := proof.Decode(r); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn cb(proof)\n\t\t})\n\t})\n}\n\n\/\/ Get returns the object which corresponds to the given index.\nfunc (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {\n\tproof := &WaitingProof{}\n\n\tif _, ok := s.cache[key]; !ok {\n\t\treturn nil, ErrWaitingProofNotFound\n\t}\n\n\terr := s.db.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\t\/\/ Iterate over objects buckets.\n\t\tv := bucket.Get(key[:])\n\t\tif v == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\tr := bytes.NewReader(v)\n\t\treturn proof.Decode(r)\n\t})\n\n\treturn proof, err\n}\n\n\/\/ WaitingProofKey is the proof key which uniquely identifies the waiting\n\/\/ proof object. 
The goal of this key is distinguish the local and remote\n\/\/ proof for the same channel id.\ntype WaitingProofKey [9]byte\n\n\/\/ WaitingProof is the storable object, which encapsulate the half proof and\n\/\/ the information about from which side this proof came. This structure is\n\/\/ needed to make channel proof exchange persistent, so that after client\n\/\/ restart we may receive remote\/local half proof and process it.\ntype WaitingProof struct {\n\t*lnwire.AnnounceSignatures\n\tisRemote bool\n}\n\n\/\/ NewWaitingProof constructs a new waiting prof instance.\nfunc NewWaitingProof(isRemote bool, proof *lnwire.AnnounceSignatures) *WaitingProof {\n\treturn &WaitingProof{\n\t\tAnnounceSignatures: proof,\n\t\tisRemote: isRemote,\n\t}\n}\n\n\/\/ OppositeKey returns the key which uniquely identifies opposite waiting proof.\nfunc (p *WaitingProof) OppositeKey() WaitingProofKey {\n\tvar key [9]byte\n\tbinary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())\n\n\tif !p.isRemote {\n\t\tkey[8] = 1\n\t}\n\treturn key\n}\n\n\/\/ Key returns the key which uniquely identifies waiting proof.\nfunc (p *WaitingProof) Key() WaitingProofKey {\n\tvar key [9]byte\n\tbinary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())\n\n\tif p.isRemote {\n\t\tkey[8] = 1\n\t}\n\treturn key\n}\n\n\/\/ Encode writes the internal representation of waiting proof in byte stream.\nfunc (p *WaitingProof) Encode(w io.Writer) error {\n\tvar b [1]byte\n\tif p.isRemote {\n\t\tb[0] = 1\n\t}\n\n\tif _, err := w.Write(b[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.AnnounceSignatures.Encode(w, 0); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Decode reads the data from the byte stream and initialize the\n\/\/ waiting proof object with it.\nfunc (p *WaitingProof) Decode(r io.Reader) error {\n\tvar b [1]byte\n\tif _, err := r.Read(b[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif b[0] == 1 {\n\t\t(*p).isRemote = true\n\t}\n\n\tmsg := &lnwire.AnnounceSignatures{}\n\tif err := msg.Decode(r, 0); err != nil {\n\t\treturn err\n\t}\n\n\t(*p).AnnounceSignatures = msg\n\treturn nil\n}\nchanneldb: use binary.Read\/Write in waitingproof.gopackage channeldb\n\nimport (\n\t\"encoding\/binary\"\n\n\t\"io\"\n\n\t\"bytes\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n)\n\nvar (\n\t\/\/ waitingProofsBucketKey byte string name of the waiting proofs store.\n\twaitingProofsBucketKey = []byte(\"waitingproofs\")\n\n\t\/\/ ErrWaitingProofNotFound is returned if waiting proofs haven't been\n\t\/\/ found by db.\n\tErrWaitingProofNotFound = errors.New(\"waiting proofs haven't been \" +\n\t\t\"found\")\n\n\t\/\/ ErrWaitingProofAlreadyExist is returned if waiting proofs haven't been\n\t\/\/ found by db.\n\tErrWaitingProofAlreadyExist = errors.New(\"waiting proof with such \" +\n\t\t\"key already exist\")\n)\n\n\/\/ WaitingProofStore is the bold db map-like storage for half announcement\n\/\/ signatures. 
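Each peer signs its half of a channel announcement; a half proof is kept here until its opposite half arrives. 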
The one responsibility of this storage is to be able to\n\/\/ retrieve waiting proofs after client restart.\ntype WaitingProofStore struct {\n\t\/\/ cache is used in order to reduce the number of redundant get\n\t\/\/ calls, when object isn't stored in it.\n\tcache map[WaitingProofKey]struct{}\n\tdb *DB\n}\n\n\/\/ NewWaitingProofStore creates new instance of proofs storage.\nfunc NewWaitingProofStore(db *DB) (*WaitingProofStore, error) {\n\ts := &WaitingProofStore{\n\t\tdb: db,\n\t\tcache: make(map[WaitingProofKey]struct{}),\n\t}\n\n\tif err := s.ForAll(func(proof *WaitingProof) error {\n\t\ts.cache[proof.Key()] = struct{}{}\n\t\treturn nil\n\t}); err != nil && err != ErrWaitingProofNotFound {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ Add adds new waiting proof in the storage.\nfunc (s *WaitingProofStore) Add(proof *WaitingProof) error {\n\tif _, ok := s.cache[proof.Key()]; ok {\n\t\treturn ErrWaitingProofAlreadyExist\n\t}\n\n\treturn s.db.Batch(func(tx *bolt.Tx) error {\n\t\tvar err error\n\t\tvar b bytes.Buffer\n\n\t\t\/\/ Get or create the bucket.\n\t\tbucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Encode the objects and place it in the bucket.\n\t\tif err := proof.Encode(&b); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkey := proof.Key()\n\t\tif err := bucket.Put(key[:], b.Bytes()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.cache[proof.Key()] = struct{}{}\n\t\treturn nil\n\t})\n}\n\n\/\/ Remove removes the proof from storage by its key.\nfunc (s *WaitingProofStore) Remove(key WaitingProofKey) error {\n\tif _, ok := s.cache[key]; !ok {\n\t\treturn ErrWaitingProofNotFound\n\t}\n\n\treturn s.db.Batch(func(tx *bolt.Tx) error {\n\t\t\/\/ Get or create the top bucket.\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\tif err := bucket.Delete(key[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(s.cache, key)\n\t\treturn nil\n\t})\n}\n\n\/\/ ForAll iterates thought all waiting proofs and passing the waiting proof\n\/\/ in the given callback.\nfunc (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {\n\treturn s.db.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\t\/\/ Iterate over objects buckets.\n\t\treturn bucket.ForEach(func(k, v []byte) error {\n\t\t\t\/\/ Skip buckets fields.\n\t\t\tif v == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tr := bytes.NewReader(v)\n\t\t\tproof := &WaitingProof{}\n\t\t\tif err := proof.Decode(r); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn cb(proof)\n\t\t})\n\t})\n}\n\n\/\/ Get returns the object which corresponds to the given index.\nfunc (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {\n\tproof := &WaitingProof{}\n\n\tif _, ok := s.cache[key]; !ok {\n\t\treturn nil, ErrWaitingProofNotFound\n\t}\n\n\terr := s.db.View(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket(waitingProofsBucketKey)\n\t\tif bucket == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\t\/\/ Iterate over objects buckets.\n\t\tv := bucket.Get(key[:])\n\t\tif v == nil {\n\t\t\treturn ErrWaitingProofNotFound\n\t\t}\n\n\t\tr := bytes.NewReader(v)\n\t\treturn proof.Decode(r)\n\t})\n\n\treturn proof, err\n}\n\n\/\/ WaitingProofKey is the proof key which uniquely identifies the waiting\n\/\/ proof object. 
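(The key is nine bytes: an 8-byte short channel ID followed by a one-byte local\/remote flag.) 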
The goal of this key is distinguish the local and remote\n\/\/ proof for the same channel id.\ntype WaitingProofKey [9]byte\n\n\/\/ WaitingProof is the storable object, which encapsulate the half proof and\n\/\/ the information about from which side this proof came. This structure is\n\/\/ needed to make channel proof exchange persistent, so that after client\n\/\/ restart we may receive remote\/local half proof and process it.\ntype WaitingProof struct {\n\t*lnwire.AnnounceSignatures\n\tisRemote bool\n}\n\n\/\/ NewWaitingProof constructs a new waiting prof instance.\nfunc NewWaitingProof(isRemote bool, proof *lnwire.AnnounceSignatures) *WaitingProof {\n\treturn &WaitingProof{\n\t\tAnnounceSignatures: proof,\n\t\tisRemote: isRemote,\n\t}\n}\n\n\/\/ OppositeKey returns the key which uniquely identifies opposite waiting proof.\nfunc (p *WaitingProof) OppositeKey() WaitingProofKey {\n\tvar key [9]byte\n\tbinary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())\n\n\tif !p.isRemote {\n\t\tkey[8] = 1\n\t}\n\treturn key\n}\n\n\/\/ Key returns the key which uniquely identifies waiting proof.\nfunc (p *WaitingProof) Key() WaitingProofKey {\n\tvar key [9]byte\n\tbinary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())\n\n\tif p.isRemote {\n\t\tkey[8] = 1\n\t}\n\treturn key\n}\n\n\/\/ Encode writes the internal representation of waiting proof in byte stream.\nfunc (p *WaitingProof) Encode(w io.Writer) error {\n\tif err := binary.Write(w, byteOrder, p.isRemote); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.AnnounceSignatures.Encode(w, 0); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Decode reads the data from the byte stream and initializes the\n\/\/ waiting proof object with it.\nfunc (p *WaitingProof) Decode(r io.Reader) error {\n\tif err := binary.Read(r, byteOrder, &p.isRemote); err != nil {\n\t\treturn err\n\t}\n\n\tmsg := &lnwire.AnnounceSignatures{}\n\tif err := msg.Decode(r, 0); err != nil {\n\t\treturn err\n\t}\n\n\t(*p).AnnounceSignatures = msg\n\treturn nil\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mattes\/migrate\"\n\t_ \"github.com\/mattes\/migrate\/database\/postgres\"\n\t_ \"github.com\/mattes\/migrate\/source\/file\"\n\n\t\"gopkg.in\/src-d\/go-kallax.v1\/generator\"\n\tcli \"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar Migrate = cli.Command{\n\tName: \"migrate\",\n\tUsage: \"Generate migrations for current kallax models\",\n\tAction: migrateAction,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"out, o\",\n\t\t\tUsage: \"Output directory of migrations\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"name, n\",\n\t\t\tUsage: \"Descriptive name for the migration\",\n\t\t\tValue: \"migration\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"input, i\",\n\t\t\tUsage: \"List of directories to scan models from. You can use this flag as many times as you want.\",\n\t\t},\n\t},\n\tSubcommands: cli.Commands{\n\t\tUp,\n\t\tDown,\n\t},\n}\n\nvar migrationFlags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"dir, d\",\n\t\tValue: \".\/migrations\",\n\t\tUsage: \"Directory where your migrations are stored\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"dsn\",\n\t\tUsage: \"PostgreSQL data source name. Example: `user:pass@localhost:5432\/database?sslmode=enable`\",\n\t},\n\tcli.UintFlag{\n\t\tName: \"steps, n\",\n\t\tUsage: \"Number of migrations to run\",\n\t},\n\tcli.UintFlag{\n\t\tName: \"version, v\",\n\t\tUsage: \"Migrate to a specific version. 
If `steps` and this flag are given, this will be used.\",\n\t},\n}\n\nvar Up = cli.Command{\n\tName: \"up\",\n\tUsage: \"Executes the migrations from the current version until the specified version.\",\n\tAction: runMigrationAction(upAction),\n\tFlags: migrationFlags,\n}\n\nvar Down = cli.Command{\n\tName: \"down\",\n\tUsage: \"Downgrades the database a certain number of migrations or until a certain version.\",\n\tAction: runMigrationAction(downAction),\n\tFlags: migrationFlags,\n}\n\nfunc upAction(m *migrate.Migrate, steps, version uint) error {\n\tif version > 0 {\n\t\tif err := m.Migrate(version); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade up to version %d: %s\", version, err)\n\t\t}\n\t} else if steps > 0 {\n\t\tif err := m.Steps(int(steps)); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to execute %d migration(s) up: %s\", steps, err)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"WARN: No `version` or `steps` provided, upgrading all the way up.\")\n\t\tif err := m.Up(); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade the database all the way up: %s\", err)\n\t\t}\n\t}\n\treportMigrationSuccess(m)\n\treturn nil\n}\n\nfunc downAction(m *migrate.Migrate, steps, version uint) error {\n\tif version > 0 {\n\t\tif err := m.Migrate(version); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade up to version %d: %s\", version, err)\n\t\t}\n\t} else if steps > 0 {\n\t\tif err := m.Steps(-int(steps)); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to execute %d migration(s) up: %s\", steps, err)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"kallax: no `version` or `steps` provided. You need to specify one of them.\")\n\t}\n\treportMigrationSuccess(m)\n\treturn nil\n}\n\nfunc reportMigrationSuccess(m *migrate.Migrate) {\n\tfmt.Println(\"Success! 
the migration has been run.\")\n\n\tif v, _, err := m.Version(); err != nil {\n\t\tfmt.Printf(\"Unable to check the latest version of the database: %s.\\n\", err)\n\t} else {\n\t\tfmt.Printf(\"Database is now at version %d.\\n\", v)\n\t}\n}\n\ntype runMigrationFunc func(m *migrate.Migrate, steps, version uint) error\n\nfunc runMigrationAction(fn runMigrationFunc) cli.ActionFunc {\n\treturn func(c *cli.Context) error {\n\t\tvar (\n\t\t\tdir = c.String(\"dir\")\n\t\t\tdsn = c.String(\"dsn\")\n\t\t\tsteps = c.Uint(\"steps\")\n\t\t\tversion = c.Uint(\"version\")\n\t\t)\n\n\t\tok, err := isDirectory(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot check if `dir` is a directory: %s\", err)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"kallax: argument `dir` must be a valid directory\")\n\t\t}\n\n\t\tdir, err = filepath.Abs(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot get absolute path of `dir`: %s\", err)\n\t\t}\n\n\t\tm, err := migrate.New(fmt.Sprintf(\"file:\/\/%s\", dir), fmt.Sprintf(\"postgres:\/\/%s\", dsn))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to open a connection with the database: %s\", err)\n\t\t}\n\n\t\treturn fn(m, steps, version)\n\t}\n}\n\nfunc migrateAction(c *cli.Context) error {\n\tdirs := c.StringSlice(\"input\")\n\tdir := c.String(\"out\")\n\tname := c.String(\"name\")\n\n\tvar pkgs []*generator.Package\n\tfor _, dir := range dirs {\n\t\tok, err := isDirectory(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot check directory in `input`: %s\", err)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"kallax: `input` must be a valid directory\")\n\t\t}\n\n\t\tp := generator.NewProcessor(dir, nil)\n\t\tp.Silent()\n\t\tpkg, err := p.Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpkgs = append(pkgs, pkg)\n\t}\n\n\tok, err := isDirectory(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kallax: cannot check directory in `out`: %s\", err)\n\t}\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"kallax: `out` must be a valid directory\")\n\t}\n\n\tg := generator.NewMigrationGenerator(name, dir)\n\tmigration, err := g.Build(pkgs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn g.Generate(migration)\n}\nadd --all flag to migrate all the way uppackage cmd\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mattes\/migrate\"\n\t_ \"github.com\/mattes\/migrate\/database\/postgres\"\n\t_ \"github.com\/mattes\/migrate\/source\/file\"\n\n\t\"gopkg.in\/src-d\/go-kallax.v1\/generator\"\n\tcli \"gopkg.in\/urfave\/cli.v1\"\n)\n\nvar Migrate = cli.Command{\n\tName: \"migrate\",\n\tUsage: \"Generate migrations for current kallax models\",\n\tAction: migrateAction,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"out, o\",\n\t\t\tUsage: \"Output directory of migrations\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"name, n\",\n\t\t\tUsage: \"Descriptive name for the migration\",\n\t\t\tValue: \"migration\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"input, i\",\n\t\t\tUsage: \"List of directories to scan models from. You can use this flag as many times as you want.\",\n\t\t},\n\t},\n\tSubcommands: cli.Commands{\n\t\tUp,\n\t\tDown,\n\t},\n}\n\nvar migrationFlags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"dir, d\",\n\t\tValue: \".\/migrations\",\n\t\tUsage: \"Directory where your migrations are stored\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"dsn\",\n\t\tUsage: \"PostgreSQL data source name. 
Example: `user:pass@localhost:5432\/database?sslmode=enable`\",\n\t},\n\tcli.UintFlag{\n\t\tName: \"steps, n\",\n\t\tUsage: \"Number of migrations to run\",\n\t},\n\tcli.UintFlag{\n\t\tName: \"version, v\",\n\t\tUsage: \"Migrate to a specific version. If `steps` and this flag are given, this will be used.\",\n\t},\n}\n\nvar Up = cli.Command{\n\tName: \"up\",\n\tUsage: \"Executes the migrations from the current version until the specified version.\",\n\tAction: runMigrationAction(upAction),\n\tFlags: append(migrationFlags, cli.BoolFlag{\n\t\tName: \"all\",\n\t\tUsage: \"If this flag is used, the database will be migrated all the way up.\",\n\t}),\n}\n\nvar Down = cli.Command{\n\tName: \"down\",\n\tUsage: \"Downgrades the database a certain number of migrations or until a certain version.\",\n\tAction: runMigrationAction(downAction),\n\tFlags: migrationFlags,\n}\n\nfunc upAction(m *migrate.Migrate, steps, version uint, all bool) error {\n\tif all {\n\t\tif err := m.Up(); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade the database all the way up: %s\", err)\n\t\t}\n\t} else if version > 0 {\n\t\tif err := m.Migrate(version); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade up to version %d: %s\", version, err)\n\t\t}\n\t} else if steps > 0 {\n\t\tif err := m.Steps(int(steps)); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to execute %d migration(s) up: %s\", steps, err)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"WARN: No `version` or `steps` provided\")\n\t}\n\treportMigrationSuccess(m)\n\treturn nil\n}\n\nfunc downAction(m *migrate.Migrate, steps, version uint, all bool) error {\n\tif version > 0 {\n\t\tif err := m.Migrate(version); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to upgrade up to version %d: %s\", version, err)\n\t\t}\n\t} else if steps > 0 {\n\t\tif err := m.Steps(-int(steps)); err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to execute %d migration(s) up: %s\", steps, err)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"kallax: no `version` or `steps` provided. You need to specify one of them.\")\n\t}\n\treportMigrationSuccess(m)\n\treturn nil\n}\n\nfunc reportMigrationSuccess(m *migrate.Migrate) {\n\tfmt.Println(\"Success! 
the migration has been run.\")\n\n\tif v, _, err := m.Version(); err != nil {\n\t\tfmt.Printf(\"Unable to check the latest version of the database: %s.\\n\", err)\n\t} else {\n\t\tfmt.Printf(\"Database is now at version %d.\\n\", v)\n\t}\n}\n\ntype runMigrationFunc func(m *migrate.Migrate, steps, version uint, all bool) error\n\nfunc runMigrationAction(fn runMigrationFunc) cli.ActionFunc {\n\treturn func(c *cli.Context) error {\n\t\tvar (\n\t\t\tdir = c.String(\"dir\")\n\t\t\tdsn = c.String(\"dsn\")\n\t\t\tsteps = c.Uint(\"steps\")\n\t\t\tversion = c.Uint(\"version\")\n\t\t\tall = c.Bool(\"all\")\n\t\t)\n\n\t\tok, err := isDirectory(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot check if `dir` is a directory: %s\", err)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"kallax: argument `dir` must be a valid directory\")\n\t\t}\n\n\t\tdir, err = filepath.Abs(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot get absolute path of `dir`: %s\", err)\n\t\t}\n\n\t\tm, err := migrate.New(fmt.Sprintf(\"file:\/\/%s\", dir), fmt.Sprintf(\"postgres:\/\/%s\", dsn))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: unable to open a connection with the database: %s\", err)\n\t\t}\n\n\t\treturn fn(m, steps, version, all)\n\t}\n}\n\nfunc migrateAction(c *cli.Context) error {\n\tdirs := c.StringSlice(\"input\")\n\tdir := c.String(\"out\")\n\tname := c.String(\"name\")\n\n\tvar pkgs []*generator.Package\n\tfor _, dir := range dirs {\n\t\tok, err := isDirectory(dir)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"kallax: cannot check directory in `input`: %s\", err)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"kallax: `input` must be a valid directory\")\n\t\t}\n\n\t\tp := generator.NewProcessor(dir, nil)\n\t\tp.Silent()\n\t\tpkg, err := p.Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpkgs = append(pkgs, pkg)\n\t}\n\n\tok, err := isDirectory(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"kallax: cannot check directory in `out`: %s\", err)\n\t}\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"kallax: `out` must be a valid directory\")\n\t}\n\n\tg := generator.NewMigrationGenerator(name, dir)\n\tmigration, err := g.Build(pkgs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn g.Generate(migration)\n}\n<|endoftext|>"} {"text":"package dskvs\n\nimport (\n\t\"log\"\n\t\"strings\"\n)\n\nconst CollKeySep = \"\/\"\n\nfunc checkKeyValid(key string) error {\n\tidxSeperator := strings.Index(key, CollKeySep)\n\tif idxSeperator == 0 {\n\t\treturn errorNoColl(key)\n\t} else if key == \"\" {\n\t\treturn errorEmptyKey()\n\t}\n\treturn nil\n}\n\n\/\/ Returns whether a key is a collection key or a collection\/member key.\n\/\/ Returns an error if the key is invalid\nfunc isCollectionKey(key string) bool {\n\tidxSeperator := strings.Index(key, CollKeySep)\n\tif idxSeperator < 0 {\n\t\treturn true\n\t} else if idxSeperator == len(key)-1 {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Takes a fullkey and splits it in a (collection, member) tuple. 
If member\n\/\/ is empty, the fullkey is a request for the collection as a whole.\nfunc splitKeys(fullKey string) (string, string, error) {\n\tif isCollectionKey(fullKey) {\n\t\treturn \"\", \"\", errorNoKey(fullKey)\n\t}\n\n\tkeys := strings.SplitN(fullKey, CollKeySep, 2)\n\n\treturn keys[0], keys[1], nil\n}\n\nfunc isValidPath(path string) bool {\n\tlog.Printf(\"isValidPath(%s) called but not yet implemented\", path)\n\treturn true\n}\n\nfunc expandPath(path string) string {\n\tlog.Printf(\"expandPath(%s) called but not yet implemented\", path)\n\treturn \"\"\n}\nImplement isValidPath and expandPathpackage dskvs\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nconst CollKeySep = \"\/\"\n\nfunc checkKeyValid(key string) error {\n\tidxSeperator := strings.Index(key, CollKeySep)\n\tif idxSeperator == 0 {\n\t\treturn errorNoColl(key)\n\t} else if key == \"\" {\n\t\treturn errorEmptyKey()\n\t}\n\treturn nil\n}\n\n\/\/ Returns whether a key is a collection key or a collection\/member key.\n\/\/ The key is assumed to be valid.\nfunc isCollectionKey(key string) bool {\n\tidxSeperator := strings.Index(key, CollKeySep)\n\tif idxSeperator < 0 {\n\t\treturn true\n\t} else if idxSeperator == len(key)-1 {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Takes a fullkey and splits it into a (collection, member) tuple. If member\n\/\/ is empty, the fullkey is a request for the collection as a whole.\nfunc splitKeys(fullKey string) (string, string, error) {\n\tif isCollectionKey(fullKey) {\n\t\treturn \"\", \"\", errorNoKey(fullKey)\n\t}\n\n\tkeys := strings.SplitN(fullKey, CollKeySep, 2)\n\n\treturn keys[0], keys[1], nil\n}\n\nfunc isValidPath(path string) bool {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\tlog.Printf(\"Could not get absolute filepath %v\", err)\n\t\treturn false\n\t}\n\n\tstat, err := os.Stat(absPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn true\n\t\t} else {\n\t\t\tlog.Printf(\"Could not get stat %v\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn stat.IsDir()\n}\n\nfunc expandPath(path string) string {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn absPath\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\tcrand \"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\/exec\"\n\t\"testing\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\n\/\/ sock.js protocol is described here:\n\/\/ http:\/\/sockjs.github.io\/sockjs-protocol\/sockjs-protocol-0.3.3.html#section-36\nconst url = \"ws:\/\/localhost:8008\/subscribe\/%d\/%s\/websocket\"\nconst origin = \"http:\/\/localhost\/\" \/\/ not checked on broker\n\n\/\/ returns a new sockjs URL for a client\nfunc newURL() string {\n\treturn fmt.Sprintf(url, rand.Intn(1000), RandomStringLength(8))\n}\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc runBroker(t *testing.T) (b *Broker, closer func()) {\n\t\/\/ Run authWorker (authWorker must be running while the broker is running.)\n\tcmd := exec.Command(\"cake\", \"authWorker\")\n\tcmd.Dir = \"\/opt\/koding\"\n\terr := cmd.Start()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Log(\"authWorker is running\")\n\n\t\/\/ Run broker\n\tbroker := NewBroker()\n\tbroker.Start()\n\tt.Log(\"broker is running\")\n\n\treturn broker, func() {\n\t\t\/\/ Close authWorker\n\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\tt.Errorf(err.Error())\n\t\t}\n\t\tbroker.Close()\n\t}\n}\n\nfunc TestBroker(t 
*testing.T) {\n\t_, closer := runBroker(t)\n\tdefer closer()\n\n\tclient, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\n\tgo client.Run()\n\tdefer client.Close()\n\n\ttype testCase struct{ send, expect string }\n\tcases := []testCase{\n\t\ttestCase{`{\"action\": \"ping\"}`, `{\"routingKey\":\"broker.pong\",\"payload\":null}`},\n\t}\n\n\tfor _, tc := range cases {\n\t\terr = client.SendAndExpectString(tc.send, tc.expect)\n\t\tif err != nil {\n\t\t\tt.Errorf(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestPubSub(t *testing.T) {\n\t\/\/ Run authWorker and broker\n\t_, closer := runBroker(t)\n\tdefer closer()\n\n\t\/\/ Run subscriber\n\tsubscriber, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tgo subscriber.Run()\n\tdefer subscriber.Close()\n\tmsg, err := subscriber.ReadJSON()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif msg[\"routingKey\"].(string) != \"broker.connected\" {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tt.Log(\"subscriber is running\")\n\n\t\/\/ Run publisher\n\tpublisher, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tgo publisher.Run()\n\tdefer publisher.Close()\n\tmsg, err = publisher.ReadJSON()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif msg[\"routingKey\"].(string) != \"broker.connected\" {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tt.Log(\"publisher is running\")\n\n\t\/\/ Subscribe\n\terr = subscriber.SendString(`{\"action\": \"subscribe\", \"routingKeyPrefix\": \"client.foo\"}`)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tstr, err := subscriber.ReadString()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif str != `{\"routingKey\":\"broker.subscribed\",\"payload\":\"client.foo\"}` {\n\t\tt.Errorf(\"unexpected msg: %s\", str)\n\t\treturn\n\t}\n\tt.Log(\"subscribed\")\n\n\t\/\/ Publish a message\n\terr = publisher.SendString(`{\"action\": \"publish\", \"exchange\": \"broker\", \"routingKey\": \"client.foo\", \"payload\": \"{\\\"bar\\\": \\\"baz\\\"}\"}`)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tt.Log(\"published a message\")\n\n\t\/\/ Receive published message\n\tstr, err = subscriber.ReadString()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif str != `{\"routingKey\":\"client.foo\",\"payload\":{\"bar\":\"baz\"}}` {\n\t\tt.Errorf(\"unexpected msg: %s\", str)\n\t\treturn\n\t}\n}\n\n\/\/ cheap imitation of sockjs-client js library\ntype sockJSClient struct {\n\tws *websocket.Conn\n\tmessages chan []byte\n}\n\nfunc dialSockJS(url, origin string) (*sockJSClient, error) {\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newSockJSClient(ws), nil\n}\n\nfunc newSockJSClient(ws *websocket.Conn) *sockJSClient {\n\treturn &sockJSClient{\n\t\tws: ws,\n\t\tmessages: make(chan []byte),\n\t}\n}\n\n\/\/ read messages from websocket and put it to the channel\nfunc (c *sockJSClient) Run() error {\n\tdefer close(c.messages)\n\tfor {\n\t\tvar data []byte\n\t\terr := websocket.Message.Receive(c.ws, &data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ fmt.Printf(\"--- read data: %+v\\n\", string(data))\n\t\tc.didMessage(data)\n\t}\n}\n\nfunc (c *sockJSClient) Close() {\n\tc.ws.Close()\n}\n\n\/\/ Send a []byte message to server\nfunc (c *sockJSClient) Send(data []byte) error {\n\treturn websocket.Message.Send(c.ws, 
data)\n}\n\n\/\/ Send a string message to server\nfunc (c *sockJSClient) SendString(s string) error {\n\treturn websocket.Message.Send(c.ws, s)\n}\n\n\/\/ adapted from: https:\/\/github.com\/sockjs\/sockjs-client\/blob\/master\/lib\/sockjs.js#L146\nfunc (c *sockJSClient) didMessage(data []byte) error {\n\tswitch string(data[:1]) {\n\tcase \"o\":\n\t\t\/\/ that._dispatchOpen();\n\tcase \"a\":\n\t\tdata := data[1:]\n\t\tvar messages []json.RawMessage\n\t\terr := json.Unmarshal(data, &messages)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, msg := range messages {\n\t\t\tc.messages <- msg\n\t\t}\n\tcase \"m\":\n\t\tdata = data[1:]\n\t\tvar msg json.RawMessage\n\t\terr := json.Unmarshal(data, &msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.messages <- msg\n\tcase \"c\":\n\t\t\/\/ var payload = JSON.parse(data.slice(1) || \"[]\")\n\t\t\/\/ that._didClose(payload[0], payload[1])\n\tcase \"h\":\n\t\t\/\/ that._dispatchHeartbeat()\n\t}\n\n\treturn nil\n}\n\n\/\/ Get next JSON message from server as map[string]interface{}\nfunc (c *sockJSClient) ReadJSON() (map[string]interface{}, error) {\n\tmsg, err := c.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := make(map[string]interface{})\n\terr = json.Unmarshal(msg, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n\/\/ Get next message from server as string\nfunc (c *sockJSClient) ReadString() (string, error) {\n\tmsg, err := c.Read()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(msg), nil\n}\n\n\/\/ Get next message from server as []byte\nfunc (c *sockJSClient) Read() ([]byte, error) {\n\tselect {\n\tcase msg := <-c.messages:\n\t\treturn msg, nil\n\tcase <-time.After(1e9):\n\t\treturn nil, errors.New(\"timeout\")\n\t}\n}\n\n\/\/ send a string and expect reply\nfunc (c *sockJSClient) SendAndExpectString(sent, expected string) error {\n\treturn c.SendAndExpect([]byte(sent), []byte(expected))\n}\n\n\/\/ send a []byte and expect reply\nfunc (c *sockJSClient) SendAndExpect(sent []byte, expected []byte) error {\n\terr := c.Send(sent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tmsg, err := c.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bytes.Compare(msg, expected) == 0 {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc RandomStringLength(length int) string {\n\tr := make([]byte, length*6\/8)\n\tcrand.Read(r)\n\treturn base64.URLEncoding.EncodeToString(r)\n}\nbroker: add partly functioning benchmarks, will improve laterpackage main\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Make sure authWorker is running before running the tests. 
\/\/\n\/\/ You can run it with the following command: \/\/\n\/\/ cd \/opt\/koding && cake -c vagrant authWorker \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nimport (\n\t\"bytes\"\n\tcrand \"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\n\/\/ sock.js protocol is described here:\n\/\/ http:\/\/sockjs.github.io\/sockjs-protocol\/sockjs-protocol-0.3.3.html#section-36\nconst url = \"ws:\/\/localhost:8008\/subscribe\/%d\/%s\/websocket\"\nconst origin = \"http:\/\/localhost\/\" \/\/ not checked on broker\n\n\/\/ returns a new sockjs URL for a client\nfunc newURL() string {\n\treturn fmt.Sprintf(url, rand.Intn(1000), RandomStringLength(8))\n}\n\n\/\/ This global broker instance is started once by the init() function when the tests are run.\nvar broker *Broker\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n\tbroker = NewBroker()\n\tbroker.Start()\n}\n\nfunc TestPingPong(t *testing.T) {\n\tclient, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\n\tgo client.Run()\n\tdefer client.Close()\n\n\ttype testCase struct{ send, expect string }\n\tcases := []testCase{\n\t\ttestCase{`{\"action\": \"ping\"}`, `{\"routingKey\":\"broker.pong\",\"payload\":null}`},\n\t}\n\n\tfor _, tc := range cases {\n\t\terr = client.SendAndExpectString(tc.send, tc.expect)\n\t\tif err != nil {\n\t\t\tt.Errorf(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestPubSub(t *testing.T) {\n\t\/\/ Run subscriber\n\tsubscriber, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tgo subscriber.Run()\n\tdefer subscriber.Close()\n\tmsg, err := subscriber.ReadJSON()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif msg[\"routingKey\"].(string) != \"broker.connected\" {\n\t\tt.Errorf(\"unexpected msg: %v\", msg)\n\t\treturn\n\t}\n\tt.Log(\"subscriber is running\")\n\n\t\/\/ Run publisher\n\tpublisher, err := dialSockJS(newURL(), origin)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tgo publisher.Run()\n\tdefer publisher.Close()\n\tmsg, err = publisher.ReadJSON()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif msg[\"routingKey\"].(string) != \"broker.connected\" {\n\t\tt.Errorf(\"unexpected msg: %v\", msg)\n\t\treturn\n\t}\n\tt.Log(\"publisher is running\")\n\n\t\/\/ Subscribe\n\terr = subscriber.SendString(`{\"action\": \"subscribe\", \"routingKeyPrefix\": \"client.foo\"}`)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tstr, err := subscriber.ReadString()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif str != `{\"routingKey\":\"broker.subscribed\",\"payload\":\"client.foo\"}` {\n\t\tt.Errorf(\"unexpected msg: %s\", str)\n\t\treturn\n\t}\n\tt.Log(\"subscribed\")\n\n\t\/\/ Publish a message\n\terr = publisher.SendString(`{\"action\": \"publish\", \"exchange\": \"broker\", \"routingKey\": \"client.foo\", \"payload\": \"{\\\"bar\\\": \\\"baz\\\"}\"}`)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tt.Log(\"published a message\")\n\n\t\/\/ Receive published message\n\tstr, err = subscriber.ReadString()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t\treturn\n\t}\n\tif str != `{\"routingKey\":\"client.foo\",\"payload\":{\"bar\":\"baz\"}}` {\n\t\tt.Errorf(\"unexpected msg: %s\", str)\n\t\treturn\n\t}\n}\n\nfunc BenchmarkBroker_1_1(b 
*testing.B) { benchmarkBroker(b, 1, 1) }\nfunc BenchmarkBroker_10_10(b *testing.B) { benchmarkBroker(b, 10, 10) }\nfunc BenchmarkBroker_100_100(b *testing.B) { benchmarkBroker(b, 100, 100) }\nfunc BenchmarkBroker_1000_1000(b *testing.B) { benchmarkBroker(b, 1000, 1000) }\n\nvar nPublished int\n\nfunc benchmarkBroker(b *testing.B, nClient, nKey int) {\n\tvar err error\n\n\tb.Logf(\"connecting with %d clients\", nClient)\n\tclients := make([]*sockJSClient, nClient)\n\tfor i := 0; i < nClient; i++ {\n\t\tclients[i], err = dialSockJS(newURL(), origin)\n\t\tif err != nil {\n\t\t\tb.Errorf(err.Error())\n\t\t\treturn\n\t\t}\n\t\tgo clients[i].Run()\n\t\tdefer clients[i].Close()\n\t}\n\n\tb.Logf(\"generating %d keys\", nKey)\n\tkeys := make([]string, nKey)\n\tfor i := 0; i < nKey; i++ {\n\t\tkeys[i] = \"client.\" + RandomStringLength(8)\n\t}\n\n\tb.Logf(\"each client subscribes %d keys\", nKey)\n\tfor _, client := range clients {\n\t\tfor _, key := range keys {\n\t\t\tclient.SendString(fmt.Sprintf(`{\"action\": \"subscribe\", \"routingKeyPrefix\": \"%s\"}`, key))\n\t\t}\n\t}\n\n\tb.Logf(\"publishing %d random messages to random keys\", b.N)\n\t\/\/ conn := amqputil.CreateConnection(\"broker\")\n\t\/\/ defer conn.Close()\n\t\/\/ ch := amqputil.CreateChannel(conn)\n\t\/\/ defer ch.Close()\n\t\/\/ payload := fmt.Sprintf(`{\"random\": \"%s\"}`, RandomStringLength(1024)) \/\/ Must be JSON\n\tbody := fmt.Sprintf(`{\"action\": \"publish\", \"exchange\": \"broker\", \"routingKey\": \"%s\", \"payload\": \"{\\\"random\\\": \\\"%s\\\"}\"}`, keys[rand.Intn(nKey)], RandomStringLength(1024))\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t\/\/ err := ch.Publish(\"broker\", keys[rand.Intn(nKey)], false, false, amqp.Publishing{Body: []byte(payload)})\n\t\terr := clients[rand.Intn(nClient)].SendString(body)\n\t\tif err != nil {\n\t\t\tb.Errorf(err.Error())\n\t\t\treturn\n\t\t}\n\t\tnPublished++\n\t}\n\tfmt.Println(\"--- total published:\", nPublished)\n}\n\n\/\/ cheap imitation of sockjs-client js library\ntype sockJSClient struct {\n\tws *websocket.Conn\n\tmessages chan []byte\n}\n\nfunc dialSockJS(url, origin string) (*sockJSClient, error) {\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newSockJSClient(ws), nil\n}\n\nfunc newSockJSClient(ws *websocket.Conn) *sockJSClient {\n\treturn &sockJSClient{\n\t\tws: ws,\n\t\tmessages: make(chan []byte),\n\t}\n}\n\n\/\/ read messages from websocket and put it to the channel\nfunc (c *sockJSClient) Run() error {\n\tdefer close(c.messages)\n\tfor {\n\t\tvar data []byte\n\t\terr := websocket.Message.Receive(c.ws, &data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.didMessage(data)\n\t}\n}\n\nfunc (c *sockJSClient) Close() {\n\tc.ws.Close()\n}\n\n\/\/ Send a []byte message to server\nfunc (c *sockJSClient) Send(data []byte) error {\n\treturn websocket.Message.Send(c.ws, data)\n}\n\n\/\/ Send a string message to server\nfunc (c *sockJSClient) SendString(s string) error {\n\treturn websocket.Message.Send(c.ws, s)\n}\n\n\/\/ adapted from: https:\/\/github.com\/sockjs\/sockjs-client\/blob\/master\/lib\/sockjs.js#L146\nfunc (c *sockJSClient) didMessage(data []byte) error {\n\tswitch string(data[:1]) {\n\tcase \"o\":\n\t\t\/\/ that._dispatchOpen();\n\tcase \"a\":\n\t\tdata := data[1:]\n\t\tvar messages []json.RawMessage\n\t\terr := json.Unmarshal(data, &messages)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, msg := range messages {\n\t\t\tc.messages <- msg\n\t\t}\n\tcase \"m\":\n\t\tdata = 
data[1:]\n\t\tvar msg json.RawMessage\n\t\terr := json.Unmarshal(data, &msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.messages <- msg\n\tcase \"c\":\n\t\t\/\/ var payload = JSON.parse(data.slice(1) || \"[]\")\n\t\t\/\/ that._didClose(payload[0], payload[1])\n\tcase \"h\":\n\t\t\/\/ that._dispatchHeartbeat()\n\t}\n\n\treturn nil\n}\n\n\/\/ Get next JSON message from server as map[string]interface{}\nfunc (c *sockJSClient) ReadJSON() (map[string]interface{}, error) {\n\tmsg, err := c.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := make(map[string]interface{})\n\terr = json.Unmarshal(msg, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n\/\/ Get next message from server as string\nfunc (c *sockJSClient) ReadString() (string, error) {\n\tmsg, err := c.Read()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(msg), nil\n}\n\n\/\/ Get next message from server as []byte\nfunc (c *sockJSClient) Read() ([]byte, error) {\n\tselect {\n\tcase msg := <-c.messages:\n\t\treturn msg, nil\n\tcase <-time.After(1e9):\n\t\treturn nil, errors.New(\"timeout\")\n\t}\n}\n\n\/\/ send a string and expect reply\nfunc (c *sockJSClient) SendAndExpectString(sent, expected string) error {\n\treturn c.SendAndExpect([]byte(sent), []byte(expected))\n}\n\n\/\/ send a []byte and expect reply\nfunc (c *sockJSClient) SendAndExpect(sent []byte, expected []byte) error {\n\terr := c.Send(sent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tmsg, err := c.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bytes.Compare(msg, expected) == 0 {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc RandomStringLength(length int) string {\n\tr := make([]byte, length*6\/8)\n\tcrand.Read(r)\n\treturn base64.URLEncoding.EncodeToString(r)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"koding\/db\/models\"\n\t\"koding\/tools\/utils\"\n\t\"koding\/virt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\nconst (\n\tusage = `usage: [|all]\n\n\tlist\n\tstart\n\tshutdown\n\tstop\n\tip\n\tunprepare\n\tcreate-test-vms\n\trbd-orphans\n`\n)\n\nvar flagOpts struct {\n\tTemplates string `long:\"templates\" short:\"t\" description:\"Change template dir.\" default:\"files\/templates\"`\n}\n\nfunc main() {\n\tremainingArgs, err := flags.Parse(&flagOpts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := virt.LoadTemplates(flagOpts.Templates); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(remainingArgs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(0)\n\t}\n\n\taction := remainingArgs[0]\n\tactionArgs := remainingArgs[1:]\n\n\tfn := actions[action]\n\tfn(actionArgs)\n}\n\nvar actions = map[string]func(args []string){\n\t\"list\": func(args []string) {\n\t\tdirs, err := ioutil.ReadDir(\"\/var\/lib\/lxc\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, dir := range dirs {\n\t\t\tif strings.HasPrefix(dir.Name(), \"vm-\") {\n\t\t\t\tfmt.Println(dir.Name())\n\t\t\t}\n\t\t}\n\n\t},\n\n\t\"start\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Start()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"shutdown\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Shutdown()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"stop\": func(args 
[]string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Stop()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"unprepare\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Unprepare()\n\t\t\tfmt.Printf(\"%v: %v\\n\", vm, err)\n\t\t}\n\t},\n\n\t\"ip\": func(args []string) {\n\t\tif len(args) != 2 {\n\t\t\tlog.Fatal(\"usage: ip \")\n\t\t}\n\n\t\tsession, err := mgo.Dial(args[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvm := new(models.VM)\n\t\tsession.SetSafe(&mgo.Safe{})\n\n\t\tvmId := strings.TrimPrefix(args[1], \"vm-\")\n\n\t\tdatabase := session.DB(\"\")\n\t\terr = database.C(\"jVMs\").Find(bson.M{\"_id\": bson.ObjectIdHex(vmId)}).One(vm)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(vm.IP.String())\n\t},\n\n\t\"create-test-vms\": func(args []string) {\n\t\tstartIP := net.IPv4(10, 128, 2, 7)\n\t\tif len(os.Args) >= 4 {\n\t\t\tstartIP = net.ParseIP(os.Args[3])\n\t\t}\n\t\tipPoolFetch, _ := utils.NewIntPool(utils.IPToInt(startIP), nil)\n\t\tcount, _ := strconv.Atoi(args[0])\n\t\tdone := make(chan int)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tgo func(i int) {\n\t\t\t\tvm := virt.VM{\n\t\t\t\t\tId: bson.NewObjectId(),\n\t\t\t\t\tIP: utils.IntToIP(<-ipPoolFetch),\n\t\t\t\t}\n\t\t\t\tvm.ApplyDefaults()\n\t\t\t\tvm.Prepare(false)\n\t\t\t\tdone <- i\n\t\t\t}(i)\n\t\t}\n\t\tfor i := 0; i < count; i++ {\n\t\t\tfmt.Println(<-done)\n\t\t}\n\t},\n\n\t\"rbd-orphans\": func(args []string) {\n\t\tif len(args) == 0 {\n\t\t\tlog.Fatal(\"usage: vmtool rbd-orphans \")\n\t\t}\n\n\t\tsession, err := mgo.Dial(args[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tsession.SetSafe(&mgo.Safe{})\n\t\tdatabase := session.DB(\"\")\n\t\titer := database.C(\"jVMs\").Find(bson.M{}).Select(bson.M{\"_id\": 1}).Iter()\n\t\tvar vm struct {\n\t\t\tId bson.ObjectId `bson:\"_id\"`\n\t\t}\n\t\tids := make(map[string]bool)\n\t\tfor iter.Next(&vm) {\n\t\t\tids[\"vm-\"+vm.Id.Hex()] = true\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcmd := exec.Command(\"\/usr\/bin\/rbd\", \"ls\", \"--pool\", \"vms\")\n\t\tpipe, _ := cmd.StdoutPipe()\n\t\tr := bufio.NewReader(pipe)\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(\"RBD images without corresponding database entry:\")\n\t\tfor {\n\t\t\timage, err := r.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\timage = image[:len(image)-1]\n\n\t\t\tif !ids[image] {\n\t\t\t\tfmt.Println(image)\n\t\t\t}\n\t\t}\n\t},\n}\n\nfunc selectVMs(selector string) []*virt.VM {\n\tif selector == \"all\" {\n\t\tdirs, err := ioutil.ReadDir(\"\/var\/lib\/lxc\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvms := make([]*virt.VM, 0)\n\t\tfor _, dir := range dirs {\n\t\t\tif strings.HasPrefix(dir.Name(), \"vm-\") {\n\t\t\t\tvms = append(vms, &virt.VM{Id: bson.ObjectIdHex(dir.Name()[3:])})\n\t\t\t}\n\t\t}\n\t\treturn vms\n\t}\n\n\tif strings.HasPrefix(selector, \"vm-\") {\n\t\t_, err := os.Stat(\"\/var\/lib\/lxc\/\" + selector)\n\t\tif err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Println(\"No prepared VM with name: \" + selector)\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn []*virt.VM{&virt.VM{Id: bson.ObjectIdHex(selector[3:])}}\n\t}\n\n\tfmt.Println(\"Invalid selector: \" + selector)\n\tos.Exit(1)\n\treturn nil\n}\nvmtool: start and wait for network for test vmspackage main\n\nimport 
(\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"koding\/db\/models\"\n\t\"koding\/tools\/utils\"\n\t\"koding\/virt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jessevdk\/go-flags\"\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\nconst (\n\tusage = `usage: [|all]\n\n\tlist\n\tstart\n\tshutdown\n\tstop\n\tip\n\tunprepare\n\tcreate-test-vms\n\trbd-orphans\n`\n)\n\nvar flagOpts struct {\n\tTemplates string `long:\"templates\" short:\"t\" description:\"Change template dir.\" default:\"files\/templates\"`\n}\n\nfunc main() {\n\tremainingArgs, err := flags.Parse(&flagOpts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := virt.LoadTemplates(flagOpts.Templates); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(remainingArgs) == 0 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(0)\n\t}\n\n\taction := remainingArgs[0]\n\tactionArgs := remainingArgs[1:]\n\n\tfn := actions[action]\n\tfn(actionArgs)\n}\n\nvar actions = map[string]func(args []string){\n\t\"list\": func(args []string) {\n\t\tdirs, err := ioutil.ReadDir(\"\/var\/lib\/lxc\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, dir := range dirs {\n\t\t\tif strings.HasPrefix(dir.Name(), \"vm-\") {\n\t\t\t\tfmt.Println(dir.Name())\n\t\t\t}\n\t\t}\n\n\t},\n\n\t\"start\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Start()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"shutdown\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Shutdown()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"stop\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Stop()\n\t\t\tfmt.Printf(\"%v: %v\\n%s\", vm, err)\n\t\t}\n\t},\n\n\t\"unprepare\": func(args []string) {\n\t\tfor _, vm := range selectVMs(args[0]) {\n\t\t\terr := vm.Unprepare()\n\t\t\tfmt.Printf(\"%v: %v\\n\", vm, err)\n\t\t}\n\t},\n\n\t\"ip\": func(args []string) {\n\t\tif len(args) != 2 {\n\t\t\tlog.Fatal(\"usage: ip \")\n\t\t}\n\n\t\tsession, err := mgo.Dial(args[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvm := new(models.VM)\n\t\tsession.SetSafe(&mgo.Safe{})\n\n\t\tvmId := strings.TrimPrefix(args[1], \"vm-\")\n\n\t\tdatabase := session.DB(\"\")\n\t\terr = database.C(\"jVMs\").Find(bson.M{\"_id\": bson.ObjectIdHex(vmId)}).One(vm)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(vm.IP.String())\n\t},\n\n\t\"create-test-vms\": func(args []string) {\n\t\tstartIP := net.IPv4(10, 128, 2, 7)\n\t\tif len(os.Args) >= 4 {\n\t\t\tstartIP = net.ParseIP(os.Args[3])\n\t\t}\n\t\tipPoolFetch, _ := utils.NewIntPool(utils.IPToInt(startIP), nil)\n\t\tcount, _ := strconv.Atoi(args[0])\n\n\t\tdone := make(chan string)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tgo func(i int) {\n\t\t\t\tvm := virt.VM{\n\t\t\t\t\tId: bson.NewObjectId(),\n\t\t\t\t\tIP: utils.IntToIP(<-ipPoolFetch),\n\t\t\t\t}\n\t\t\t\tvm.ApplyDefaults()\n\t\t\t\tfmt.Println(i, \"preparing...\")\n\t\t\t\tfor _ = range vm.Prepare(false) {\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(i, \"starting...\")\n\t\t\t\tif err := vm.Start(); err != nil {\n\t\t\t\t\tlog.Println(i, \"start\", err)\n\t\t\t\t}\n\n\t\t\t\t\/\/ wait until network is up\n\t\t\t\tfmt.Println(i, \"waiting...\")\n\t\t\t\tif err := vm.WaitForNetwork(time.Second * 5); err != nil {\n\t\t\t\t\tlog.Print(i, \"WaitForNetwork\", err)\n\t\t\t\t}\n\t\t\t\tdone <- fmt.Sprintln(i, \"ready\", 
\"vm-\"+vm.Id.Hex())\n\t\t\t}(i)\n\t\t}\n\n\t\tfor i := 0; i < count; i++ {\n\t\t\tfmt.Println(<-done)\n\t\t}\n\t},\n\n\t\"rbd-orphans\": func(args []string) {\n\t\tif len(args) == 0 {\n\t\t\tlog.Fatal(\"usage: vmtool rbd-orphans \")\n\t\t}\n\n\t\tsession, err := mgo.Dial(args[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tsession.SetSafe(&mgo.Safe{})\n\t\tdatabase := session.DB(\"\")\n\t\titer := database.C(\"jVMs\").Find(bson.M{}).Select(bson.M{\"_id\": 1}).Iter()\n\t\tvar vm struct {\n\t\t\tId bson.ObjectId `bson:\"_id\"`\n\t\t}\n\t\tids := make(map[string]bool)\n\t\tfor iter.Next(&vm) {\n\t\t\tids[\"vm-\"+vm.Id.Hex()] = true\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcmd := exec.Command(\"\/usr\/bin\/rbd\", \"ls\", \"--pool\", \"vms\")\n\t\tpipe, _ := cmd.StdoutPipe()\n\t\tr := bufio.NewReader(pipe)\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(\"RBD images without corresponding database entry:\")\n\t\tfor {\n\t\t\timage, err := r.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\timage = image[:len(image)-1]\n\n\t\t\tif !ids[image] {\n\t\t\t\tfmt.Println(image)\n\t\t\t}\n\t\t}\n\t},\n}\n\nfunc selectVMs(selector string) []*virt.VM {\n\tif selector == \"all\" {\n\t\tdirs, err := ioutil.ReadDir(\"\/var\/lib\/lxc\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvms := make([]*virt.VM, 0)\n\t\tfor _, dir := range dirs {\n\t\t\tif strings.HasPrefix(dir.Name(), \"vm-\") {\n\t\t\t\tvms = append(vms, &virt.VM{Id: bson.ObjectIdHex(dir.Name()[3:])})\n\t\t\t}\n\t\t}\n\t\treturn vms\n\t}\n\n\tif strings.HasPrefix(selector, \"vm-\") {\n\t\t_, err := os.Stat(\"\/var\/lib\/lxc\/\" + selector)\n\t\tif err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Println(\"No prepared VM with name: \" + selector)\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn []*virt.VM{&virt.VM{Id: bson.ObjectIdHex(selector[3:])}}\n\t}\n\n\tfmt.Println(\"Invalid selector: \" + selector)\n\tos.Exit(1)\n\treturn nil\n}\n<|endoftext|>"} {"text":"package ublox\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/mdigger\/geotrack\/mongo\"\n)\n\nfunc TestCache(t *testing.T) {\n\tmongodb, err := mongo.Connect(\"mongodb:\/\/localhost\/geotrace\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer mongodb.Close()\n\n\tcache, err := InitCache(mongodb, token)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := 0; i < 1000; i++ {\n\t\tdata, err := cache.Get(pointHome, DefaultProfile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ fmt.Println(data)\n\t\tdata, err = cache.Get(pointWork, DefaultProfile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ fmt.Println(data)\n\t\t_ = data\n\t\t\/\/ jsondata, err := json.Marshal(data)\n\t\t\/\/ if err != nil {\n\t\t\/\/ \tt.Fatal(err)\n\t\t\/\/ }\n\t\t\/\/ fmt.Println(\"json:\", string(jsondata))\n\t}\n}\ntest fixpackage ublox\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/mdigger\/geotrack\/mongo\"\n)\n\nfunc TestCache(t *testing.T) {\n\tmongodb, err := mongo.Connect(\"mongodb:\/\/localhost\/geotrace\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer mongodb.Close()\n\n\tcache, err := InitCache(mongodb, \"\", token)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := 0; i < 1000; i++ {\n\t\tdata, err := cache.Get(pointHome, DefaultProfile)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ fmt.Println(data)\n\t\tdata, err = cache.Get(pointWork, DefaultProfile)\n\t\tif err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ fmt.Println(data)\n\t\t_ = data\n\t\t\/\/ jsondata, err := json.Marshal(data)\n\t\t\/\/ if err != nil {\n\t\t\/\/ \tt.Fatal(err)\n\t\t\/\/ }\n\t\t\/\/ fmt.Println(\"json:\", string(jsondata))\n\t}\n}\n<|endoftext|>"} {"text":"package iris_test\n\nimport (\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/kataras\/iris\/httptest\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar testMarkdownContents = `## Hello Markdown from Iris\n\nThis is an example of Markdown with Iris\n\n\n\nFeatures\n--------\n\nAll features of Sundown are supported, including:\n\n* **Compatibility**. The Markdown v1.0.3 test suite passes with\n the --tidy option. Without --tidy, the differences are\n mostly in whitespace and entity escaping, where blackfriday is\n more consistent and cleaner.\n\n* **Common extensions**, including table support, fenced code\n blocks, autolinks, strikethroughs, non-strict emphasis, etc.\n\n* **Safety**. Blackfriday is paranoid when parsing, making it safe\n to feed untrusted user input without fear of bad things\n happening. The test suite stress tests this and there are no\n known inputs that make it crash. If you find one, please let me\n know and send me the input that does it.\n\n NOTE: \"safety\" in this context means *runtime safety only*. In order to\n protect yourself against JavaScript injection in untrusted content, see\n [this example](https:\/\/github.com\/russross\/blackfriday#sanitize-untrusted-content).\n\n* **Fast processing**. It is fast enough to render on-demand in\n most web applications without having to cache the output.\n\n* **Thread safety**. You can run multiple parsers in different\n goroutines without ill effect. There is no dependence on global\n shared state.\n\n* **Minimal dependencies**. Blackfriday only depends on standard\n library packages in Go. The source code is pretty\n self-contained, so it is easy to add to any project, including\n Google App Engine projects.\n\n* **Standards compliant**. 
Output successfully validates using the\n W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.\n\n\t[this is a link](https:\/\/github.com\/kataras\/iris) `\n\n\/\/ 10 seconds test\n\/\/ EXAMPLE: https:\/\/github.com\/iris-contrib\/examples\/tree\/master\/cache_body\nfunc TestCacheBody(t *testing.T) {\n\tiris.ResetDefault()\n\tiris.Config.CacheGCDuration = time.Duration(2) * time.Second\n\tiris.Config.IsDevelopment = true\n\tdefer iris.Close()\n\tvar i = 1\n\tbodyHandler := func(ctx *iris.Context) {\n\t\tif i%2 == 0 { \/\/ only for testing\n\t\t\tctx.SetStatusCode(iris.StatusNoContent)\n\t\t\ti++\n\t\t\treturn\n\t\t}\n\t\ti++\n\t\tctx.Markdown(iris.StatusOK, testMarkdownContents)\n\t}\n\n\texpiration := time.Duration(3 * time.Second)\n\n\tiris.Get(\"\/\", iris.Cache(bodyHandler, expiration))\n\n\te := httptest.New(iris.Default, t)\n\n\texpectedBody := iris.SerializeToString(\"text\/markdown\", testMarkdownContents)\n\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody) \/\/ the cache still son the corrrect body so no StatusNoContent fires\n\ttime.Sleep(time.Duration(5) * time.Second) \/\/ 4 depends on the CacheGCDuration not the expiration\n\n\t\/\/ the cache should be cleared and now i = 2 then it should run the iris.StatusNoContent with empty body ( we don't use the EmitError)\n\te.GET(\"\/\").Expect().Status(iris.StatusNoContent).Body().Empty()\n\ttime.Sleep(time.Duration(5) * time.Second)\n\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)\n}\nFix travis testpackage iris_test\n\nimport (\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/kataras\/iris\/httptest\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar testMarkdownContents = `## Hello Markdown from Iris\n\nThis is an example of Markdown with Iris\n\n\n\nFeatures\n--------\n\nAll features of Sundown are supported, including:\n\n* **Compatibility**. The Markdown v1.0.3 test suite passes with\n the --tidy option. Without --tidy, the differences are\n mostly in whitespace and entity escaping, where blackfriday is\n more consistent and cleaner.\n\n* **Common extensions**, including table support, fenced code\n blocks, autolinks, strikethroughs, non-strict emphasis, etc.\n\n* **Safety**. Blackfriday is paranoid when parsing, making it safe\n to feed untrusted user input without fear of bad things\n happening. The test suite stress tests this and there are no\n known inputs that make it crash. If you find one, please let me\n know and send me the input that does it.\n\n NOTE: \"safety\" in this context means *runtime safety only*. In order to\n protect yourself against JavaScript injection in untrusted content, see\n [this example](https:\/\/github.com\/russross\/blackfriday#sanitize-untrusted-content).\n\n* **Fast processing**. It is fast enough to render on-demand in\n most web applications without having to cache the output.\n\n* **Thread safety**. You can run multiple parsers in different\n goroutines without ill effect. There is no dependence on global\n shared state.\n\n* **Minimal dependencies**. Blackfriday only depends on standard\n library packages in Go. The source code is pretty\n self-contained, so it is easy to add to any project, including\n Google App Engine projects.\n\n* **Standards compliant**. 
Output successfully validates using the\n W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.\n\n\t[this is a link](https:\/\/github.com\/kataras\/iris) `\n\n\/\/ 10 seconds test\n\/\/ EXAMPLE: https:\/\/github.com\/iris-contrib\/examples\/tree\/master\/cache_body\nfunc TestCacheCanRender(t *testing.T) {\n\tiris.ResetDefault()\n\tiris.Config.CacheGCDuration = time.Duration(2) * time.Second\n\tiris.Config.IsDevelopment = true\n\tdefer iris.Close()\n\tvar i = 1\n\tbodyHandler := func(ctx *iris.Context) {\n\t\tif i%2 == 0 { \/\/ only for testing\n\t\t\tctx.SetStatusCode(iris.StatusNoContent)\n\t\t\ti++\n\t\t\treturn\n\t\t}\n\t\ti++\n\t\tctx.Markdown(iris.StatusOK, testMarkdownContents)\n\t}\n\n\texpiration := time.Duration(15 * time.Second)\n\n\tiris.Get(\"\/\", iris.Cache(bodyHandler, expiration))\n\n\te := httptest.New(iris.Default, t)\n\n\texpectedBody := iris.SerializeToString(\"text\/markdown\", testMarkdownContents)\n\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody) \/\/ the 15 seconds didn't pass yet, so it should still work\n\n\t\/\/ travis... and time.Sleep is not a good idea for testing; we will see what we can do another day. The cache is tested on the examples too.\n\t\/*e.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody) \/\/ the cache still holds the correct body so no StatusNoContent fires\n\ttime.Sleep(time.Duration(5) * time.Second) \/\/ this depends on the CacheGCDuration, not the expiration\n\n\t\/\/ the cache should be cleared and now i = 2, so it should return iris.StatusNoContent with an empty body (we don't use the EmitError)\n\te.GET(\"\/\").Expect().Status(iris.StatusNoContent).Body().Empty()\n\ttime.Sleep(time.Duration(5) * time.Second)\n\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)\n\te.GET(\"\/\").Expect().Status(iris.StatusOK).Body().Equal(expectedBody)*\/\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage leaderelection\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/clock\"\n\trl \"k8s.io\/client-go\/tools\/leaderelection\/resourcelock\"\n\t\"net\/http\"\n)\n\ntype fakeLock struct {\n\tidentity string\n}\n\n\/\/ Get is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Get() (ler *rl.LeaderElectionRecord, err error) {\n\treturn nil, nil\n}\n\n\/\/ Create is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Create(ler rl.LeaderElectionRecord) error {\n\treturn nil\n}\n\n\/\/ Update is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Update(ler rl.LeaderElectionRecord) error {\n\treturn nil\n}\n\n\/\/ RecordEvent is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) RecordEvent(string) {}\n\n\/\/ Identity is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Identity() string {\n\treturn 
fl.identity\n}\n\n\/\/ Describe is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Describe() string {\n\treturn \"Dummy implementation of lock for testing\"\n}\n\n\/\/ TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases.\nfunc TestLeaderElectionHealthChecker(t *testing.T) {\n\tcurrent := time.Now()\n\treq := &http.Request{}\n\n\ttests := []struct {\n\t\tdescription string\n\t\texpected error\n\t\tadaptorTimeout time.Duration\n\t\telector *LeaderElector\n\t}{\n\t\t{\n\t\t\tdescription: \"call check before leader elector initialized\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: nil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the the lease is far expired\",\n\t\t\texpected: fmt.Errorf(\"failed election to renew leadership on lease %s\", \"foo\"),\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Hour)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the the lease is far expired but held by another server\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"otherServer\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Hour)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the the lease is not expired\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the the lease is expired but inside the timeout\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tadaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout)\n\t\tif adaptor.le != nil {\n\t\t\tt.Errorf(\"[%s] leaderChecker started with a LeaderElector %v\", test.description, adaptor.le)\n\t\t}\n\t\tif test.elector != nil {\n\t\t\ttest.elector.config.WatchDog = adaptor\n\t\t\tadaptor.SetLeaderElection(test.elector)\n\t\t\tif adaptor.le == nil {\n\t\t\t\tt.Errorf(\"[%s] adaptor failed to set the LeaderElector\", 
test.description)\n\t\t\t}\n\t\t}\n\t\terr := adaptor.Check(req)\n\t\tif test.expected == nil {\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"[%s] called check, expected no error but received \\\"%v\\\"\", test.description, err)\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"[%s] called check and failed to received the expected error \\\"%v\\\"\", test.description, test.expected)\n\t\t\t}\n\t\t\tif err.Error() != test.expected.Error() {\n\t\t\t\tt.Errorf(\"[%s] called check, expected %v, received %v\", test.description, test.expected, err)\n\t\t\t}\n\t\t}\n\t}\n}\nremove redundant words 'the' in comment\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage leaderelection\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/clock\"\n\trl \"k8s.io\/client-go\/tools\/leaderelection\/resourcelock\"\n\t\"net\/http\"\n)\n\ntype fakeLock struct {\n\tidentity string\n}\n\n\/\/ Get is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Get() (ler *rl.LeaderElectionRecord, err error) {\n\treturn nil, nil\n}\n\n\/\/ Create is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Create(ler rl.LeaderElectionRecord) error {\n\treturn nil\n}\n\n\/\/ Update is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Update(ler rl.LeaderElectionRecord) error {\n\treturn nil\n}\n\n\/\/ RecordEvent is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) RecordEvent(string) {}\n\n\/\/ Identity is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Identity() string {\n\treturn fl.identity\n}\n\n\/\/ Describe is a dummy to allow us to have a fakeLock for testing.\nfunc (fl *fakeLock) Describe() string {\n\treturn \"Dummy implementation of lock for testing\"\n}\n\n\/\/ TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases.\nfunc TestLeaderElectionHealthChecker(t *testing.T) {\n\tcurrent := time.Now()\n\treq := &http.Request{}\n\n\ttests := []struct {\n\t\tdescription string\n\t\texpected error\n\t\tadaptorTimeout time.Duration\n\t\telector *LeaderElector\n\t}{\n\t\t{\n\t\t\tdescription: \"call check before leader elector initialized\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: nil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the lease is far expired\",\n\t\t\texpected: fmt.Errorf(\"failed election to renew leadership on lease %s\", \"foo\"),\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Hour)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: 
\"call check when the lease is far expired but held by another server\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"otherServer\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Hour)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the lease is not expired\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"call check when the lease is expired but inside the timeout\",\n\t\t\texpected: nil,\n\t\t\tadaptorTimeout: time.Second * 20,\n\t\t\telector: &LeaderElector{\n\t\t\t\tconfig: LeaderElectionConfig{\n\t\t\t\t\tLock: &fakeLock{identity: \"healthTest\"},\n\t\t\t\t\tLeaseDuration: time.Minute,\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t},\n\t\t\t\tobservedRecord: rl.LeaderElectionRecord{\n\t\t\t\t\tHolderIdentity: \"healthTest\",\n\t\t\t\t},\n\t\t\t\tobservedTime: current,\n\t\t\t\tclock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tadaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout)\n\t\tif adaptor.le != nil {\n\t\t\tt.Errorf(\"[%s] leaderChecker started with a LeaderElector %v\", test.description, adaptor.le)\n\t\t}\n\t\tif test.elector != nil {\n\t\t\ttest.elector.config.WatchDog = adaptor\n\t\t\tadaptor.SetLeaderElection(test.elector)\n\t\t\tif adaptor.le == nil {\n\t\t\t\tt.Errorf(\"[%s] adaptor failed to set the LeaderElector\", test.description)\n\t\t\t}\n\t\t}\n\t\terr := adaptor.Check(req)\n\t\tif test.expected == nil {\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"[%s] called check, expected no error but received \\\"%v\\\"\", test.description, err)\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"[%s] called check and failed to received the expected error \\\"%v\\\"\", test.description, test.expected)\n\t\t\t}\n\t\t\tif err.Error() != test.expected.Error() {\n\t\t\t\tt.Errorf(\"[%s] called check, expected %v, received %v\", test.description, test.expected, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root.\n\/\/ vespa document command\n\/\/ author: bratseth\n\npackage cmd\n\nimport (\n\t\"log\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/vespa-engine\/vespa\/util\"\n\t\"github.com\/vespa-engine\/vespa\/vespa\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(documentCmd)\n\tdocumentCmd.AddCommand(documentPutCmd)\n\tdocumentCmd.AddCommand(documentGetCmd)\n}\n\nvar documentCmd = &cobra.Command{\n\tUse: \"document\",\n\tShort: \"Issues the document operation in the given file to Vespa\",\n\tExample: `$ vespa document src\/test\/resources\/A-Head-Full-of-Dreams.json`,\n\tArgs: cobra.ExactArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintResult(vespa.Put(\"\", args[0], documentTarget()), false) \/\/ TODO: Use Send\n\t},\n}\n\nvar documentPostCmd = &cobra.Command{\n\tUse: \"put\",\n\tShort: \"Writes the document in the given file to Vespa\",\n\tArgs: cobra.RangeArgs(1, 2),\n\tExample: `$ vespa document put src\/test\/resources\/A-Head-Full-of-Dreams.json\n$ vespa document put id:mynamespace:music::a-head-full-of-dreams src\/test\/resources\/A-Head-Full-of-Dreams.json`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 1 {\n\t\t\tprintResult(vespa.Put(\"\", args[0], documentTarget()), false)\n\t\t} else {\n\t\t\tprintResult(vespa.Put(args[0], args[1], documentTarget()), false)\n\t\t}\n\t},\n}\n\nvar documentGetCmd = &cobra.Command{\n\tUse: \"get\",\n\tShort: \"Gets a document\",\n\tArgs: cobra.ExactArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintResult(vespa.Get(args[0], documentTarget()), true)\n\t},\n}\n\nfunc printResult(result util.OperationResult, payloadOnlyOnSuccess bool) {\n\tif !result.Success {\n\t\tlog.Print(color.Red(\"Error: \"), result.Message)\n\t} else if !(payloadOnlyOnSuccess && result.Payload != \"\") {\n\t\tlog.Print(color.Green(\"Success: \"), result.Message)\n\t}\n\n\tif result.Detail != \"\" {\n\t\tlog.Print(color.Brown(result.Detail))\n\t}\n\n\tif result.Payload != \"\" {\n\t\tif !payloadOnlyOnSuccess {\n\t\t\tlog.Println(\"\")\n\t\t}\n\t\tlog.Print(result.Payload)\n\t}\n}\npost -> put\/\/ Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root.\n\/\/ vespa document command\n\/\/ author: bratseth\n\npackage cmd\n\nimport (\n\t\"log\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/vespa-engine\/vespa\/util\"\n\t\"github.com\/vespa-engine\/vespa\/vespa\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(documentCmd)\n\tdocumentCmd.AddCommand(documentPutCmd)\n\tdocumentCmd.AddCommand(documentGetCmd)\n}\n\nvar documentCmd = &cobra.Command{\n\tUse: \"document\",\n\tShort: \"Issues the document operation in the given file to Vespa\",\n\tExample: `$ vespa document src\/test\/resources\/A-Head-Full-of-Dreams.json`,\n\tArgs: cobra.ExactArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintResult(vespa.Put(\"\", args[0], documentTarget()), false) \/\/ TODO: Use Send\n\t},\n}\n\nvar documentPutCmd = &cobra.Command{\n\tUse: \"put\",\n\tShort: \"Writes the document in the given file to Vespa\",\n\tArgs: cobra.RangeArgs(1, 2),\n\tExample: `$ vespa document put src\/test\/resources\/A-Head-Full-of-Dreams.json\n$ vespa document put id:mynamespace:music::a-head-full-of-dreams src\/test\/resources\/A-Head-Full-of-Dreams.json`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) == 1 {\n\t\t\tprintResult(vespa.Put(\"\", args[0], documentTarget()), false)\n\t\t} else {\n\t\t\tprintResult(vespa.Put(args[0], args[1], documentTarget()), false)\n\t\t}\n\t},\n}\n\nvar documentGetCmd = &cobra.Command{\n\tUse: \"get\",\n\tShort: \"Gets a document\",\n\tArgs: cobra.ExactArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tprintResult(vespa.Get(args[0], documentTarget()), true)\n\t},\n}\n\nfunc printResult(result util.OperationResult, payloadOnlyOnSuccess bool) {\n\tif !result.Success {\n\t\tlog.Print(color.Red(\"Error: \"), result.Message)\n\t} else if !(payloadOnlyOnSuccess && result.Payload != \"\") {\n\t\tlog.Print(color.Green(\"Success: \"), result.Message)\n\t}\n\n\tif result.Detail != \"\" {\n\t\tlog.Print(color.Brown(result.Detail))\n\t}\n\n\tif result.Payload != \"\" {\n\t\tif !payloadOnlyOnSuccess {\n\t\t\tlog.Println(\"\")\n\t\t}\n\t\tlog.Print(result.Payload)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016-2017, Cyrill @ Schumacher.fm and the CaddyESI Contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/vdobler\/ht\/cookiejar\"\n\t\"github.com\/vdobler\/ht\/ht\"\n)\n\nconst caddyAddress = `http:\/\/127.0.0.1:2017\/`\n\nfunc main() {\n\tjar, err := cookiejar.New(&cookiejar.Options{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc := ht.Collection{\n\t\tTests: testCollection,\n\t}\n\n\tvar exitStatus int\n\tif err := c.ExecuteConcurrent(runtime.NumCPU(), jar); err != nil {\n\t\texitStatus = 26 \/\/ line number ;-)\n\t\tprintln(\"ExecuteConcurrent:\", err.Error())\n\t}\n\n\tfor _, test := range c.Tests {\n\t\tif err := test.PrintReport(os.Stdout); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif test.Status > ht.Pass {\n\t\t\texitStatus = 35 \/\/ line number ;-)\n\n\t\t\tcolor.Red(\"Failed %s\", test.Name)\n\n\t\t\tif test.Response.BodyErr != nil {\n\t\t\t\tcolor.Yellow(fmt.Sprintf(\"Response Body Error: %s\\n\", test.Response.BodyErr))\n\t\t\t}\n\t\t\tcolor.Yellow(\"Response Body: %q\\n\", test.Response.BodyStr)\n\t\t}\n\t}\n\n\t\/\/ Travis CI requires an exit code for the build to fail. Anything not 0\n\t\/\/ will fail the build.\n\tos.Exit(exitStatus)\n}\n\n\/\/ RegisterTest adds a set of tests to the collection\nfunc RegisterTest(tests ...*ht.Test) {\n\ttestCollection = append(testCollection, tests...)\n}\n\nvar testCollection []*ht.Test\nht: Add background noise requests\/\/ Copyright 2016-2017, Cyrill @ Schumacher.fm and the CaddyESI Contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n\/\/ use this file except in compliance with the License. You may obtain a copy of\n\/\/ the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n\/\/ License for the specific language governing permissions and limitations under\n\/\/ the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/vdobler\/ht\/cookiejar\"\n\t\"github.com\/vdobler\/ht\/ht\"\n)\n\nconst caddyAddress = `http:\/\/127.0.0.1:2017\/`\n\nfunc main() {\n\t\/\/ <Background noise>\n\tgo func() {\n\t\tfor c := time.Tick(1 * time.Millisecond); ; <-c {\n\t\t\tt := pageRedis()\n\t\t\tif err := t.Run(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n\t\/\/ <\/Background noise>\n\n\tjar, err := cookiejar.New(&cookiejar.Options{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, t := range testCollection {\n\t\tt.Execution.PreSleep = time.Duration(rand.Intn(20)) * time.Millisecond\n\t}\n\n\tc := ht.Collection{\n\t\tTests: testCollection,\n\t}\n\n\tvar exitStatus int\n\tif err := c.ExecuteConcurrent(runtime.NumCPU(), jar); err != nil {\n\t\texitStatus = 26 \/\/ line number ;-)\n\t\tprintln(\"ExecuteConcurrent:\", err.Error())\n\t}\n\n\tfor _, test := range c.Tests {\n\t\tif err := test.PrintReport(os.Stdout); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif test.Status > ht.Pass {\n\t\t\texitStatus = 35 \/\/ line number ;-)\n\n\t\t\tcolor.Red(\"Failed %s\", test.Name)\n\n\t\t\tif test.Response.BodyErr != nil {\n\t\t\t\tcolor.Yellow(fmt.Sprintf(\"Response Body Error: %s\\n\", test.Response.BodyErr))\n\t\t\t}\n\t\t\tcolor.Yellow(\"Response Body: %q\\n\", test.Response.BodyStr)\n\t\t}\n\t}\n\n\t\/\/ Travis CI requires an exit code for the build to fail. Anything not 0\n\t\/\/ will fail the build.\n\tos.Exit(exitStatus)\n}\n\n\/\/ RegisterTest adds a set of tests to the collection\nfunc RegisterTest(tests ...*ht.Test) {\n\ttestCollection = append(testCollection, tests...)\n}\n\nvar testCollection []*ht.Test\n<|endoftext|>"} {"text":"\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage rpcchainvm\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/hashicorp\/go-plugin\"\n\n\t\"github.com\/ava-labs\/avalanchego\/snow\/engine\/snowman\/block\"\n\t\"github.com\/ava-labs\/avalanchego\/vms\/rpcchainvm\/vmproto\"\n)\n\n\/\/ Handshake is a common handshake that is shared by plugin and host.\nvar Handshake = plugin.HandshakeConfig{\n\tProtocolVersion: 1,\n\tMagicCookieKey: \"VM_PLUGIN\",\n\tMagicCookieValue: \"dynamic\",\n}\n\n\/\/ PluginMap is the map of plugins we can dispense.\nvar PluginMap = map[string]plugin.Plugin{\n\t\"vm\": &Plugin{},\n}\n\n\/\/ Plugin is the implementation of plugin.Plugin so we can serve\/consume this.\n\/\/ We also implement GRPCPlugin so that this plugin can be served over gRPC.\ntype Plugin struct {\n\tplugin.NetRPCUnsupportedPlugin\n\t\/\/ Concrete implementation, written in Go. 
This is only used for plugins\n\t\/\/ that are written in Go.\n\tvm block.ChainVM\n}\n\n\/\/ New creates a new plugin from the provided VM\nfunc New(vm block.ChainVM) *Plugin { return &Plugin{vm: vm} }\n\n\/\/ GRPCServer registers a new GRPC server.\nfunc (p *Plugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {\n\tvmproto.RegisterVMServer(s, NewServer(p.vm, broker))\n\treturn nil\n}\n\n\/\/ GRPCClient returns a new GRPC client\nfunc (p *Plugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {\n\treturn NewClient(vmproto.NewVMClient(c), broker), nil\n}\nBump protocol version of rpcchainvm\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage rpcchainvm\n\nimport (\n\t\"golang.org\/x\/net\/context\"\n\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/hashicorp\/go-plugin\"\n\n\t\"github.com\/ava-labs\/avalanchego\/snow\/engine\/snowman\/block\"\n\t\"github.com\/ava-labs\/avalanchego\/vms\/rpcchainvm\/vmproto\"\n)\n\n\/\/ Handshake is a common handshake that is shared by plugin and host.\nvar Handshake = plugin.HandshakeConfig{\n\tProtocolVersion: 2,\n\tMagicCookieKey: \"VM_PLUGIN\",\n\tMagicCookieValue: \"dynamic\",\n}\n\n\/\/ PluginMap is the map of plugins we can dispense.\nvar PluginMap = map[string]plugin.Plugin{\n\t\"vm\": &Plugin{},\n}\n\n\/\/ Plugin is the implementation of plugin.Plugin so we can serve\/consume this.\n\/\/ We also implement GRPCPlugin so that this plugin can be served over gRPC.\ntype Plugin struct {\n\tplugin.NetRPCUnsupportedPlugin\n\t\/\/ Concrete implementation, written in Go. This is only used for plugins\n\t\/\/ that are written in Go.\n\tvm block.ChainVM\n}\n\n\/\/ New creates a new plugin from the provided VM\nfunc New(vm block.ChainVM) *Plugin { return &Plugin{vm: vm} }\n\n\/\/ GRPCServer registers a new GRPC server.\nfunc (p *Plugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {\n\tvmproto.RegisterVMServer(s, NewServer(p.vm, broker))\n\treturn nil\n}\n\n\/\/ GRPCClient returns a new GRPC client\nfunc (p *Plugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {\n\treturn NewClient(vmproto.NewVMClient(c), broker), nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ctmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"unsafe\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/byteorder\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n)\n\nvar log = logging.DefaultLogger\n\nconst (\n\tMapName6 = \"cilium_ct6_\"\n\tMapName4 = \"cilium_ct4_\"\n\tMapName6Global = MapName6 + \"global\"\n\tMapName4Global = MapName4 + \"global\"\n\n\tMapNumEntriesLocal = 64000\n\tMapNumEntriesGlobal = 1000000\n\n\tTUPLE_F_OUT = 0\n\tTUPLE_F_IN = 1\n\tTUPLE_F_RELATED = 2\n\n\t\/\/ MaxTime specifies the last possible 
time for GCFilter.Time\n\tMaxTime = math.MaxUint32\n\n\tnoAction = iota\n\tdeleteEntry\n)\n\ntype CtType int\n\n\/\/ CtKey is the interface describing keys to the conntrack maps.\ntype CtKey interface {\n\tbpf.MapKey\n\n\t\/\/ ToNetwork converts fields to network byte order.\n\tToNetwork() CtKey\n\n\t\/\/ ToHost converts fields to host byte order.\n\tToHost() CtKey\n\n\t\/\/ Dumps contents of key to buffer. Returns true if successful.\n\tDump(buffer *bytes.Buffer) bool\n}\n\n\/\/ CtEntry represents an entry in the connection tracking table.\ntype CtEntry struct {\n\trx_packets uint64\n\trx_bytes uint64\n\ttx_packets uint64\n\ttx_bytes uint64\n\tlifetime uint32\n\tflags uint16\n\t\/\/ revnat is in network byte order\n\trevnat uint16\n\tunused uint16\n\tsrc_sec_id uint32\n}\n\n\/\/ GetValuePtr returns the unsafe.Pointer for c.\nfunc (c *CtEntry) GetValuePtr() unsafe.Pointer { return unsafe.Pointer(c) }\n\n\/\/ String returns the readable format\nfunc (c *CtEntry) String() string {\n\treturn fmt.Sprintf(\"expires=%d rx_packets=%d rx_bytes=%d tx_packets=%d tx_bytes=%d flags=%x revnat=%d src_sec_id=%d\\n\",\n\t\tc.lifetime,\n\t\tc.rx_packets,\n\t\tc.rx_bytes,\n\t\tc.tx_packets,\n\t\tc.tx_bytes,\n\t\tc.flags,\n\t\tbyteorder.NetworkToHost(c.revnat),\n\t\tc.src_sec_id)\n}\n\n\/\/ CtEntryDump represents the key and value contained in the conntrack map.\ntype CtEntryDump struct {\n\tKey CtKey\n\tValue CtEntry\n}\n\nconst (\n\t\/\/ GCFilterNone doesn't filter the CT entries\n\tGCFilterNone = iota\n\t\/\/ GCFilterByTime filters CT entries by time\n\tGCFilterByTime\n)\n\n\/\/ GCFilterType is the type of a filter.\ntype GCFilterType uint\n\n\/\/ GCFilter contains the necessary fields to filter the CT maps.\n\/\/ Filtering by endpoint requires both EndpointID to be > 0 and\n\/\/ EndpointIP to be not nil.\ntype GCFilter struct {\n\tType GCFilterType\n\tTime uint32\n\tEndpointID uint16\n\tEndpointIP net.IP\n}\n\n\/\/ NewGCFilterBy creates a new GCFilter of the given type.\nfunc NewGCFilterBy(filterType GCFilterType) *GCFilter {\n\treturn &GCFilter{\n\t\tType: filterType,\n\t}\n}\n\n\/\/ TypeString returns the filter type in a human-readable way.\nfunc (f *GCFilter) TypeString() string {\n\tswitch f.Type {\n\tcase GCFilterNone:\n\t\treturn \"none\"\n\tcase GCFilterByTime:\n\t\treturn \"timeout\"\n\tdefault:\n\t\treturn \"(unknown)\"\n\t}\n}\n\n\/\/ ToString iterates through Map m and writes the values of the ct entries in m\n\/\/ to a string.\nfunc ToString(m *bpf.Map, mapName string) (string, error) {\n\tvar buffer bytes.Buffer\n\tentries, err := dumpToSlice(m, mapName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, entry := range entries {\n\t\tif !entry.Key.ToHost().Dump(&buffer) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue := entry.Value\n\t\tbuffer.WriteString(\n\t\t\tfmt.Sprintf(\" expires=%d rx_packets=%d rx_bytes=%d tx_packets=%d tx_bytes=%d flags=%x revnat=%d src_sec_id=%d\\n\",\n\t\t\t\tvalue.lifetime,\n\t\t\t\tvalue.rx_packets,\n\t\t\t\tvalue.rx_bytes,\n\t\t\t\tvalue.tx_packets,\n\t\t\t\tvalue.tx_bytes,\n\t\t\t\tvalue.flags,\n\t\t\t\tbyteorder.NetworkToHost(value.revnat),\n\t\t\t\tvalue.src_sec_id,\n\t\t\t),\n\t\t)\n\n\t}\n\treturn buffer.String(), nil\n}\n\n\/\/ DumpToSlice iterates through map m and returns a slice mapping each key to\n\/\/ its value in m.\nfunc dumpToSlice(m *bpf.Map, mapType string) ([]CtEntryDump, error) {\n\tentries := []CtEntryDump{}\n\n\tswitch mapType {\n\tcase MapName6, MapName6Global:\n\t\tvar key, nextKey CtKey6Global\n\t\tfor {\n\t\t\terr := m.GetNextKey(&key, 
&nextKey)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tentry, err := m.Lookup(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctEntry := entry.(*CtEntry)\n\n\t\t\tnK := nextKey\n\t\t\teDump := CtEntryDump{Key: &nK, Value: *ctEntry}\n\t\t\tentries = append(entries, eDump)\n\n\t\t\tkey = nextKey\n\t\t}\n\n\tcase MapName4, MapName4Global:\n\t\tvar key, nextKey CtKey4Global\n\t\tfor {\n\t\t\terr := m.GetNextKey(&key, &nextKey)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tentry, err := m.Lookup(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctEntry := entry.(*CtEntry)\n\n\t\t\tnK := nextKey\n\t\t\teDump := CtEntryDump{Key: &nK, Value: *ctEntry}\n\t\t\tentries = append(entries, eDump)\n\n\t\t\tkey = nextKey\n\t\t}\n\t}\n\treturn entries, nil\n}\n\n\/\/ doGC6 iterates through a CTv6 map and drops entries based on the given\n\/\/ filter.\nfunc doGC6(m *bpf.Map, filter *GCFilter) int {\n\tvar (\n\t\taction, deleted int\n\t\tnextKey, tmpKey CtKey6Global\n\t)\n\n\terr := m.GetNextKey(&tmpKey, &nextKey)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tfor {\n\t\tnextKeyValid := m.GetNextKey(&nextKey, &tmpKey)\n\t\tentryMap, err := m.Lookup(&nextKey)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"error during map Lookup\")\n\t\t\tbreak\n\t\t}\n\n\t\tentry := entryMap.(*CtEntry)\n\n\t\t\/\/ In CT entries, the source address of the conntrack entry (`saddr`) is\n\t\t\/\/ the destination of the packet received, therefore it's the packet's\n\t\t\/\/ destination IP\n\t\taction = filter.doFiltering(nextKey.daddr.IP(), nextKey.saddr.IP(), nextKey.sport, uint8(nextKey.nexthdr), nextKey.flags, entry)\n\n\t\tswitch action {\n\t\tcase deleteEntry:\n\t\t\terr := m.Delete(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"Unable to delete CT entry %s\", nextKey.String())\n\t\t\t} else {\n\t\t\t\tdeleted++\n\t\t\t}\n\t\t}\n\n\t\tif nextKeyValid != nil {\n\t\t\tbreak\n\t\t}\n\t\tnextKey = tmpKey\n\t}\n\treturn deleted\n}\n\n\/\/ doGC4 iterates through a CTv4 map and drops entries based on the given\n\/\/ filter.\nfunc doGC4(m *bpf.Map, filter *GCFilter) int {\n\tvar (\n\t\taction, deleted int\n\t\tnextKey, tmpKey CtKey4Global\n\t)\n\n\terr := m.GetNextKey(&tmpKey, &nextKey)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tfor true {\n\t\tnextKeyValid := m.GetNextKey(&nextKey, &tmpKey)\n\t\tentryMap, err := m.Lookup(&nextKey)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"error during map Lookup\")\n\t\t\tbreak\n\t\t}\n\n\t\tentry := entryMap.(*CtEntry)\n\n\t\t\/\/ In CT entries, the source address of the conntrack entry (`saddr`) is\n\t\t\/\/ the destination of the packet received, therefore it's the packet's\n\t\t\/\/ destination IP\n\t\taction = filter.doFiltering(nextKey.daddr.IP(), nextKey.saddr.IP(), nextKey.sport, uint8(nextKey.nexthdr), nextKey.flags, entry)\n\n\t\tswitch action {\n\t\tcase deleteEntry:\n\t\t\terr := m.Delete(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"Unable to delete CT entry %s\", nextKey.String())\n\t\t\t} else {\n\t\t\t\tdeleted++\n\t\t\t}\n\t\t}\n\n\t\tif nextKeyValid != nil {\n\t\t\tbreak\n\t\t}\n\t\tnextKey = tmpKey\n\t}\n\treturn deleted\n}\n\nfunc (f *GCFilter) doFiltering(srcIP net.IP, dstIP net.IP, dstPort uint16, nextHdr, flags uint8, entry *CtEntry) (action int) {\n\t\/\/ Delete all entries with a lifetime smaller than f timestamp.\n\tif f.Type == GCFilterByTime && entry.lifetime < f.Time {\n\t\treturn deleteEntry\n\t}\n\n\treturn noAction\n}\n\n\/\/ GC runs 
garbage collection for map m with name mapName with the given filter.\n\/\/ It returns how many items were deleted from m.\nfunc GC(m *bpf.Map, mapName string, filter *GCFilter) int {\n\tif filter.Type == GCFilterByTime {\n\t\t\/\/ If LRUHashtable, no need to garbage collect as LRUHashtable cleans itself up.\n\t\t\/\/ FIXME: GH-3239 LRU logic is not handling timeouts gracefully enough\n\t\t\/\/ if m.MapInfo.MapType == bpf.MapTypeLRUHash {\n\t\t\/\/ \treturn 0\n\t\t\/\/ }\n\t\tt, _ := bpf.GetMtime()\n\t\ttsec := t \/ 1000000000\n\t\tfilter.Time = uint32(tsec)\n\t}\n\n\tswitch mapName {\n\tcase MapName6, MapName6Global:\n\t\treturn doGC6(m, filter)\n\tcase MapName4, MapName4Global:\n\t\treturn doGC4(m, filter)\n\tdefault:\n\t\treturn 0\n\t}\n}\n\n\/\/ Flush runs garbage collection for map m with the name mapName, deleting all\n\/\/ entries. The specified map must be already opened using bpf.OpenMap().\nfunc Flush(m *bpf.Map, mapName string) int {\n\tfilter := NewGCFilterBy(GCFilterByTime)\n\tfilter.Time = MaxTime\n\n\tswitch mapName {\n\tcase MapName6, MapName6Global:\n\t\treturn doGC6(m, filter)\n\tcase MapName4, MapName4Global:\n\t\treturn doGC4(m, filter)\n\tdefault:\n\t\treturn 0\n\t}\n}\nctmap: Make GC bpf map dumps more robust.\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ctmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"unsafe\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/byteorder\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n)\n\nvar log = logging.DefaultLogger\n\nconst (\n\tMapName6 = \"cilium_ct6_\"\n\tMapName4 = \"cilium_ct4_\"\n\tMapName6Global = MapName6 + \"global\"\n\tMapName4Global = MapName4 + \"global\"\n\n\tMapNumEntriesLocal = 64000\n\tMapNumEntriesGlobal = 1000000\n\n\tTUPLE_F_OUT = 0\n\tTUPLE_F_IN = 1\n\tTUPLE_F_RELATED = 2\n\n\t\/\/ MaxTime specifies the last possible time for GCFilter.Time\n\tMaxTime = math.MaxUint32\n\n\tnoAction = iota\n\tdeleteEntry\n)\n\ntype CtType int\n\n\/\/ CtKey is the interface describing keys to the conntrack maps.\ntype CtKey interface {\n\tbpf.MapKey\n\n\t\/\/ ToNetwork converts fields to network byte order.\n\tToNetwork() CtKey\n\n\t\/\/ ToHost converts fields to host byte order.\n\tToHost() CtKey\n\n\t\/\/ Dumps contents of key to buffer. 
Returns true if successful.\n\tDump(buffer *bytes.Buffer) bool\n}\n\n\/\/ CtEntry represents an entry in the connection tracking table.\ntype CtEntry struct {\n\trx_packets uint64\n\trx_bytes uint64\n\ttx_packets uint64\n\ttx_bytes uint64\n\tlifetime uint32\n\tflags uint16\n\t\/\/ revnat is in network byte order\n\trevnat uint16\n\tunused uint16\n\tsrc_sec_id uint32\n}\n\n\/\/ GetValuePtr returns the unsafe.Pointer for c.\nfunc (c *CtEntry) GetValuePtr() unsafe.Pointer { return unsafe.Pointer(c) }\n\n\/\/ String returns the readable format\nfunc (c *CtEntry) String() string {\n\treturn fmt.Sprintf(\"expires=%d rx_packets=%d rx_bytes=%d tx_packets=%d tx_bytes=%d flags=%x revnat=%d src_sec_id=%d\\n\",\n\t\tc.lifetime,\n\t\tc.rx_packets,\n\t\tc.rx_bytes,\n\t\tc.tx_packets,\n\t\tc.tx_bytes,\n\t\tc.flags,\n\t\tbyteorder.NetworkToHost(c.revnat),\n\t\tc.src_sec_id)\n}\n\n\/\/ CtEntryDump represents the key and value contained in the conntrack map.\ntype CtEntryDump struct {\n\tKey CtKey\n\tValue CtEntry\n}\n\nconst (\n\t\/\/ GCFilterNone doesn't filter the CT entries\n\tGCFilterNone = iota\n\t\/\/ GCFilterByTime filters CT entries by time\n\tGCFilterByTime\n)\n\n\/\/ GCFilterType is the type of a filter.\ntype GCFilterType uint\n\n\/\/ GCFilter contains the necessary fields to filter the CT maps.\n\/\/ Filtering by endpoint requires both EndpointID to be > 0 and\n\/\/ EndpointIP to be not nil.\ntype GCFilter struct {\n\tType GCFilterType\n\tTime uint32\n\tEndpointID uint16\n\tEndpointIP net.IP\n}\n\n\/\/ NewGCFilterBy creates a new GCFilter of the given type.\nfunc NewGCFilterBy(filterType GCFilterType) *GCFilter {\n\treturn &GCFilter{\n\t\tType: filterType,\n\t}\n}\n\n\/\/ TypeString returns the filter type in a human-readable way.\nfunc (f *GCFilter) TypeString() string {\n\tswitch f.Type {\n\tcase GCFilterNone:\n\t\treturn \"none\"\n\tcase GCFilterByTime:\n\t\treturn \"timeout\"\n\tdefault:\n\t\treturn \"(unknown)\"\n\t}\n}\n\n\/\/ ToString iterates through Map m and writes the values of the ct entries in m\n\/\/ to a string.\nfunc ToString(m *bpf.Map, mapName string) (string, error) {\n\tvar buffer bytes.Buffer\n\tentries, err := dumpToSlice(m, mapName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, entry := range entries {\n\t\tif !entry.Key.ToHost().Dump(&buffer) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue := entry.Value\n\t\tbuffer.WriteString(\n\t\t\tfmt.Sprintf(\" expires=%d rx_packets=%d rx_bytes=%d tx_packets=%d tx_bytes=%d flags=%x revnat=%d src_sec_id=%d\\n\",\n\t\t\t\tvalue.lifetime,\n\t\t\t\tvalue.rx_packets,\n\t\t\t\tvalue.rx_bytes,\n\t\t\t\tvalue.tx_packets,\n\t\t\t\tvalue.tx_bytes,\n\t\t\t\tvalue.flags,\n\t\t\t\tbyteorder.NetworkToHost(value.revnat),\n\t\t\t\tvalue.src_sec_id,\n\t\t\t),\n\t\t)\n\n\t}\n\treturn buffer.String(), nil\n}\n\n\/\/ DumpToSlice iterates through map m and returns a slice mapping each key to\n\/\/ its value in m.\nfunc dumpToSlice(m *bpf.Map, mapType string) ([]CtEntryDump, error) {\n\tentries := []CtEntryDump{}\n\n\tswitch mapType {\n\tcase MapName6, MapName6Global:\n\t\tvar key, nextKey CtKey6Global\n\t\tfor {\n\t\t\terr := m.GetNextKey(&key, &nextKey)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tentry, err := m.Lookup(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctEntry := entry.(*CtEntry)\n\n\t\t\tnK := nextKey\n\t\t\teDump := CtEntryDump{Key: &nK, Value: *ctEntry}\n\t\t\tentries = append(entries, eDump)\n\n\t\t\tkey = nextKey\n\t\t}\n\n\tcase MapName4, MapName4Global:\n\t\tvar key, nextKey 
CtKey4Global\n\t\tfor {\n\t\t\terr := m.GetNextKey(&key, &nextKey)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tentry, err := m.Lookup(&nextKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctEntry := entry.(*CtEntry)\n\n\t\t\tnK := nextKey\n\t\t\teDump := CtEntryDump{Key: &nK, Value: *ctEntry}\n\t\t\tentries = append(entries, eDump)\n\n\t\t\tkey = nextKey\n\t\t}\n\t}\n\treturn entries, nil\n}\n\n\/\/ doGC6 iterates through a CTv6 map and drops entries based on the given\n\/\/ filter.\nfunc doGC6(m *bpf.Map, filter *GCFilter) int {\n\tvar (\n\t\taction, deleted int\n\t\tprevKey, currentKey, nextKey CtKey6Global\n\t)\n\n\t\/\/ prevKey is initially invalid, causing GetNextKey to return the first key in the map as currentKey.\n\tprevKeyValid := false\n\terr := m.GetNextKey(&prevKey, &currentKey)\n\tif err != nil {\n\t\t\/\/ Map is empty, nothing to clean up.\n\t\treturn 0\n\t}\n\n\tvar count uint32\n\tfor count = 1; count <= m.MapInfo.MaxEntries; count++ {\n\t\t\/\/ currentKey was returned by GetNextKey() so we know it existed in the map, but it may have been\n\t\t\/\/ deleted by a concurrent map operation. If currentKey is no longer in the map, nextKey will be\n\t\t\/\/ the first key in the map again. Use the nextKey only if we still find currentKey in the Lookup()\n\t\t\/\/ after the GetNextKey() call, this way we know nextKey is NOT the first key in the map.\n\t\tnextKeyValid := m.GetNextKey(&currentKey, &nextKey)\n\t\tentryMap, err := m.Lookup(&currentKey)\n\t\tif err != nil {\n\t\t\t\/\/ Restarting from an invalid key starts the iteration again from the beginning.\n\t\t\t\/\/ If we have a previously found key, try to restart from there instead\n\t\t\tif prevKeyValid {\n\t\t\t\tcurrentKey = prevKey\n\t\t\t\t\/\/ Restart from a given previous key only once, otherwise if the prevKey is\n\t\t\t\t\/\/ concurrently deleted we might loop forever trying to look it up.\n\t\t\t\tprevKeyValid = false\n\t\t\t} else {\n\t\t\t\t\/\/ Depending on exactly when currentKey was deleted from the map, nextKey may be the actual\n\t\t\t\t\/\/ key element after the deleted one, or the first element in the map.\n\t\t\t\tcurrentKey = nextKey\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tentry := entryMap.(*CtEntry)\n\n\t\t\/\/ In CT entries, the source address of the conntrack entry (`saddr`) is\n\t\t\/\/ the destination of the packet received, therefore it's the packet's\n\t\t\/\/ destination IP\n\t\taction = filter.doFiltering(currentKey.daddr.IP(), currentKey.saddr.IP(), currentKey.sport,\n\t\t\tuint8(currentKey.nexthdr), currentKey.flags, entry)\n\n\t\tswitch action {\n\t\tcase deleteEntry:\n\t\t\terr := m.Delete(&currentKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"Unable to delete CT entry %s\", currentKey.String())\n\t\t\t} else {\n\t\t\t\tdeleted++\n\t\t\t}\n\t\t}\n\n\t\tif nextKeyValid != nil {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ remember the last found key\n\t\tprevKey = currentKey\n\t\tprevKeyValid = true\n\t\t\/\/ continue from the next key\n\t\tcurrentKey = nextKey\n\t}\n\n\tif count > m.MapInfo.MaxEntries {\n\t\t\/\/ TODO Add a metric we can bump and observe here.\n\t\tlog.WithError(err).Warning(\"Garbage collection on IPv6 CT map failed to finish\")\n\t}\n\n\treturn deleted\n}\n\n\/\/ doGC4 iterates through a CTv4 map and drops entries based on the given\n\/\/ filter.\nfunc doGC4(m *bpf.Map, filter *GCFilter) int {\n\tvar (\n\t\taction, deleted int\n\t\tprevKey, currentKey, nextKey CtKey4Global\n\t)\n\n\t\/\/ prevKey is initially invalid, causing GetNextKey to return the first key in the 
map as currentKey.\n\tprevKeyValid := false\n\terr := m.GetNextKey(&prevKey, &currentKey)\n\tif err != nil {\n\t\t\/\/ Map is empty, nothing to clean up.\n\t\treturn 0\n\t}\n\n\tvar count uint32\n\tfor count = 1; count <= m.MapInfo.MaxEntries; count++ {\n\t\t\/\/ currentKey was returned by GetNextKey() so we know it existed in the map, but it may have been\n\t\t\/\/ deleted by a concurrent map operation. If currentKey is no longer in the map, nextKey will be\n\t\t\/\/ the first key in the map again. Use the nextKey only if we still find currentKey in the Lookup()\n\t\t\/\/ after the GetNextKey() call, this way we know nextKey is NOT the first key in the map.\n\t\tnextKeyValid := m.GetNextKey(&currentKey, &nextKey)\n\t\tentryMap, err := m.Lookup(&currentKey)\n\t\tif err != nil {\n\t\t\t\/\/ Restarting from an invalid key starts the iteration again from the beginning.\n\t\t\t\/\/ If we have a previously found key, try to restart from there instead\n\t\t\tif prevKeyValid {\n\t\t\t\tcurrentKey = prevKey\n\t\t\t\t\/\/ Restart from a given previous key only once, otherwise if the prevKey is\n\t\t\t\t\/\/ concurrently deleted we might loop forever trying to look it up.\n\t\t\t\tprevKeyValid = false\n\t\t\t} else {\n\t\t\t\t\/\/ Depending on exactly when currentKey was deleted from the map, nextKey may be the actual\n\t\t\t\t\/\/ key element after the deleted one, or the first element in the map.\n\t\t\t\tcurrentKey = nextKey\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tentry := entryMap.(*CtEntry)\n\n\t\t\/\/ In CT entries, the source address of the conntrack entry (`saddr`) is\n\t\t\/\/ the destination of the packet received, therefore it's the packet's\n\t\t\/\/ destination IP\n\t\taction = filter.doFiltering(currentKey.daddr.IP(), currentKey.saddr.IP(), currentKey.sport,\n\t\t\tuint8(currentKey.nexthdr), currentKey.flags, entry)\n\n\t\tswitch action {\n\t\tcase deleteEntry:\n\t\t\terr := m.Delete(&currentKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"Unable to delete CT entry %s\", currentKey.String())\n\t\t\t} else {\n\t\t\t\tdeleted++\n\t\t\t}\n\t\t}\n\n\t\tif nextKeyValid != nil {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ remember the last found key\n\t\tprevKey = currentKey\n\t\tprevKeyValid = true\n\t\t\/\/ continue from the next key\n\t\tcurrentKey = nextKey\n\t}\n\n\tif count > m.MapInfo.MaxEntries {\n\t\t\/\/ TODO Add a metric we can bump and observe here.\n\t\tlog.WithError(err).Warning(\"Garbage collection on IPv4 CT map failed to finish\")\n\t}\n\n\treturn deleted\n}\n\nfunc (f *GCFilter) doFiltering(srcIP net.IP, dstIP net.IP, dstPort uint16, nextHdr, flags uint8, entry *CtEntry) (action int) {\n\t\/\/ Delete all entries with a lifetime smaller than f timestamp.\n\tif f.Type == GCFilterByTime && entry.lifetime < f.Time {\n\t\treturn deleteEntry\n\t}\n\n\treturn noAction\n}\n\n\/\/ GC runs garbage collection for map m with name mapName with the given filter.\n\/\/ It returns how many items were deleted from m.\nfunc GC(m *bpf.Map, mapName string, filter *GCFilter) int {\n\tif filter.Type == GCFilterByTime {\n\t\t\/\/ If LRUHashtable, no need to garbage collect as LRUHashtable cleans itself up.\n\t\t\/\/ FIXME: GH-3239 LRU logic is not handling timeouts gracefully enough\n\t\t\/\/ if m.MapInfo.MapType == bpf.MapTypeLRUHash {\n\t\t\/\/ \treturn 0\n\t\t\/\/ }\n\t\tt, _ := bpf.GetMtime()\n\t\ttsec := t \/ 1000000000\n\t\tfilter.Time = uint32(tsec)\n\t}\n\n\tswitch mapName {\n\tcase MapName6, MapName6Global:\n\t\treturn doGC6(m, filter)\n\tcase MapName4, MapName4Global:\n\t\treturn doGC4(m, 
filter)\n\tdefault:\n\t\treturn 0\n\t}\n}\n\n\/\/ Flush runs garbage collection for map m with the name mapName, deleting all\n\/\/ entries. The specified map must be already opened using bpf.OpenMap().\nfunc Flush(m *bpf.Map, mapName string) int {\n\tfilter := NewGCFilterBy(GCFilterByTime)\n\tfilter.Time = MaxTime\n\n\tswitch mapName {\n\tcase MapName6, MapName6Global:\n\t\treturn doGC6(m, filter)\n\tcase MapName4, MapName4Global:\n\t\treturn doGC4(m, filter)\n\tdefault:\n\t\treturn 0\n\t}\n}\n<|endoftext|>"} {"text":"package proxy\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n)\n\n\/\/ Route is the container for a proxy and its handlers\ntype Route struct {\n\tproxy *Definition\n\thandlers []router.Constructor\n}\n\ntype routeJSONProxy struct {\n\tProxy *Definition `json:\"proxy\"`\n}\n\n\/\/ NewRoute creates an instance of Route\nfunc NewRoute(proxy *Definition, handlers ...router.Constructor) *Route {\n\treturn &Route{proxy, handlers}\n}\n\n\/\/ JSONMarshal encodes route struct to JSON\nfunc (r *Route) JSONMarshal() ([]byte, error) {\n\treturn json.Marshal(routeJSONProxy{r.proxy})\n}\n\n\/\/ JSONUnmarshalRoute decodes route struct from JSON\nfunc JSONUnmarshalRoute(rawRoute []byte) (*Route, error) {\n\tvar proxyRoute routeJSONProxy\n\tif err := json.Unmarshal(rawRoute, &proxyRoute); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewRoute(proxyRoute.Proxy), nil\n}\n\n\/\/ Definition defines proxy rules for a route\ntype Definition struct {\n\tPreserveHost bool `bson:\"preserve_host\" json:\"preserve_host\" mapstructure:\"preserve_host\"`\n\tListenPath string `bson:\"listen_path\" json:\"listen_path\" mapstructure:\"listen_path\" valid:\"required\"`\n\tUpstreamURL string `bson:\"upstream_url\" json:\"upstream_url\" mapstructure:\"upstream_url\" valid:\"url,required\"`\n\tStripPath bool `bson:\"strip_path\" json:\"strip_path\" mapstructure:\"strip_path\"`\n\tAppendPath bool `bson:\"append_path\" json:\"append_path\" mapstructure:\"append_path\"`\n\tEnableLoadBalancing bool `bson:\"enable_load_balancing\" json:\"enable_load_balancing\" mapstructure:\"enable_load_balancing\"`\n\tMethods []string `bson:\"methods\" json:\"methods\"`\n\tHosts []string `bson:\"hosts\" json:\"hosts\"`\n}\n\n\/\/ Validate validates proxy data\nfunc (d *Definition) Validate() (bool, error) {\n\treturn govalidator.ValidateStruct(d)\n}\nAdded constructor for proxypackage proxy\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n)\n\n\/\/ Route is the container for a proxy and its handlers\ntype Route struct {\n\tproxy *Definition\n\thandlers []router.Constructor\n}\n\ntype routeJSONProxy struct {\n\tProxy *Definition `json:\"proxy\"`\n}\n\n\/\/ NewRoute creates an instance of Route\nfunc NewRoute(proxy *Definition, handlers ...router.Constructor) *Route {\n\treturn &Route{proxy, handlers}\n}\n\n\/\/ JSONMarshal encodes route struct to JSON\nfunc (r *Route) JSONMarshal() ([]byte, error) {\n\treturn json.Marshal(routeJSONProxy{r.proxy})\n}\n\n\/\/ JSONUnmarshalRoute decodes route struct from JSON\nfunc JSONUnmarshalRoute(rawRoute []byte) (*Route, error) {\n\tvar proxyRoute routeJSONProxy\n\tif err := json.Unmarshal(rawRoute, &proxyRoute); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewRoute(proxyRoute.Proxy), nil\n}\n\n\/\/ Definition defines proxy rules for a route\ntype Definition struct {\n\tPreserveHost bool `bson:\"preserve_host\" 
json:\"preserve_host\" mapstructure:\"preserve_host\"`\n\tListenPath string `bson:\"listen_path\" json:\"listen_path\" mapstructure:\"listen_path\" valid:\"required\"`\n\tUpstreamURL string `bson:\"upstream_url\" json:\"upstream_url\" mapstructure:\"upstream_url\" valid:\"url,required\"`\n\tStripPath bool `bson:\"strip_path\" json:\"strip_path\" mapstructure:\"strip_path\"`\n\tAppendPath bool `bson:\"append_path\" json:\"append_path\" mapstructure:\"append_path\"`\n\tEnableLoadBalancing bool `bson:\"enable_load_balancing\" json:\"enable_load_balancing\" mapstructure:\"enable_load_balancing\"`\n\tMethods []string `bson:\"methods\" json:\"methods\"`\n\tHosts []string `bson:\"hosts\" json:\"hosts\"`\n}\n\n\/\/ NewDefinition creates a new Proxy Definition with default values\nfunc NewDefinition() *Definition {\n\treturn &Definition{\n\t\tMethods: make([]string, 0),\n\t\tHosts: make([]string, 0),\n\t}\n}\n\n\/\/ Validate validates proxy data\nfunc (d *Definition) Validate() (bool, error) {\n\treturn govalidator.ValidateStruct(d)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"math\/big\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\tk8snet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/client-go\/tools\/remotecommand\"\n\tk8scert \"k8s.io\/client-go\/util\/cert\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/server\/streaming\"\n\t\"k8s.io\/utils\/exec\"\n\n\tctrdutil \"github.com\/containerd\/cri\/pkg\/containerd\/util\"\n)\n\nconst (\n\t\/\/ OrganizationName is is the name of this organization, used for certificates etc.\n\tOrganizationName = \"containerd\"\n\t\/\/ CRIName is the common name of the CRI plugin\n\tCRIName = \"cri\"\n)\n\nfunc newStreamServer(c *criService, addr, port string) (streaming.Server, error) {\n\tif addr == \"\" {\n\t\ta, err := k8snet.ChooseBindAddress(nil)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get stream server address\")\n\t\t}\n\t\taddr = a.String()\n\t}\n\tconfig := streaming.DefaultConfig\n\tconfig.Addr = net.JoinHostPort(addr, port)\n\truntime := newStreamRuntime(c)\n\ttlsCert, err := newTLSCert()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate tls certificate for stream server\")\n\t}\n\tconfig.TLSConfig = &tls.Config{\n\t\tCertificates: []tls.Certificate{tlsCert},\n\t\tInsecureSkipVerify: true,\n\t}\n\treturn streaming.NewServer(config, runtime)\n}\n\ntype streamRuntime struct {\n\tc *criService\n}\n\nfunc newStreamRuntime(c *criService) streaming.Runtime {\n\treturn &streamRuntime{c: c}\n}\n\n\/\/ Exec executes a command inside the container. 
exec.ExitError is returned if the command\n\/\/ returns non-zero exit code.\nfunc (s *streamRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser,\n\ttty bool, resize <-chan remotecommand.TerminalSize) error {\n\texitCode, err := s.c.execInContainer(ctrdutil.NamespacedContext(), containerID, execOptions{\n\t\tcmd: cmd,\n\t\tstdin: stdin,\n\t\tstdout: stdout,\n\t\tstderr: stderr,\n\t\ttty: tty,\n\t\tresize: resize,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to exec in container\")\n\t}\n\tif *exitCode == 0 {\n\t\treturn nil\n\t}\n\treturn &exec.CodeExitError{\n\t\tErr: errors.Errorf(\"error executing command %v, exit code %d\", cmd, *exitCode),\n\t\tCode: int(*exitCode),\n\t}\n}\n\nfunc (s *streamRuntime) Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool,\n\tresize <-chan remotecommand.TerminalSize) error {\n\treturn s.c.attachContainer(ctrdutil.NamespacedContext(), containerID, in, out, err, tty, resize)\n}\n\nfunc (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {\n\tif port <= 0 || port > math.MaxUint16 {\n\t\treturn errors.Errorf(\"invalid port %d\", port)\n\t}\n\treturn s.c.portForward(podSandboxID, port, stream)\n}\n\n\/\/ handleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each\n\/\/ remotecommand.TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the\n\/\/ goroutine.\nfunc handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {\n\tif resize == nil {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tdefer runtime.HandleCrash()\n\n\t\tfor {\n\t\t\tsize, ok := <-resize\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif size.Height < 1 || size.Width < 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresizeFunc(size)\n\t\t}\n\t}()\n}\n\n\/\/ newTLSCert returns a tls.certificate loaded from a newly generated\n\/\/ x509certificate from a newly generated rsa public\/private key pair. The\n\/\/ x509certificate is self signed.\n\/\/ TODO (mikebrow): replace \/ rewrite this function to support using CA\n\/\/ signing of the certificate. Requires a security plan for kubernetes regarding\n\/\/ CRI connections \/ streaming, etc. 
For example, kubernetes could configure or\n\/\/ require a CA service and pass a configuration down through CRI.\nfunc newTLSCert() (tls.Certificate, error) {\n\tfail := func(err error) (tls.Certificate, error) { return tls.Certificate{}, err }\n\tvar years = 1 \/\/ duration of certificate\n\n\t\/\/ Generate new private key\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"private key cannot be created\"))\n\t}\n\n\t\/\/ Generate pem block using the private key\n\tkeyPem := pem.EncodeToMemory(&pem.Block{\n\t\tType: k8scert.RSAPrivateKeyBlockType,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privKey),\n\t})\n\n\t\/\/ Generate a new random serial number for certificate\n\tserialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to generate serial number\"))\n\t}\n\thostName, err := os.Hostname()\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to get hostname\"))\n\t}\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to get host IP addresses\"))\n\t}\n\n\t\/\/ Configure and create new certificate\n\ttml := x509.Certificate{\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(years, 0, 0),\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: fmt.Sprintf(\"%s:%s:%s\", OrganizationName, CRIName, hostName),\n\t\t\tOrganization: []string{OrganizationName},\n\t\t},\n\t\tBasicConstraintsValid: true,\n\t}\n\tfor _, addr := range addrs {\n\t\tvar ip net.IP\n\n\t\tswitch v := addr.(type) {\n\t\tcase *net.IPNet:\n\t\t\tip = v.IP\n\t\tcase *net.IPAddr:\n\t\t\tip = v.IP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\ttml.IPAddresses = append(tml.IPAddresses, ip)\n\t\ttml.DNSNames = append(tml.DNSNames, ip.String())\n\t}\n\n\tcert, err := x509.CreateCertificate(rand.Reader, &tml, &tml, &privKey.PublicKey, privKey)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"certificate cannot be created\"))\n\t}\n\n\t\/\/ Generate a pem block with the certificate\n\tcertPem := pem.EncodeToMemory(&pem.Block{\n\t\tType: k8scert.CertificateBlockType,\n\t\tBytes: cert,\n\t})\n\n\t\/\/ Load the tls certificate\n\ttlsCert, err := tls.X509KeyPair(certPem, keyPem)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"certificate could not be loaded\"))\n\t}\n\n\treturn tlsCert, nil\n}\nMake const private.\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"math\/big\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\tk8snet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/client-go\/tools\/remotecommand\"\n\tk8scert 
\"k8s.io\/client-go\/util\/cert\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/server\/streaming\"\n\t\"k8s.io\/utils\/exec\"\n\n\tctrdutil \"github.com\/containerd\/cri\/pkg\/containerd\/util\"\n)\n\nconst (\n\t\/\/ certOrganizationName is the name of this organization, used for certificates etc.\n\tcertOrganizationName = \"containerd\"\n\t\/\/ certCommonName is the common name of the CRI plugin\n\tcertCommonName = \"cri\"\n)\n\nfunc newStreamServer(c *criService, addr, port string) (streaming.Server, error) {\n\tif addr == \"\" {\n\t\ta, err := k8snet.ChooseBindAddress(nil)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get stream server address\")\n\t\t}\n\t\taddr = a.String()\n\t}\n\tconfig := streaming.DefaultConfig\n\tconfig.Addr = net.JoinHostPort(addr, port)\n\truntime := newStreamRuntime(c)\n\ttlsCert, err := newTLSCert()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate tls certificate for stream server\")\n\t}\n\tconfig.TLSConfig = &tls.Config{\n\t\tCertificates: []tls.Certificate{tlsCert},\n\t\tInsecureSkipVerify: true,\n\t}\n\treturn streaming.NewServer(config, runtime)\n}\n\ntype streamRuntime struct {\n\tc *criService\n}\n\nfunc newStreamRuntime(c *criService) streaming.Runtime {\n\treturn &streamRuntime{c: c}\n}\n\n\/\/ Exec executes a command inside the container. exec.ExitError is returned if the command\n\/\/ returns non-zero exit code.\nfunc (s *streamRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser,\n\ttty bool, resize <-chan remotecommand.TerminalSize) error {\n\texitCode, err := s.c.execInContainer(ctrdutil.NamespacedContext(), containerID, execOptions{\n\t\tcmd: cmd,\n\t\tstdin: stdin,\n\t\tstdout: stdout,\n\t\tstderr: stderr,\n\t\ttty: tty,\n\t\tresize: resize,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to exec in container\")\n\t}\n\tif *exitCode == 0 {\n\t\treturn nil\n\t}\n\treturn &exec.CodeExitError{\n\t\tErr: errors.Errorf(\"error executing command %v, exit code %d\", cmd, *exitCode),\n\t\tCode: int(*exitCode),\n\t}\n}\n\nfunc (s *streamRuntime) Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool,\n\tresize <-chan remotecommand.TerminalSize) error {\n\treturn s.c.attachContainer(ctrdutil.NamespacedContext(), containerID, in, out, err, tty, resize)\n}\n\nfunc (s *streamRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {\n\tif port <= 0 || port > math.MaxUint16 {\n\t\treturn errors.Errorf(\"invalid port %d\", port)\n\t}\n\treturn s.c.portForward(podSandboxID, port, stream)\n}\n\n\/\/ handleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each\n\/\/ remotecommand.TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the\n\/\/ goroutine.\nfunc handleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {\n\tif resize == nil {\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tdefer runtime.HandleCrash()\n\n\t\tfor {\n\t\t\tsize, ok := <-resize\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif size.Height < 1 || size.Width < 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresizeFunc(size)\n\t\t}\n\t}()\n}\n\n\/\/ newTLSCert returns a tls.certificate loaded from a newly generated\n\/\/ x509certificate from a newly generated rsa public\/private key pair. The\n\/\/ x509certificate is self signed.\n\/\/ TODO (mikebrow): replace \/ rewrite this function to support using CA\n\/\/ signing of the cetificate. 
Requires a security plan for kubernetes regarding\n\/\/ CRI connections \/ streaming, etc. For example, kubernetes could configure or\n\/\/ require a CA service and pass a configuration down through CRI.\nfunc newTLSCert() (tls.Certificate, error) {\n\tfail := func(err error) (tls.Certificate, error) { return tls.Certificate{}, err }\n\tvar years = 1 \/\/ duration of certificate\n\n\t\/\/ Generate new private key\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"private key cannot be created\"))\n\t}\n\n\t\/\/ Generate pem block using the private key\n\tkeyPem := pem.EncodeToMemory(&pem.Block{\n\t\tType: k8scert.RSAPrivateKeyBlockType,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privKey),\n\t})\n\n\t\/\/ Generate a new random serial number for certificate\n\tserialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to generate serial number\"))\n\t}\n\thostName, err := os.Hostname()\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to get hostname\"))\n\t}\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"failed to get host IP addresses\"))\n\t}\n\n\t\/\/ Configure and create new certificate\n\ttml := x509.Certificate{\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().AddDate(years, 0, 0),\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: fmt.Sprintf(\"%s:%s:%s\", certOrganizationName, certCommonName, hostName),\n\t\t\tOrganization: []string{certOrganizationName},\n\t\t},\n\t\tBasicConstraintsValid: true,\n\t}\n\tfor _, addr := range addrs {\n\t\tvar ip net.IP\n\n\t\tswitch v := addr.(type) {\n\t\tcase *net.IPNet:\n\t\t\tip = v.IP\n\t\tcase *net.IPAddr:\n\t\t\tip = v.IP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\ttml.IPAddresses = append(tml.IPAddresses, ip)\n\t\ttml.DNSNames = append(tml.DNSNames, ip.String())\n\t}\n\n\tcert, err := x509.CreateCertificate(rand.Reader, &tml, &tml, &privKey.PublicKey, privKey)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"certificate cannot be created\"))\n\t}\n\n\t\/\/ Generate a pem block with the certificate\n\tcertPem := pem.EncodeToMemory(&pem.Block{\n\t\tType: k8scert.CertificateBlockType,\n\t\tBytes: cert,\n\t})\n\n\t\/\/ Load the tls certificate\n\ttlsCert, err := tls.X509KeyPair(certPem, keyPem)\n\tif err != nil {\n\t\treturn fail(errors.Wrap(err, \"certificate could not be loaded\"))\n\t}\n\n\treturn tlsCert, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plans_test\n\nimport (\n\t\"fmt\"\n\n\t. 
\"github.com\/pingcap\/check\"\n\t\"github.com\/pingcap\/tidb\"\n\t\"github.com\/pingcap\/tidb\/expression\/expressions\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/model\"\n\tmysql \"github.com\/pingcap\/tidb\/mysqldef\"\n\t\"github.com\/pingcap\/tidb\/parser\/opcode\"\n\t\"github.com\/pingcap\/tidb\/plan\/plans\"\n\t\"github.com\/pingcap\/tidb\/rset\/rsets\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/stmt\"\n)\n\ntype testShowSuit struct {\n\ttxn kv.Transaction\n\tvars map[string]interface{}\n}\n\n\/\/ implement Context interface\nfunc (p *testShowSuit) GetTxn(forceNew bool) (kv.Transaction, error) { return p.txn, nil }\n\nfunc (p *testShowSuit) FinishTxn(rollback bool) error { return nil }\n\n\/\/ SetValue saves a value associated with this context for key\nfunc (p *testShowSuit) SetValue(key fmt.Stringer, value interface{}) {\n\tp.vars[key.String()] = value\n}\n\n\/\/ Value returns the value associated with this context for key\nfunc (p *testShowSuit) Value(key fmt.Stringer) interface{} {\n\treturn p.vars[key.String()]\n}\n\n\/\/ ClearValue clears the value associated with this context for key\nfunc (p *testShowSuit) ClearValue(key fmt.Stringer) {}\n\nvar _ = Suite(&testShowSuit{})\n\nfunc (p *testShowSuit) SetUpSuite(c *C) {\n\tvar err error\n\tstore, err := tidb.NewStore(tidb.EngineGoLevelDBMemory)\n\tc.Assert(err, IsNil)\n\tp.vars = map[string]interface{}{}\n\tp.txn, _ = store.Begin()\n\tvariable.BindSessionVars(p)\n}\n\nfunc (p *testShowSuit) TestShowVariables(c *C) {\n\tpln := &plans.ShowPlan{\n\t\tTarget: stmt.ShowVariables,\n\t\tGlobalScope: true,\n\t\tPattern: &expressions.PatternLike{\n\t\t\tPattern: &expressions.Value{\n\t\t\t\tVal: \"character_set_results\",\n\t\t\t},\n\t\t},\n\t}\n\tfls := pln.GetFields()\n\tc.Assert(fls, HasLen, 2)\n\tc.Assert(fls[0].Name, Equals, \"Variable_name\")\n\tc.Assert(fls[1].Name, Equals, \"Value\")\n\tc.Assert(fls[0].Col.Tp, Equals, mysql.TypeVarchar)\n\tc.Assert(fls[0].Col.Tp, Equals, mysql.TypeVarchar)\n\n\tsessionVars := variable.GetSessionVars(p)\n\tret := map[string]string{}\n\trset := rsets.Recordset{\n\t\tCtx: p,\n\t\tPlan: pln,\n\t}\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok := ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\tc.Assert(v, Equals, \"latin1\")\n\t\/\/ Set session variable to utf8\n\tsessionVars.Systems[\"character_set_results\"] = \"utf8\"\n\tpln.Close()\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\t\/\/ Show global varibale get latin1\n\tc.Assert(v, Equals, \"latin1\")\n\n\tpln.GlobalScope = false\n\tpln.Close()\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\t\/\/ Show session varibale get utf8\n\tc.Assert(v, Equals, \"utf8\")\n\tpln.Close()\n\tpln.Pattern = nil\n\tpln.Where = &expressions.BinaryOperation{\n\t\tL: &expressions.Ident{CIStr: model.NewCIStr(\"Variable_name\")},\n\t\tR: expressions.Value{Val: \"autocommit\"},\n\t\tOp: opcode.EQ,\n\t}\n\n\tret = map[string]string{}\n\tsessionVars.Systems[\"autocommit\"] = \"on\"\n\trset.Do(func(data []interface{}) (bool, error) 
{\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"autocommit\"]\n\tc.Assert(ok, IsTrue)\n\tc.Assert(v, Equals, \"on\")\n\n\tpln.Target = stmt.ShowWarnings\n\tfls = pln.GetFields()\n\tc.Assert(fls, HasLen, 3)\n\tc.Assert(fls[1].Col.Tp, Equals, mysql.TypeLong)\n\n\tpln.Target = stmt.ShowCharset\n\tfls = pln.GetFields()\n\tc.Assert(fls, HasLen, 4)\n\tc.Assert(fls[3].Col.Tp, Equals, mysql.TypeLonglong)\n}\n\nfunc (p *testShowSuit) TearDownSuite(c *C) {\n\tp.txn.Commit()\n}\nplans: Address comment\/\/ Copyright 2015 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plans_test\n\nimport (\n\t\"fmt\"\n\n\t. \"github.com\/pingcap\/check\"\n\t\"github.com\/pingcap\/tidb\"\n\t\"github.com\/pingcap\/tidb\/expression\/expressions\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/model\"\n\tmysql \"github.com\/pingcap\/tidb\/mysqldef\"\n\t\"github.com\/pingcap\/tidb\/parser\/opcode\"\n\t\"github.com\/pingcap\/tidb\/plan\/plans\"\n\t\"github.com\/pingcap\/tidb\/rset\/rsets\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/stmt\"\n)\n\ntype testShowSuit struct {\n\ttxn kv.Transaction\n\tvars map[string]interface{}\n}\n\n\/\/ implement Context interface\nfunc (p *testShowSuit) GetTxn(forceNew bool) (kv.Transaction, error) { return p.txn, nil }\n\nfunc (p *testShowSuit) FinishTxn(rollback bool) error { return nil }\n\n\/\/ SetValue saves a value associated with this context for key\nfunc (p *testShowSuit) SetValue(key fmt.Stringer, value interface{}) {\n\tp.vars[key.String()] = value\n}\n\n\/\/ Value returns the value associated with this context for key\nfunc (p *testShowSuit) Value(key fmt.Stringer) interface{} {\n\treturn p.vars[key.String()]\n}\n\n\/\/ ClearValue clears the value associated with this context for key\nfunc (p *testShowSuit) ClearValue(key fmt.Stringer) {}\n\nvar _ = Suite(&testShowSuit{})\n\nfunc (p *testShowSuit) SetUpSuite(c *C) {\n\tvar err error\n\tstore, err := tidb.NewStore(tidb.EngineGoLevelDBMemory)\n\tc.Assert(err, IsNil)\n\tp.vars = map[string]interface{}{}\n\tp.txn, _ = store.Begin()\n\tvariable.BindSessionVars(p)\n}\n\nfunc (p *testShowSuit) TestShowVariables(c *C) {\n\tpln := &plans.ShowPlan{\n\t\tTarget: stmt.ShowVariables,\n\t\tGlobalScope: true,\n\t\tPattern: &expressions.PatternLike{\n\t\t\tPattern: &expressions.Value{\n\t\t\t\tVal: \"character_set_results\",\n\t\t\t},\n\t\t},\n\t}\n\tfls := pln.GetFields()\n\tc.Assert(fls, HasLen, 2)\n\tc.Assert(fls[0].Name, Equals, \"Variable_name\")\n\tc.Assert(fls[1].Name, Equals, \"Value\")\n\tc.Assert(fls[0].Col.Tp, Equals, mysql.TypeVarchar)\n\tc.Assert(fls[1].Col.Tp, Equals, mysql.TypeVarchar)\n\n\tsessionVars := variable.GetSessionVars(p)\n\tret := map[string]string{}\n\trset := rsets.Recordset{\n\t\tCtx: p,\n\t\tPlan: pln,\n\t}\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok := 
ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\tc.Assert(v, Equals, \"latin1\")\n\t\/\/ Set session variable to utf8\n\tsessionVars.Systems[\"character_set_results\"] = \"utf8\"\n\tpln.Close()\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\t\/\/ Show global varibale get latin1\n\tc.Assert(v, Equals, \"latin1\")\n\n\tpln.GlobalScope = false\n\tpln.Close()\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"character_set_results\"]\n\tc.Assert(ok, IsTrue)\n\t\/\/ Show session varibale get utf8\n\tc.Assert(v, Equals, \"utf8\")\n\tpln.Close()\n\tpln.Pattern = nil\n\tpln.Where = &expressions.BinaryOperation{\n\t\tL: &expressions.Ident{CIStr: model.NewCIStr(\"Variable_name\")},\n\t\tR: expressions.Value{Val: \"autocommit\"},\n\t\tOp: opcode.EQ,\n\t}\n\n\tret = map[string]string{}\n\tsessionVars.Systems[\"autocommit\"] = \"on\"\n\trset.Do(func(data []interface{}) (bool, error) {\n\t\tret[data[0].(string)] = data[1].(string)\n\t\treturn true, nil\n\t})\n\n\tc.Assert(ret, HasLen, 1)\n\tv, ok = ret[\"autocommit\"]\n\tc.Assert(ok, IsTrue)\n\tc.Assert(v, Equals, \"on\")\n\n\tpln.Target = stmt.ShowWarnings\n\tfls = pln.GetFields()\n\tc.Assert(fls, HasLen, 3)\n\tc.Assert(fls[1].Col.Tp, Equals, mysql.TypeLong)\n\n\tpln.Target = stmt.ShowCharset\n\tfls = pln.GetFields()\n\tc.Assert(fls, HasLen, 4)\n\tc.Assert(fls[3].Col.Tp, Equals, mysql.TypeLonglong)\n}\n\nfunc (p *testShowSuit) TearDownSuite(c *C) {\n\tp.txn.Commit()\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage factom_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t. 
\"github.com\/FactomProject\/factom\"\n)\n\nvar ()\n\nfunc TestNewChain(t *testing.T) {\n\tent := new(Entry)\n\tent.ChainID = \"\"\n\tent.Content = []byte(\"This is a test Entry.\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"This is the first extid.\"))\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"This is the second extid.\"))\n\n\tnewChain := NewChain(ent)\n\texpectedID := \"5a402200c5cf278e47905ce52d7d64529a0291829a7bd230072c5468be709069\"\n\n\tif newChain.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, newChain.ChainID)\n\t}\n\tt.Log(newChain.ChainID)\n\n\tcfb := NewChainFromBytes(ent.Content, ent.ExtIDs...)\n\tif cfb.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, cfb.ChainID)\n\t}\n\tt.Log(cfb.ChainID)\n\n\tcfs := NewChainFromStrings(\n\t\t\"This is a test Entry.\",\n\t\t\"This is the first extid.\",\n\t\t\"This is the second extid.\",\n\t)\n\tif cfs.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, cfs.ChainID)\n\t}\n\tt.Log(cfs.ChainID)\n}\n\nfunc TestIfExists(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\": \"2.0\",\n \"id\": 0,\n \"result\": {\n \"ChainHead\": \"f65f67774139fa78344dcdd302631a0d646db0c2be4d58e3e48b2a188c1b856c\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\texpectedID := \"f65f67774139fa78344dcdd302631a0d646db0c2be4d58e3e48b2a188c1b856c\"\n\t\/\/fmt.Println(ChainExists(expectedID))\n\tif ChainExists(expectedID) != true {\n\t\tt.Errorf(\"chain %s does not exist\", expectedID)\n\t}\n}\n\nfunc TestIfNotExists(t *testing.T) {\n\tsimlatedFactomdResponse := `{\"jsonrpc\":\"2.0\",\"id\":0,\"error\":{\"code\":-32009,\"message\":\"Missing Chain Head\"}}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\tunexpectedID := \"5a402200c5cf278e47905ce52d7d64529a0291829a7bd230072c5468be709069\"\n\n\tif ChainExists(unexpectedID) != false {\n\t\tt.Errorf(\"chain %s shouldn't exist\", unexpectedID)\n\t}\n}\n\nfunc TestComposeChainCommit(t *testing.T) {\n\ttype response struct {\n\t\tMessage string `json:\"message\"`\n\t}\n\tecAddr, _ := GetECAddress(\"Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG\")\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\tcCommit, _ := ComposeChainCommit(newChain, ecAddr)\n\tr := new(response)\n\tjson.Unmarshal(cCommit.Params, r)\n\tbinCommit, _ := hex.DecodeString(r.Message)\n\tt.Logf(\"%x\", binCommit)\n\n\t\/\/the commit has a timestamp which is updated new for each time it is called. 
This means it is different after each call.\n\t\/\/we will check the non-changing parts\n\n\tif len(binCommit) != 200 {\n\t\tt.Error(\"expected commit to be 200 bytes long, instead got\", len(binCommit))\n\t}\n\tresult := binCommit[0:1]\n\texpected := []byte{0x00}\n\tif !bytes.Equal(result, expected) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expected, result)\n\t}\n\t\/\/skip the 6 bytes of the timestamp\n\tresult = binCommit[7:136]\n\texpected, _ = hex.DecodeString(\"516870d4c0e1ee2d5f0d415e51fc10ae6b8d895561e9314afdc33048194d76f07cc61c8a81aea23d76ff6447689757dc1e36af66e300ce3e06b8d816c79acfd2285ed45081d5b8819a678d13c7c2d04f704b34c74e8aaecd9bd34609bee047200b3b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29\")\n\n\tif !bytes.Equal(result, expected) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expected, result)\n\t}\n}\n\nfunc TestComposeChainReveal(t *testing.T) {\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\tcReveal, _ := ComposeChainReveal(newChain)\n\n\texpectedResponse := `{\"entry\":\"00954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f400060004746573747465737421\"}`\n\tif expectedResponse != string(cReveal.Params) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedResponse, cReveal.Params)\n\t}\n}\n\nfunc TestCommitChain(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\":\"2.0\",\n \"id\":0,\n \"result\":{\n \"message\":\"Chain Commit Success\",\n \"txid\":\"76e123d133a841fe3e08c5e3f3d392f8431f2d7668890c03f003f541efa8fc61\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\tecAddr, _ := GetECAddress(\"Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG\")\n\n\texpectedResponse := \"76e123d133a841fe3e08c5e3f3d392f8431f2d7668890c03f003f541efa8fc61\"\n\tresponse, _ := CommitChain(newChain, ecAddr)\n\n\tif expectedResponse != response {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedResponse, response)\n\t}\n\tt.Log(response)\n}\n\nfunc TestRevealChain(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\": \"2.0\",\n \"id\": 0,\n \"result\": {\n \"message\": \"Entry Reveal Success\",\n \"entryhash\": \"f5c956749fc3eba4acc60fd485fb100e601070a44fcce54ff358d60669854734\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\texpectedResponse := \"f5c956749fc3eba4acc60fd485fb100e601070a44fcce54ff358d60669854734\"\n\tresponse, _ := RevealChain(newChain)\n\n\tif expectedResponse != response {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", 
expectedResponse, response)\n\t}\n\tt.Log(response)\n}\ntest updates\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage factom_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t. \"github.com\/FactomProject\/factom\"\n)\n\nvar ()\n\nfunc TestNewChain(t *testing.T) {\n\tent := new(Entry)\n\tent.ChainID = \"\"\n\tent.Content = []byte(\"This is a test Entry.\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"This is the first extid.\"))\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"This is the second extid.\"))\n\n\tnewChain := NewChain(ent)\n\texpectedID := \"5a402200c5cf278e47905ce52d7d64529a0291829a7bd230072c5468be709069\"\n\n\tif newChain.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, newChain.ChainID)\n\t}\n\tt.Log(newChain.ChainID)\n\n\tcfb := NewChainFromBytes(ent.Content, ent.ExtIDs...)\n\tif cfb.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, cfb.ChainID)\n\t}\n\tt.Log(cfb.ChainID)\n\n\tcfs := NewChainFromStrings(\n\t\t\"This is a test Entry.\",\n\t\t\"This is the first extid.\",\n\t\t\"This is the second extid.\",\n\t)\n\tif cfs.ChainID != expectedID {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedID, cfs.ChainID)\n\t}\n\tt.Log(cfs.ChainID)\n}\n\nfunc TestIfExists(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\": \"2.0\",\n \"id\": 0,\n \"result\": {\n \"ChainHead\": \"f65f67774139fa78344dcdd302631a0d646db0c2be4d58e3e48b2a188c1b856c\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\texpectedID := \"f65f67774139fa78344dcdd302631a0d646db0c2be4d58e3e48b2a188c1b856c\"\n\t\/\/fmt.Println(ChainExists(expectedID))\n\tif ChainExists(expectedID) != true {\n\t\tt.Errorf(\"chain %s does not exist\", expectedID)\n\t}\n}\n\nfunc TestIfNotExists(t *testing.T) {\n\tsimlatedFactomdResponse := `{\"jsonrpc\":\"2.0\",\"id\":0,\"error\":{\"code\":-32009,\"message\":\"Missing Chain Head\"}}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\tunexpectedID := \"5a402200c5cf278e47905ce52d7d64529a0291829a7bd230072c5468be709069\"\n\n\tif ChainExists(unexpectedID) != false {\n\t\tt.Errorf(\"chain %s shouldn't exist\", unexpectedID)\n\t}\n}\n\nfunc TestComposeChainCommit(t *testing.T) {\n\ttype response struct {\n\t\tMessage string `json:\"message\"`\n\t}\n\tecAddr, err := GetECAddress(\"Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\tcCommit, err := ComposeChainCommit(newChain, ecAddr)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tr := new(response)\n\tjson.Unmarshal(cCommit.Params, r)\n\tbinCommit, _ := hex.DecodeString(r.Message)\n\tt.Logf(\"%x\", binCommit)\n\n\t\/\/the commit has a timestamp 
which is updated new for each time it is called. This means it is different after each call.\n\t\/\/we will check the non-changing parts\n\n\tif len(binCommit) != 200 {\n\t\tt.Error(\"expected commit to be 200 bytes long, instead got\", len(binCommit))\n\t}\n\tresult := binCommit[0:1]\n\texpected := []byte{0x00}\n\tif !bytes.Equal(result, expected) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expected, result)\n\t}\n\t\/\/skip the 6 bytes of the timestamp\n\tresult = binCommit[7:136]\n\texpected, err = hex.DecodeString(\"516870d4c0e1ee2d5f0d415e51fc10ae6b8d895561e9314afdc33048194d76f07cc61c8a81aea23d76ff6447689757dc1e36af66e300ce3e06b8d816c79acfd2285ed45081d5b8819a678d13c7c2d04f704b34c74e8aaecd9bd34609bee047200b3b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif !bytes.Equal(result, expected) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expected, result)\n\t}\n}\n\nfunc TestComposeChainReveal(t *testing.T) {\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\tcReveal, err := ComposeChainReveal(newChain)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texpectedResponse := `{\"entry\":\"00954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f400060004746573747465737421\"}`\n\tif expectedResponse != string(cReveal.Params) {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedResponse, cReveal.Params)\n\t}\n}\n\nfunc TestCommitChain(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\":\"2.0\",\n \"id\":0,\n \"result\":{\n \"message\":\"Chain Commit Success\",\n \"txid\":\"76e123d133a841fe3e08c5e3f3d392f8431f2d7668890c03f003f541efa8fc61\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\tecAddr, err := GetECAddress(\"Es2Rf7iM6PdsqfYCo3D1tnAR65SkLENyWJG1deUzpRMQmbh9F3eG\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texpectedResponse := \"76e123d133a841fe3e08c5e3f3d392f8431f2d7668890c03f003f541efa8fc61\"\n\tresponse, _ := CommitChain(newChain, ecAddr)\n\n\tif expectedResponse != response {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedResponse, response)\n\t}\n\tt.Log(response)\n}\n\nfunc TestRevealChain(t *testing.T) {\n\tsimlatedFactomdResponse := `{\n \"jsonrpc\": \"2.0\",\n \"id\": 0,\n \"result\": {\n \"message\": \"Entry Reveal Success\",\n \"entryhash\": \"f5c956749fc3eba4acc60fd485fb100e601070a44fcce54ff358d60669854734\"\n }\n}`\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tfmt.Fprintln(w, simlatedFactomdResponse)\n\t}))\n\tdefer ts.Close()\n\n\turl := ts.URL[7:]\n\tSetFactomdServer(url)\n\n\tent := new(Entry)\n\tent.ChainID = \"954d5a49fd70d9b8bcdb35d252267829957f7ef7fa6c74f88419bdc5e82209f4\"\n\tent.Content = []byte(\"test!\")\n\tent.ExtIDs = append(ent.ExtIDs, []byte(\"test\"))\n\tnewChain := NewChain(ent)\n\n\texpectedResponse := 
\"f5c956749fc3eba4acc60fd485fb100e601070a44fcce54ff358d60669854734\"\n\tresponse, err := RevealChain(newChain)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif expectedResponse != response {\n\t\tt.Errorf(\"expected:%s\\nrecieved:%s\", expectedResponse, response)\n\t}\n\tt.Log(response)\n}\n<|endoftext|>"} {"text":"package calcium\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/projecteru2\/core\/engine\"\n\tenginetypes \"github.com\/projecteru2\/core\/engine\/types\"\n\t\"github.com\/projecteru2\/core\/log\"\n\t\"github.com\/projecteru2\/core\/types\"\n\t\"github.com\/projecteru2\/core\/utils\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar winchCommand = []byte{0x80} \/\/ 128, non-ASCII\nvar escapeCommand = []byte{0x1d} \/\/ 29, ^]\n\ntype window struct {\n\tHeight uint `json:\"Row\"`\n\tWidth uint `json:\"Col\"`\n}\n\nfunc execuateInside(ctx context.Context, client engine.API, ID, cmd, user string, env []string, privileged bool) ([]byte, error) {\n\tcmds := utils.MakeCommandLineArgs(cmd)\n\texecConfig := &enginetypes.ExecConfig{\n\t\tUser: user,\n\t\tCmd: cmds,\n\t\tPrivileged: privileged,\n\t\tEnv: env,\n\t\tAttachStderr: true,\n\t\tAttachStdout: true,\n\t}\n\tb := []byte{}\n\texecID, stdout, stderr, _, err := client.Execute(ctx, ID, execConfig)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\n\tfor m := range processStdStream(ctx, stdout, stderr, bufio.ScanLines, byte('\\n')) {\n\t\tb = append(b, m.Data...)\n\t}\n\n\texitCode, err := client.ExecExitCode(ctx, execID)\n\tif err != nil {\n\t\treturn b, errors.WithStack(err)\n\t}\n\tif exitCode != 0 {\n\t\treturn b, errors.WithStack(fmt.Errorf(\"%s\", b))\n\t}\n\treturn b, nil\n}\n\nfunc distributionInspect(ctx context.Context, node *types.Node, image string, digests []string) bool {\n\tremoteDigest, err := node.Engine.ImageRemoteDigest(ctx, image)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[distributionInspect] get manifest failed %v\", err)\n\t\treturn false\n\t}\n\n\tfor _, digest := range digests {\n\t\tif digest == remoteDigest {\n\t\t\tlog.Debugf(ctx, \"[distributionInspect] Local digest %s\", digest)\n\t\t\tlog.Debugf(ctx, \"[distributionInspect] Remote digest %s\", remoteDigest)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Pull an image\nfunc pullImage(ctx context.Context, node *types.Node, image string) error {\n\tlog.Infof(ctx, \"[pullImage] Pulling image %s\", image)\n\tif image == \"\" {\n\t\treturn errors.WithStack(types.ErrNoImage)\n\t}\n\n\t\/\/ check local\n\texists := false\n\tdigests, err := node.Engine.ImageLocalDigests(ctx, image)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[pullImage] Check image failed %v\", err)\n\t} else {\n\t\tlog.Debug(ctx, \"[pullImage] Local Image exists\")\n\t\texists = true\n\t}\n\n\tif exists && distributionInspect(ctx, node, image, digests) {\n\t\tlog.Debug(ctx, \"[pullImage] Image cached, skip pulling\")\n\t\treturn nil\n\t}\n\n\tlog.Info(\"[pullImage] Image not cached, pulling\")\n\trc, err := node.Engine.ImagePull(ctx, image, false)\n\tdefer utils.EnsureReaderClosed(ctx, rc)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[pullImage] Error during pulling image %s: %v\", image, err)\n\t\treturn errors.WithStack(err)\n\t}\n\tlog.Infof(ctx, \"[pullImage] Done pulling image %s\", image)\n\treturn nil\n}\n\nfunc makeCopyMessage(id, name, path string, err error, data io.ReadCloser) *types.CopyMessage {\n\treturn &types.CopyMessage{\n\t\tID: id,\n\t\tName: name,\n\t\tPath: 
path,\n\t\tError: err,\n\t\tData: data,\n\t}\n}\n\nfunc processVirtualizationInStream(\n\tctx context.Context,\n\tinStream io.WriteCloser,\n\tinCh <-chan []byte,\n\tresizeFunc func(height, width uint) error,\n) <-chan struct{} { \/\/ nolint\n\tspecialPrefixCallback := map[string]func([]byte){\n\t\tstring(winchCommand): func(body []byte) {\n\t\t\tw := &window{}\n\t\t\tif err := json.Unmarshal(body, w); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[processVirtualizationInStream] invalid winch command: %q\", body)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := resizeFunc(w.Height, w.Width); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[processVirtualizationInStream] resize window error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\n\t\tstring(escapeCommand): func(_ []byte) {\n\t\t\tinStream.Close()\n\t\t},\n\t}\n\treturn rawProcessVirtualizationInStream(ctx, inStream, inCh, specialPrefixCallback)\n}\n\nfunc rawProcessVirtualizationInStream(\n\tctx context.Context,\n\tinStream io.WriteCloser,\n\tinCh <-chan []byte,\n\tspecialPrefixCallback map[string]func([]byte),\n) <-chan struct{} {\n\tdone := make(chan struct{})\n\tutils.SentryGo(func() {\n\t\tdefer close(done)\n\t\tdefer inStream.Close()\n\n\t\tfor cmd := range inCh {\n\t\t\tif len(cmd) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif f, ok := specialPrefixCallback[string(cmd[:1])]; ok {\n\t\t\t\tf(cmd[1:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, err := inStream.Write(cmd); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[rawProcessVirtualizationInStream] failed to write virtual input stream: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\n\treturn done\n}\n\nfunc processVirtualizationOutStream(\n\tctx context.Context,\n\toutStream io.ReadCloser,\n\tsplitFunc bufio.SplitFunc,\n\tsplit byte,\n\n) <-chan []byte {\n\toutCh := make(chan []byte)\n\tutils.SentryGo(func() {\n\t\tdefer close(outCh)\n\t\tif outStream == nil {\n\t\t\treturn\n\t\t}\n\t\tdefer outStream.Close()\n\t\tscanner := bufio.NewScanner(outStream)\n\t\tscanner.Split(splitFunc)\n\t\tfor scanner.Scan() {\n\t\t\tbs := scanner.Bytes()\n\t\t\tif split != 0 {\n\t\t\t\tbs = append(bs, split)\n\t\t\t}\n\t\t\toutCh <- bs\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tlog.Warnf(ctx, \"[processVirtualizationOutStream] failed to read output from output stream: %v\", err)\n\t\t}\n\t})\n\treturn outCh\n}\n\nfunc processBuildImageStream(ctx context.Context, reader io.ReadCloser) chan *types.BuildImageMessage {\n\tch := make(chan *types.BuildImageMessage)\n\tutils.SentryGo(func() {\n\t\tdefer close(ch)\n\t\tdefer utils.EnsureReaderClosed(ctx, reader)\n\t\tdecoder := json.NewDecoder(reader)\n\t\tfor {\n\t\t\tmessage := &types.BuildImageMessage{}\n\t\t\terr := decoder.Decode(message)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tmalformed, _ := ioutil.ReadAll(decoder.Buffered()) \/\/ TODO err check\n\t\t\t\t\tlog.Errorf(ctx, \"[processBuildImageStream] Decode image message failed %v, buffered: %s\", err, string(malformed))\n\t\t\t\t\tmessage.Error = err.Error()\n\t\t\t\t\tch <- message\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tch <- message\n\t\t}\n\t})\n\treturn ch\n}\n\nfunc processStdStream(ctx context.Context, stdout, stderr io.ReadCloser, splitFunc bufio.SplitFunc, split byte) chan types.StdStreamMessage {\n\tch := make(chan types.StdStreamMessage)\n\n\twg := sync.WaitGroup{}\n\n\twg.Add(1)\n\tutils.SentryGo(func() {\n\t\tdefer wg.Done()\n\t\tfor data := range processVirtualizationOutStream(ctx, stdout, splitFunc, split) {\n\t\t\tch <- types.StdStreamMessage{Data: data, StdStreamType: 
types.Stdout}\n\t\t}\n\t})\n\n\twg.Add(1)\n\tutils.SentryGo(func() {\n\t\tdefer wg.Done()\n\t\tfor data := range processVirtualizationOutStream(ctx, stderr, splitFunc, split) {\n\t\t\tch <- types.StdStreamMessage{Data: data, StdStreamType: types.Stderr}\n\t\t}\n\t})\n\n\tutils.SentryGo(func() {\n\t\tdefer close(ch)\n\t\twg.Wait()\n\t})\n\n\treturn ch\n}\nbugfix: consumer mustn't end until producer closes (#408)package calcium\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"sync\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/projecteru2\/core\/engine\"\n\tenginetypes \"github.com\/projecteru2\/core\/engine\/types\"\n\t\"github.com\/projecteru2\/core\/log\"\n\t\"github.com\/projecteru2\/core\/types\"\n\t\"github.com\/projecteru2\/core\/utils\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar winchCommand = []byte{0x80} \/\/ 128, non-ASCII\nvar escapeCommand = []byte{0x1d} \/\/ 29, ^]\n\ntype window struct {\n\tHeight uint `json:\"Row\"`\n\tWidth uint `json:\"Col\"`\n}\n\nfunc execuateInside(ctx context.Context, client engine.API, ID, cmd, user string, env []string, privileged bool) ([]byte, error) {\n\tcmds := utils.MakeCommandLineArgs(cmd)\n\texecConfig := &enginetypes.ExecConfig{\n\t\tUser: user,\n\t\tCmd: cmds,\n\t\tPrivileged: privileged,\n\t\tEnv: env,\n\t\tAttachStderr: true,\n\t\tAttachStdout: true,\n\t}\n\tb := []byte{}\n\texecID, stdout, stderr, _, err := client.Execute(ctx, ID, execConfig)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\n\tfor m := range processStdStream(ctx, stdout, stderr, bufio.ScanLines, byte('\\n')) {\n\t\tb = append(b, m.Data...)\n\t}\n\n\texitCode, err := client.ExecExitCode(ctx, execID)\n\tif err != nil {\n\t\treturn b, errors.WithStack(err)\n\t}\n\tif exitCode != 0 {\n\t\treturn b, errors.WithStack(fmt.Errorf(\"%s\", b))\n\t}\n\treturn b, nil\n}\n\nfunc distributionInspect(ctx context.Context, node *types.Node, image string, digests []string) bool {\n\tremoteDigest, err := node.Engine.ImageRemoteDigest(ctx, image)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[distributionInspect] get manifest failed %v\", err)\n\t\treturn false\n\t}\n\n\tfor _, digest := range digests {\n\t\tif digest == remoteDigest {\n\t\t\tlog.Debugf(ctx, \"[distributionInspect] Local digest %s\", digest)\n\t\t\tlog.Debugf(ctx, \"[distributionInspect] Remote digest %s\", remoteDigest)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Pull an image\nfunc pullImage(ctx context.Context, node *types.Node, image string) error {\n\tlog.Infof(ctx, \"[pullImage] Pulling image %s\", image)\n\tif image == \"\" {\n\t\treturn errors.WithStack(types.ErrNoImage)\n\t}\n\n\t\/\/ check local\n\texists := false\n\tdigests, err := node.Engine.ImageLocalDigests(ctx, image)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[pullImage] Check image failed %v\", err)\n\t} else {\n\t\tlog.Debug(ctx, \"[pullImage] Local Image exists\")\n\t\texists = true\n\t}\n\n\tif exists && distributionInspect(ctx, node, image, digests) {\n\t\tlog.Debug(ctx, \"[pullImage] Image cached, skip pulling\")\n\t\treturn nil\n\t}\n\n\tlog.Info(\"[pullImage] Image not cached, pulling\")\n\trc, err := node.Engine.ImagePull(ctx, image, false)\n\tdefer utils.EnsureReaderClosed(ctx, rc)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"[pullImage] Error during pulling image %s: %v\", image, err)\n\t\treturn errors.WithStack(err)\n\t}\n\tlog.Infof(ctx, \"[pullImage] Done pulling image %s\", image)\n\treturn nil\n}\n\nfunc makeCopyMessage(id, name, path string, err error, data io.ReadCloser) 
*types.CopyMessage {\n\treturn &types.CopyMessage{\n\t\tID: id,\n\t\tName: name,\n\t\tPath: path,\n\t\tError: err,\n\t\tData: data,\n\t}\n}\n\nfunc processVirtualizationInStream(\n\tctx context.Context,\n\tinStream io.WriteCloser,\n\tinCh <-chan []byte,\n\tresizeFunc func(height, width uint) error,\n) <-chan struct{} { \/\/ nolint\n\tspecialPrefixCallback := map[string]func([]byte){\n\t\tstring(winchCommand): func(body []byte) {\n\t\t\tw := &window{}\n\t\t\tif err := json.Unmarshal(body, w); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[processVirtualizationInStream] invalid winch command: %q\", body)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := resizeFunc(w.Height, w.Width); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[processVirtualizationInStream] resize window error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\n\t\tstring(escapeCommand): func(_ []byte) {\n\t\t\tinStream.Close()\n\t\t},\n\t}\n\treturn rawProcessVirtualizationInStream(ctx, inStream, inCh, specialPrefixCallback)\n}\n\nfunc rawProcessVirtualizationInStream(\n\tctx context.Context,\n\tinStream io.WriteCloser,\n\tinCh <-chan []byte,\n\tspecialPrefixCallback map[string]func([]byte),\n) <-chan struct{} {\n\tdone := make(chan struct{})\n\tutils.SentryGo(func() {\n\t\tdefer close(done)\n\t\tdefer inStream.Close()\n\n\t\tfor cmd := range inCh {\n\t\t\tif len(cmd) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif f, ok := specialPrefixCallback[string(cmd[:1])]; ok {\n\t\t\t\tf(cmd[1:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, err := inStream.Write(cmd); err != nil {\n\t\t\t\tlog.Errorf(ctx, \"[rawProcessVirtualizationInStream] failed to write virtual input stream: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t})\n\n\treturn done\n}\n\nfunc processVirtualizationOutStream(\n\tctx context.Context,\n\toutStream io.ReadCloser,\n\tsplitFunc bufio.SplitFunc,\n\tsplit byte,\n\n) <-chan []byte {\n\toutCh := make(chan []byte)\n\tutils.SentryGo(func() {\n\t\tdefer close(outCh)\n\t\tif outStream == nil {\n\t\t\treturn\n\t\t}\n\t\tdefer outStream.Close()\n\t\tscanner := bufio.NewScanner(outStream)\n\t\tscanner.Split(splitFunc)\n\t\tfor scanner.Scan() {\n\t\t\tbs := scanner.Bytes()\n\t\t\tif split != 0 {\n\t\t\t\tbs = append(bs, split)\n\t\t\t}\n\t\t\toutCh <- bs\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tlog.Warnf(ctx, \"[processVirtualizationOutStream] failed to read output from output stream: %v\", err)\n\t\t}\n\t})\n\treturn outCh\n}\n\nfunc processBuildImageStream(ctx context.Context, reader io.ReadCloser) chan *types.BuildImageMessage {\n\tch := make(chan *types.BuildImageMessage)\n\tutils.SentryGo(func() {\n\t\tdefer close(ch)\n\t\tdefer utils.EnsureReaderClosed(ctx, reader)\n\t\tdecoder := json.NewDecoder(reader)\n\t\tfor {\n\t\t\tmessage := &types.BuildImageMessage{}\n\t\t\terr := decoder.Decode(message)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tmalformed, _ := ioutil.ReadAll(decoder.Buffered()) \/\/ TODO err check\n\t\t\t\t\tlog.Errorf(ctx, \"[processBuildImageStream] Decode image message failed %v, buffered: %s\", err, string(malformed))\n\t\t\t\t\tmessage.Error = err.Error()\n\t\t\t\t\tch <- message\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tch <- message\n\t\t}\n\t})\n\treturn ch\n}\n\nfunc processStdStream(ctx context.Context, stdout, stderr io.ReadCloser, splitFunc bufio.SplitFunc, split byte) chan types.StdStreamMessage {\n\tch := make(chan types.StdStreamMessage)\n\n\twg := sync.WaitGroup{}\n\n\twg.Add(1)\n\tutils.SentryGo(func() {\n\t\tdefer wg.Done()\n\t\tfor data := range processVirtualizationOutStream(ctx, 
stdout, splitFunc, split) {\n\t\t\tch <- types.StdStreamMessage{Data: data, StdStreamType: types.Stdout}\n\t\t}\n\t})\n\n\twg.Add(1)\n\tutils.SentryGo(func() {\n\t\tdefer wg.Done()\n\t\tfor data := range processVirtualizationOutStream(ctx, stderr, splitFunc, split) {\n\t\t\tch <- types.StdStreamMessage{Data: data, StdStreamType: types.Stderr}\n\t\t}\n\t})\n\n\tutils.SentryGo(func() {\n\t\tdefer close(ch)\n\t\twg.Wait()\n\t})\n\n\treturn ch\n}\n<|endoftext|>"} {"text":"package pkg\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/ketchuphq\/ketchup\/proto\/ketchup\/packages\"\n\t\"github.com\/ketchuphq\/ketchup\/util\/errors\"\n)\n\ntype Registry struct {\n\tURL string\n\tRegistry *packages.Registry\n\n\tmu sync.RWMutex\n}\n\nfunc (r *Registry) Proto() *packages.Registry {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn proto.Clone(r.Registry).(*packages.Registry)\n}\n\nfunc (r *Registry) Sync() error {\n\tres, err := http.Get(r.URL)\n\tif err != nil {\n\t\treturn errors.Wrap(err)\n\t}\n\tb, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn errors.Wrap(err)\n\t}\n\trepo := &packages.Registry{}\n\terr = json.Unmarshal(b, &repo)\n\tif err != nil {\n\t\treturn errors.Wrap(err)\n\t}\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.Registry = repo\n\treturn nil\n}\n\nfunc (r *Registry) Search(name string) (*packages.Package, error) {\n\terr := r.Sync()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\tfor _, p := range r.Registry.Packages {\n\t\tif p.GetName() == name {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (r *Registry) Match(re *regexp.Regexp) ([]*packages.Package, error) {\n\terr := r.Sync()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\tout := []*packages.Package{}\n\tfor _, p := range r.Registry.Packages {\n\t\tif re.MatchString(p.GetName()) {\n\t\t\tout = append(out, p)\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ FetchDefaultRegistry fetches the default registry\nfunc (m *Module) Registry(registryURL string) *Registry {\n\treturn &Registry{URL: registryURL}\n}\n\n\/\/ press registry daemon should periodically scrape\nfunc getGithubTags(p *packages.Package) {\n\t\/\/ paginate should cache\n}\n\nfunc getBitbucketTags(p *packages.Package) {\n\t\/\/ paginate should cache\n}\npkg: Comments and handle status code for registry syncing.package pkg\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"github.com\/ketchuphq\/ketchup\/proto\/ketchup\/packages\"\n\t\"github.com\/ketchuphq\/ketchup\/util\/errors\"\n)\n\ntype Registry struct {\n\tURL string\n\tRegistry *packages.Registry\n\n\tmu sync.RWMutex\n}\n\n\/\/ Proto returns a clone of the underlying registry proto\nfunc (r *Registry) Proto() *packages.Registry {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn proto.Clone(r.Registry).(*packages.Registry)\n}\n\n\/\/ Sync the repo data from the source\nfunc (r *Registry) Sync() error {\n\tres, err := http.Get(r.URL)\n\tif err != nil {\n\t\treturn errors.Wrap(err)\n\t}\n\tif res.StatusCode > 299 {\n\t\treturn errors.New(\"unexpected status code from %s: %d\", r.URL, res.StatusCode)\n\t}\n\tb, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn errors.Wrap(err)\n\t}\n\trepo := &packages.Registry{}\n\terr = json.Unmarshal(b, &repo)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err)\n\t}\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.Registry = repo\n\treturn nil\n}\n\n\/\/ Search the repo for a package with the given name\nfunc (r *Registry) Search(name string) (*packages.Package, error) {\n\terr := r.Sync()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\tfor _, p := range r.Registry.Packages {\n\t\tif p.GetName() == name {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ Match searches the registry for all packages with name matching the\n\/\/ given regex.\nfunc (r *Registry) Match(re *regexp.Regexp) ([]*packages.Package, error) {\n\terr := r.Sync()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\tout := []*packages.Package{}\n\tfor _, p := range r.Registry.Packages {\n\t\tif re.MatchString(p.GetName()) {\n\t\t\tout = append(out, p)\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ Registry creates and returns a new registry for the given url\nfunc (m *Module) Registry(registryURL string) *Registry {\n\treturn &Registry{URL: registryURL}\n}\n\n\/\/ press registry daemon should periodically scrape\nfunc getGithubTags(p *packages.Package) {\n\t\/\/ paginate should cache\n}\n\nfunc getBitbucketTags(p *packages.Package) {\n\t\/\/ paginate should cache\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bitbucket.org\/anacrolix\/go.torrent\/dht\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n)\n\ntype pingResponse struct {\n\taddr string\n\tkrpc dht.Msg\n}\n\nvar (\n\ttableFileName = flag.String(\"tableFile\", \"\", \"name of file for storing node info\")\n\tserveAddr = flag.String(\"serveAddr\", \":0\", \"local UDP address\")\n\tinfoHash = flag.String(\"infoHash\", \"\", \"torrent infohash\")\n\n\ts dht.Server\n)\n\nfunc loadTable() error {\n\tif *tableFileName == \"\" {\n\t\treturn nil\n\t}\n\tf, err := os.Open(*tableFileName)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening table file: %s\", err)\n\t}\n\tdefer f.Close()\n\tadded := 0\n\tfor {\n\t\tb := make([]byte, dht.CompactNodeInfoLen)\n\t\t_, err := io.ReadFull(f, b)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading table file: %s\", err)\n\t\t}\n\t\tvar ni dht.NodeInfo\n\t\terr = ni.UnmarshalCompact(b)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error unmarshaling compact node info: %s\", err)\n\t\t}\n\t\ts.AddNode(ni)\n\t\tadded++\n\t}\n\tlog.Printf(\"loaded %d nodes from table file\", added)\n\treturn nil\n}\n\nfunc init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tflag.Parse()\n\tswitch len(*infoHash) {\n\tcase 20:\n\tcase 40:\n\t\tif _, err := fmt.Sscanf(*infoHash, \"%x\", infoHash); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatal(\"require 20 byte infohash\")\n\t}\n\tvar err error\n\ts.Socket, err = net.ListenUDP(\"udp4\", func() *net.UDPAddr {\n\t\taddr, err := net.ResolveUDPAddr(\"udp4\", *serveAddr)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error resolving serve addr: %s\", err)\n\t\t}\n\t\treturn addr\n\t}())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts.Init()\n\terr = loadTable()\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading table: %s\", err)\n\t}\n\tlog.Printf(\"dht server on %s, ID is %q\", s.Socket.LocalAddr(), s.IDString())\n\tsetupSignals()\n}\n\nfunc saveTable() error {\n\tgoodNodes := s.Nodes()\n\tif *tableFileName == \"\" {\n\t\tif len(goodNodes) != 0 {\n\t\t\tlog.Printf(\"discarding %d good nodes!\", 
len(goodNodes))\n\t\t}\n\t\treturn nil\n\t}\n\tf, err := os.OpenFile(*tableFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening table file: %s\", err)\n\t}\n\tdefer f.Close()\n\tfor _, nodeInfo := range goodNodes {\n\t\tvar b [dht.CompactNodeInfoLen]byte\n\t\terr := nodeInfo.PutCompact(b[:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error compacting node info: %s\", err)\n\t\t}\n\t\t_, err = f.Write(b[:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error writing compact node info: %s\", err)\n\t\t}\n\t}\n\tlog.Printf(\"saved %d nodes to table file\", len(goodNodes))\n\treturn nil\n}\n\nfunc setupSignals() {\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch)\n\tgo func() {\n\t\t<-ch\n\t\ts.StopServing()\n\t}()\n}\n\nfunc main() {\n\t\/\/ go s.Bootstrap()\n\tgo func() {\n\t\tps, err := s.GetPeers(*infoHash)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor sl := range ps.Values {\n\t\t\tfor _, p := range sl {\n\t\t\t\tfmt.Println(p)\n\t\t\t}\n\t\t}\n\t\ts.StopServing()\n\t}()\n\terr := s.Serve()\n\tif err := saveTable(); err != nil {\n\t\tlog.Printf(\"error saving node table: %s\", err)\n\t}\n\tif err != nil {\n\t\tlog.Fatalf(\"error serving dht: %s\", err)\n\t}\n}\ndht-get-peers: Some improvementspackage main\n\nimport (\n\t\"bitbucket.org\/anacrolix\/go.torrent\/dht\"\n\t\"bitbucket.org\/anacrolix\/go.torrent\/tracker\"\n\t_ \"bitbucket.org\/anacrolix\/go.torrent\/util\/profile\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n)\n\ntype pingResponse struct {\n\taddr string\n\tkrpc dht.Msg\n}\n\nvar (\n\ttableFileName = flag.String(\"tableFile\", \"\", \"name of file for storing node info\")\n\tserveAddr = flag.String(\"serveAddr\", \":0\", \"local UDP address\")\n\tinfoHash = flag.String(\"infoHash\", \"\", \"torrent infohash\")\n\n\ts dht.Server\n)\n\nfunc loadTable() error {\n\tif *tableFileName == \"\" {\n\t\treturn nil\n\t}\n\tf, err := os.Open(*tableFileName)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening table file: %s\", err)\n\t}\n\tdefer f.Close()\n\tadded := 0\n\tfor {\n\t\tb := make([]byte, dht.CompactNodeInfoLen)\n\t\t_, err := io.ReadFull(f, b)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading table file: %s\", err)\n\t\t}\n\t\tvar ni dht.NodeInfo\n\t\terr = ni.UnmarshalCompact(b)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error unmarshaling compact node info: %s\", err)\n\t\t}\n\t\ts.AddNode(ni)\n\t\tadded++\n\t}\n\tlog.Printf(\"loaded %d nodes from table file\", added)\n\treturn nil\n}\n\nfunc init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tflag.Parse()\n\tswitch len(*infoHash) {\n\tcase 20:\n\tcase 40:\n\t\t_, err := fmt.Sscanf(*infoHash, \"%x\", infoHash)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatal(\"require 20 byte infohash\")\n\t}\n\tvar err error\n\ts.Socket, err = net.ListenUDP(\"udp4\", func() *net.UDPAddr {\n\t\taddr, err := net.ResolveUDPAddr(\"udp4\", *serveAddr)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"error resolving serve addr: %s\", err)\n\t\t}\n\t\treturn addr\n\t}())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts.Init()\n\terr = loadTable()\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading table: %s\", err)\n\t}\n\tlog.Printf(\"dht server on %s, ID is %q\", s.Socket.LocalAddr(), s.IDString())\n\tsetupSignals()\n}\n\nfunc saveTable() error {\n\tgoodNodes := s.Nodes()\n\tif 
*tableFileName == \"\" {\n\t\tif len(goodNodes) != 0 {\n\t\t\tlog.Printf(\"discarding %d good nodes!\", len(goodNodes))\n\t\t}\n\t\treturn nil\n\t}\n\tf, err := os.OpenFile(*tableFileName, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening table file: %s\", err)\n\t}\n\tdefer f.Close()\n\tfor _, nodeInfo := range goodNodes {\n\t\tvar b [dht.CompactNodeInfoLen]byte\n\t\terr := nodeInfo.PutCompact(b[:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error compacting node info: %s\", err)\n\t\t}\n\t\t_, err = f.Write(b[:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error writing compact node info: %s\", err)\n\t\t}\n\t}\n\tlog.Printf(\"saved %d nodes to table file\", len(goodNodes))\n\treturn nil\n}\n\nfunc setupSignals() {\n\tch := make(chan os.Signal)\n\tsignal.Notify(ch, os.Interrupt)\n\tgo func() {\n\t\t<-ch\n\t\ts.StopServing()\n\t}()\n}\n\nfunc main() {\n\tgo func() {\n\t\tdefer s.StopServing()\n\t\tif err := s.Bootstrap(); err != nil {\n\t\t\tlog.Printf(\"error bootstrapping: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tsaveTable()\n\t\tps, err := s.GetPeers(*infoHash)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tseen := make(map[tracker.CompactPeer]struct{})\n\t\tfor sl := range ps.Values {\n\t\t\tfor _, p := range sl {\n\t\t\t\tif _, ok := seen[p]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tseen[p] = struct{}{}\n\t\t\t\tfmt.Println((&net.UDPAddr{\n\t\t\t\t\tIP: p.IP[:],\n\t\t\t\t\tPort: int(p.Port),\n\t\t\t\t}).String())\n\t\t\t}\n\t\t}\n\t}()\n\terr := s.Serve()\n\tif err := saveTable(); err != nil {\n\t\tlog.Printf(\"error saving node table: %s\", err)\n\t}\n\tif err != nil {\n\t\tlog.Fatalf(\"error serving dht: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/funkygao\/golib\/gofmt\"\n\t\"github.com\/funkygao\/golib\/top\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\ntype Haproxy struct {\n\tUi cli.Ui\n\tCmd string\n\n\tzone string\n}\n\nfunc (this *Haproxy) Run(args []string) (exitCode int) {\n\tvar topMode bool\n\tcmdFlags := flag.NewFlagSet(\"haproxy\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&this.zone, \"z\", ctx.DefaultZone(), \"\")\n\tcmdFlags.BoolVar(&topMode, \"top\", true, \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tzone := ctx.Zone(this.zone)\n\tif topMode {\n\t\theader, _ := this.getStats(zone.HaProxyStatsUri[0])\n\t\tt := top.New(header, \"%8s %4s %15s %15s %8s %6s %8s %10s %8s %8s %5s %7s %9s %6s\")\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\trows := make([]string, 0)\n\t\t\t\tfor _, uri := range zone.HaProxyStatsUri {\n\t\t\t\t\t_, r := this.getStats(uri)\n\t\t\t\t\trows = append(rows, r...)\n\t\t\t\t}\n\t\t\t\tt.Refresh(rows)\n\n\t\t\t\ttime.Sleep(time.Second * 3)\n\t\t\t}\n\t\t}()\n\t\tif err := t.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tfor _, uri := range zone.HaProxyStatsUri {\n\t\t\tthis.fetchStats(uri)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (*Haproxy) Synopsis() string {\n\treturn \"Query ehaproxy cluster for load stats\"\n}\n\nfunc (this *Haproxy) getStats(statsUri string) (header string, rows []string) {\n\tclient := http.Client{Timeout: time.Second * 30}\n\tresp, err := client.Get(statsUri)\n\tswallow(err)\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 
http.StatusOK {\n\t\tswallow(fmt.Errorf(\"fetch[%s] stats got status: %d\", statsUri, resp.StatusCode))\n\t}\n\n\tvar records map[string]map[string]int64\n\treader := json.NewDecoder(resp.Body)\n\terr = reader.Decode(&records)\n\tswallow(err)\n\n\tu, err := url.Parse(statsUri)\n\tswallow(err)\n\tvar shortHostname string\n\tif strings.Contains(u.Host, \":\") {\n\t\tu.Host = u.Host[:strings.Index(u.Host, \":\")]\n\t}\n\ttuples := strings.SplitN(u.Host, \".\", 4)\n\tif len(tuples) < 4 {\n\t\tshortHostname = u.Host\n\t} else {\n\t\tshortHostname = tuples[3]\n\t}\n\tif len(shortHostname) > 8 {\n\t\tshortHostname = shortHostname[:8]\n\t}\n\n\tsortedSvcs := make([]string, 0)\n\tfor svc, _ := range records {\n\t\tsortedSvcs = append(sortedSvcs, svc)\n\t}\n\tsort.Strings(sortedSvcs)\n\n\tsortedCols := make([]string, 0)\n\tfor k, _ := range records[\"pub\"] {\n\t\tsortedCols = append(sortedCols, k)\n\t}\n\tsort.Strings(sortedCols)\n\n\theader = strings.Join(append([]string{\"host\", \"svc\"}, sortedCols...), \"|\")\n\tfor _, svc := range sortedSvcs {\n\t\tstats := records[svc]\n\n\t\tvar vals = []string{shortHostname, svc}\n\t\tfor _, k := range sortedCols {\n\t\t\tv := stats[k]\n\n\t\t\tvals = append(vals, gofmt.Comma(v))\n\t\t}\n\n\t\trows = append(rows, strings.Join(vals, \"|\"))\n\t}\n\n\treturn\n}\n\nfunc (this *Haproxy) fetchStats(statsUri string) {\n\tclient := http.Client{Timeout: time.Second * 30}\n\tresp, err := client.Get(statsUri)\n\tswallow(err)\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tswallow(fmt.Errorf(\"fetch[%s] stats got status: %d\", statsUri, resp.StatusCode))\n\t}\n\n\tvar records map[string]map[string]int64\n\treader := json.NewDecoder(resp.Body)\n\terr = reader.Decode(&records)\n\tswallow(err)\n\n\tu, err := url.Parse(statsUri)\n\tswallow(err)\n\tthis.Ui.Info(u.Host)\n\n\tsortedSvcs := make([]string, 0)\n\tfor svc, _ := range records {\n\t\tsortedSvcs = append(sortedSvcs, svc)\n\t}\n\tsort.Strings(sortedSvcs)\n\n\tsortedCols := make([]string, 0)\n\tfor k, _ := range records[\"pub\"] {\n\t\tsortedCols = append(sortedCols, k)\n\t}\n\tsort.Strings(sortedCols)\n\n\tlines := []string{strings.Join(append([]string{\"svc\"}, sortedCols...), \"|\")}\n\tfor _, svc := range sortedSvcs {\n\t\tstats := records[svc]\n\n\t\tvar vals = []string{svc}\n\t\tfor _, k := range sortedCols {\n\t\t\tv := stats[k]\n\n\t\t\tvals = append(vals, gofmt.Comma(v))\n\t\t}\n\n\t\tlines = append(lines, strings.Join(vals, \"|\"))\n\t}\n\n\tthis.Ui.Output(columnize.SimpleFormat(lines))\n}\n\nfunc (this *Haproxy) Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s haproxy [options]\n\n %s\n\nOptions:\n\n -z zone\n\n -top\n Top mode\n\n`, this.Cmd, this.Synopsis())\n\treturn strings.TrimSpace(help)\n}\nrefmtpackage command\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/funkygao\/golib\/gofmt\"\n\t\"github.com\/funkygao\/golib\/top\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\ntype Haproxy struct {\n\tUi cli.Ui\n\tCmd string\n\n\tzone string\n}\n\nfunc (this *Haproxy) Run(args []string) (exitCode int) {\n\tvar topMode bool\n\tcmdFlags := flag.NewFlagSet(\"haproxy\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&this.zone, \"z\", ctx.DefaultZone(), \"\")\n\tcmdFlags.BoolVar(&topMode, \"top\", true, \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 
1\n\t}\n\n\tzone := ctx.Zone(this.zone)\n\tif topMode {\n\t\theader, _ := this.getStats(zone.HaProxyStatsUri[0])\n\t\tt := top.New(header, \"%8s %4s %21s %21s %9s %6s %8s %12s %8s %8s %7s %7s %14s %6s\")\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\trows := make([]string, 0)\n\t\t\t\tfor _, uri := range zone.HaProxyStatsUri {\n\t\t\t\t\t_, r := this.getStats(uri)\n\t\t\t\t\trows = append(rows, r...)\n\t\t\t\t}\n\t\t\t\tt.Refresh(rows)\n\n\t\t\t\ttime.Sleep(time.Second * 3)\n\t\t\t}\n\t\t}()\n\t\tif err := t.Start(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tfor _, uri := range zone.HaProxyStatsUri {\n\t\t\tthis.fetchStats(uri)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (*Haproxy) Synopsis() string {\n\treturn \"Query ehaproxy cluster for load stats\"\n}\n\nfunc (this *Haproxy) getStats(statsUri string) (header string, rows []string) {\n\tclient := http.Client{Timeout: time.Second * 30}\n\tresp, err := client.Get(statsUri)\n\tswallow(err)\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tswallow(fmt.Errorf(\"fetch[%s] stats got status: %d\", statsUri, resp.StatusCode))\n\t}\n\n\tvar records map[string]map[string]int64\n\treader := json.NewDecoder(resp.Body)\n\terr = reader.Decode(&records)\n\tswallow(err)\n\n\tu, err := url.Parse(statsUri)\n\tswallow(err)\n\tvar shortHostname string\n\tif strings.Contains(u.Host, \":\") {\n\t\tu.Host = u.Host[:strings.Index(u.Host, \":\")]\n\t}\n\ttuples := strings.SplitN(u.Host, \".\", 4)\n\tif len(tuples) < 4 {\n\t\tshortHostname = u.Host\n\t} else {\n\t\tshortHostname = tuples[3]\n\t}\n\tif len(shortHostname) > 8 {\n\t\tshortHostname = shortHostname[:8]\n\t}\n\n\tsortedSvcs := make([]string, 0)\n\tfor svc, _ := range records {\n\t\tsortedSvcs = append(sortedSvcs, svc)\n\t}\n\tsort.Strings(sortedSvcs)\n\n\tsortedCols := make([]string, 0)\n\tfor k, _ := range records[\"pub\"] {\n\t\tsortedCols = append(sortedCols, k)\n\t}\n\tsort.Strings(sortedCols)\n\n\theader = strings.Join(append([]string{\"host\", \"svc\"}, sortedCols...), \"|\")\n\tfor _, svc := range sortedSvcs {\n\t\tstats := records[svc]\n\n\t\tvar vals = []string{shortHostname, svc}\n\t\tfor _, k := range sortedCols {\n\t\t\tv := stats[k]\n\n\t\t\tvals = append(vals, gofmt.Comma(v))\n\t\t}\n\n\t\trows = append(rows, strings.Join(vals, \"|\"))\n\t}\n\n\treturn\n}\n\nfunc (this *Haproxy) fetchStats(statsUri string) {\n\tclient := http.Client{Timeout: time.Second * 30}\n\tresp, err := client.Get(statsUri)\n\tswallow(err)\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tswallow(fmt.Errorf(\"fetch[%s] stats got status: %d\", statsUri, resp.StatusCode))\n\t}\n\n\tvar records map[string]map[string]int64\n\treader := json.NewDecoder(resp.Body)\n\terr = reader.Decode(&records)\n\tswallow(err)\n\n\tu, err := url.Parse(statsUri)\n\tswallow(err)\n\tthis.Ui.Info(u.Host)\n\n\tsortedSvcs := make([]string, 0)\n\tfor svc, _ := range records {\n\t\tsortedSvcs = append(sortedSvcs, svc)\n\t}\n\tsort.Strings(sortedSvcs)\n\n\tsortedCols := make([]string, 0)\n\tfor k, _ := range records[\"pub\"] {\n\t\tsortedCols = append(sortedCols, k)\n\t}\n\tsort.Strings(sortedCols)\n\n\tlines := []string{strings.Join(append([]string{\"svc\"}, sortedCols...), \"|\")}\n\tfor _, svc := range sortedSvcs {\n\t\tstats := records[svc]\n\n\t\tvar vals = []string{svc}\n\t\tfor _, k := range sortedCols {\n\t\t\tv := stats[k]\n\n\t\t\tvals = append(vals, gofmt.Comma(v))\n\t\t}\n\n\t\tlines = append(lines, strings.Join(vals, \"|\"))\n\t}\n\n\tthis.Ui.Output(columnize.SimpleFormat(lines))\n}\n\nfunc (this *Haproxy) 
Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s haproxy [options]\n\n %s\n\nOptions:\n\n -z zone\n\n -top\n Top mode\n\n`, this.Cmd, this.Synopsis())\n\treturn strings.TrimSpace(help)\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/api\"\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gafka\/zk\"\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/funkygao\/golib\/color\"\n\t\"github.com\/funkygao\/golib\/gofmt\"\n\t\"github.com\/funkygao\/golib\/pipestream\"\n\tzklib \"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\ntype Kateway struct {\n\tUi cli.Ui\n\tCmd string\n\n\tzone string\n\tid string\n\tconfigMode bool\n\tlogLevel string\n\tconfigOption string\n\tlongFmt bool\n\tresetCounter string\n\tlistClients bool\n\tvisualLog string\n\tcheckup bool\n}\n\nfunc (this *Kateway) Run(args []string) (exitCode int) {\n\tcmdFlags := flag.NewFlagSet(\"kateway\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&this.zone, \"z\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.configMode, \"cf\", false, \"\")\n\tcmdFlags.StringVar(&this.id, \"id\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.longFmt, \"l\", false, \"\")\n\tcmdFlags.StringVar(&this.configOption, \"option\", \"\", \"\")\n\tcmdFlags.StringVar(&this.resetCounter, \"reset\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.listClients, \"clients\", false, \"\")\n\tcmdFlags.StringVar(&this.logLevel, \"loglevel\", \"\", \"\")\n\tcmdFlags.StringVar(&this.visualLog, \"visualog\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.checkup, \"checkup\", false, \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 2\n\t}\n\n\tif this.visualLog != \"\" {\n\t\tthis.doVisualize()\n\t\treturn\n\t}\n\n\tif this.configMode {\n\t\tif validateArgs(this, this.Ui).\n\t\t\trequire(\"-z\").\n\t\t\trequireAdminRights(\"-z\").\n\t\t\tinvalid(args) {\n\t\t\treturn 2\n\t\t}\n\n\t\tzkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))\n\t\tif this.logLevel != \"\" {\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil {\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %s invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"log\/%s\", this.logLevel))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws {\n\t\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"log\/%s\", this.logLevel))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif this.resetCounter != \"\" {\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil {\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %s invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"DELETE\", fmt.Sprintf(\"counter\/%s\", this.resetCounter))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws {\n\t\t\t\t\tthis.callKateway(kw, \"DELETE\", fmt.Sprintf(\"counter\/%s\", this.resetCounter))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif this.configOption != \"\" {\n\t\t\tparts := strings.SplitN(this.configOption, \"=\", 2)\n\t\t\tk, v := parts[0], parts[1]\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil 
{\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %d invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"options\/%s\/%s\", k, v))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws {\n\t\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"options\/%s\/%s\", k, v))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\tif this.checkup {\n\t\tif validateArgs(this, this.Ui).\n\t\t\trequire(\"-z\").\n\t\t\trequireAdminRights(\"-z\").\n\t\t\tinvalid(args) {\n\t\t\treturn 2\n\t\t}\n\n\t\tzkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))\n\t\tthis.runCheckup(zkzone)\n\t\treturn\n\t}\n\n\t\/\/ display mode\n\tforSortedZones(func(zkzone *zk.ZkZone) {\n\t\tif this.zone != \"\" && zkzone.Name() != this.zone {\n\t\t\treturn\n\t\t}\n\n\t\tmysqlDsn, err := zkzone.KatewayMysqlDsn()\n\t\tif err != nil {\n\t\t\tthis.Ui.Error(err.Error())\n\t\t\tthis.Ui.Warn(fmt.Sprintf(\"kateway[%s] mysql DSN not set on zk yet\", this.zone))\n\t\t\tthis.Ui.Output(\"e,g.\")\n\t\t\tthis.Ui.Output(fmt.Sprintf(\"%s pubsub:pubsub@tcp(10.77.135.217:10010)\/pubsub?charset=utf8&timeout=10s\",\n\t\t\t\tzk.KatewayMysqlPath))\n\t\t\treturn\n\t\t}\n\t\tthis.Ui.Output(fmt.Sprintf(\"zone[%s] manager db: %s\", color.Blue(zkzone.Name()), mysqlDsn))\n\n\t\tkateways, err := zkzone.KatewayInfos()\n\t\tif err != nil {\n\t\t\tif err == zklib.ErrNoNode {\n\t\t\t\tthis.Ui.Output(\"no kateway running\")\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tswallow(err)\n\t\t\t}\n\t\t}\n\n\t\tfor _, kw := range kateways {\n\t\t\tif this.id != \"\" && this.id != kw.Id {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tthis.Ui.Info(fmt.Sprintf(\"id:%-2s host:%s cpu:%-2s up:%s\",\n\t\t\t\tkw.Id, kw.Host, kw.Cpu,\n\t\t\t\tgofmt.PrettySince(kw.Ctime)))\n\t\t\tthis.Ui.Output(fmt.Sprintf(\" ver: %s\\n build: %s\\n built: %s\\n log: %s\\n pub: %s\\n sub: %s\\n man: %s\\n dbg: %s\",\n\t\t\t\tkw.Ver,\n\t\t\t\tkw.Build,\n\t\t\t\tkw.BuiltAt,\n\t\t\t\tthis.getKatewayLogLevel(kw.ManAddr),\n\t\t\t\tkw.PubAddr,\n\t\t\t\tkw.SubAddr,\n\t\t\t\tkw.ManAddr,\n\t\t\t\tkw.DebugAddr,\n\t\t\t))\n\n\t\t\tif this.longFmt {\n\t\t\t\tthis.Ui.Output(\" full status:\")\n\t\t\t\tthis.Ui.Output(this.getKatewayStatus(kw.ManAddr))\n\t\t\t}\n\n\t\t\tif this.listClients {\n\t\t\t\tclients := this.getClientsInfo(kw.ManAddr)\n\t\t\t\tthis.Ui.Output(\" pub clients:\")\n\t\t\t\tpubClients := clients[\"pub\"]\n\t\t\t\tsort.Strings(pubClients)\n\t\t\t\tfor _, client := range pubClients {\n\t\t\t\t\t\/\/ pub client in blue\n\t\t\t\t\tthis.Ui.Output(color.Blue(\" %s\", client))\n\t\t\t\t}\n\n\t\t\t\tthis.Ui.Output(\" sub clients:\")\n\t\t\t\tsubClients := clients[\"sub\"]\n\t\t\t\tsort.Strings(subClients)\n\t\t\t\tfor _, client := range subClients {\n\t\t\t\t\t\/\/ sub client in\n\t\t\t\t\tthis.Ui.Output(color.Yellow(\" %s\", client))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t})\n\n\treturn\n}\n\nfunc (this *Kateway) getClientsInfo(url string) map[string][]string {\n\turl = fmt.Sprintf(\"http:\/\/%s\/clients\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar v map[string][]string\n\tjson.Unmarshal(body, &v)\n\treturn v\n}\n\nfunc (this Kateway) getKatewayStatus(url string) string {\n\turl = fmt.Sprintf(\"http:\/\/%s\/status\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn string(body)\n}\n\nfunc (this *Kateway) getKatewayLogLevel(url string) string {\n\turl = 
fmt.Sprintf(\"http:\/\/%s\/status\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\tvar v map[string]interface{}\n\tjson.Unmarshal(body, &v)\n\treturn v[\"loglevel\"].(string)\n}\n\nfunc (this *Kateway) callHttp(url string, method string) (body []byte, err error) {\n\tvar req *http.Request\n\treq, err = http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response *http.Response\n\ttimeout := time.Second * 10\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 1,\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: timeout,\n\t\t\t}).Dial,\n\t\t\tDisableKeepAlives: true,\n\t\t\tResponseHeaderTimeout: timeout,\n\t\t\tTLSHandshakeTimeout: timeout,\n\t\t},\n\t}\n\n\tresponse, err = client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbody, err = ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tthis.Ui.Error(fmt.Sprintf(\"%s %s %s\", url, response.Status, string(body)))\n\t}\n\n\treturn\n}\n\nfunc (this *Kateway) callKateway(kw *zk.KatewayMeta, method string, uri string) (err error) {\n\turl := fmt.Sprintf(\"http:\/\/%s\/%s\", kw.ManAddr, uri)\n\t_, err = this.callHttp(url, method)\n\treturn\n}\n\nfunc (this *Kateway) runCheckup(zkzone *zk.ZkZone) {\n\tvar (\n\t\tmyApp string\n\t\thisApp string\n\t\tsecret string\n\t\tver string = \"v1\"\n\t\ttopic string = \"smoketestonly\"\n\t)\n\tswitch this.zone {\n\tcase \"sit\":\n\t\tmyApp = \"35\"\n\t\thisApp = \"35\"\n\t\tsecret = \"04dd44d8dad048e6a18ffd153eb8f642\"\n\n\tcase \"prod\":\n\t\tmyApp = \"30\"\n\t\thisApp = \"30\"\n\t\tsecret = \"32f02594f55743eeb1efcf75db6dd8a0\"\n\t}\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tkws, err := zkzone.KatewayInfos()\n\tswallow(err)\n\tfor _, kw := range kws {\n\t\tif this.id != \"\" && kw.Id != this.id {\n\t\t\tcontinue\n\t\t}\n\n\t\tcf := api.DefaultConfig()\n\t\tcf.AppId = myApp\n\t\tcf.Debug = false\n\t\tcf.Secret = secret\n\t\tcli := api.NewClient(myApp, cf)\n\t\tcli.Connect(fmt.Sprintf(\"http:\/\/%s\", kw.PubAddr))\n\t\tmsgId := rand.Int()\n\t\tmsg := fmt.Sprintf(\"smoke %d\", msgId)\n\t\tthis.Ui.Output(fmt.Sprintf(\"Pub: %s\", msg))\n\t\terr := cli.Publish(topic, ver, \"\", []byte(msg))\n\t\tswallow(err)\n\n\t\tcli.Connect(fmt.Sprintf(\"http:\/\/%s\", kw.SubAddr))\n\t\tcli.Subscribe(hisApp, topic, ver, \"__smoketestonly__\", func(statusCode int, msg []byte) error {\n\t\t\tif statusCode == http.StatusNoContent {\n\t\t\t\tthis.Ui.Output(\"no content, sub again\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tthis.Ui.Output(fmt.Sprintf(\"Sub: %s, http:%s\", string(msg),\n\t\t\t\thttp.StatusText(statusCode)))\n\n\t\t\treturn api.ErrSubStop\n\t\t})\n\n\t\tthis.Ui.Info(fmt.Sprintf(\"curl -H'Appid: %s' -H'Subkey: %s' -i http:\/\/%s\/status\/%s\/%s\/%s\",\n\t\t\tmyApp, secret, kw.SubAddr, hisApp, topic, ver))\n\n\t\t\/\/ 1. 查询某个pubsub topic的partition数量\n\t\t\/\/ 2. 查看pubsub系统某个topic的生产、消费状态\n\t\t\/\/ 3. pub\n\t\t\/\/ 4. 
sub\n\t}\n\n}\n\nfunc (this *Kateway) doVisualize() {\n\tcmd := pipestream.New(\"\/usr\/local\/bin\/logstalgia\", \"-f\", this.visualLog)\n\terr := cmd.Open()\n\tswallow(err)\n\tdefer cmd.Close()\n\n\tscanner := bufio.NewScanner(cmd.Reader())\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t}\n}\n\nfunc (*Kateway) Synopsis() string {\n\treturn \"List\/Config online kateway instances\"\n}\n\nfunc (this *Kateway) Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s kateway -z zone [options]\n\n List\/Config online kateway instances\n\nOptions:\n\n -checkup\n Checkup for online kateway instances\n\n -visualog access log filename\n Visualize the kateway access log with Logstalgia\n You must install Logstalgia beforehand\n\n -id kateway id\n Execute on a single kateway instance. By default, apply on all\n\n -clients\n List online pub\/sub clients\n\n -l\n Use a long listing format\n \n -cf\n Enter config mode\n\n -reset metrics name\n Reset kateway metric counter by name\n\n -loglevel \n Set kateway log level\n \n -option =\n Set kateway options value\n\n`, this.Cmd)\n\treturn strings.TrimSpace(help)\n}\nfix compile errpackage command\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/api\"\n\t\"github.com\/funkygao\/gafka\/ctx\"\n\t\"github.com\/funkygao\/gafka\/zk\"\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/funkygao\/golib\/color\"\n\t\"github.com\/funkygao\/golib\/gofmt\"\n\t\"github.com\/funkygao\/golib\/pipestream\"\n\tzklib \"github.com\/samuel\/go-zookeeper\/zk\"\n)\n\ntype Kateway struct {\n\tUi cli.Ui\n\tCmd string\n\n\tzone string\n\tid string\n\tconfigMode bool\n\tlogLevel string\n\tconfigOption string\n\tlongFmt bool\n\tresetCounter string\n\tlistClients bool\n\tvisualLog string\n\tcheckup bool\n}\n\nfunc (this *Kateway) Run(args []string) (exitCode int) {\n\tcmdFlags := flag.NewFlagSet(\"kateway\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&this.zone, \"z\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.configMode, \"cf\", false, \"\")\n\tcmdFlags.StringVar(&this.id, \"id\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.longFmt, \"l\", false, \"\")\n\tcmdFlags.StringVar(&this.configOption, \"option\", \"\", \"\")\n\tcmdFlags.StringVar(&this.resetCounter, \"reset\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.listClients, \"clients\", false, \"\")\n\tcmdFlags.StringVar(&this.logLevel, \"loglevel\", \"\", \"\")\n\tcmdFlags.StringVar(&this.visualLog, \"visualog\", \"\", \"\")\n\tcmdFlags.BoolVar(&this.checkup, \"checkup\", false, \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 2\n\t}\n\n\tif this.visualLog != \"\" {\n\t\tthis.doVisualize()\n\t\treturn\n\t}\n\n\tif this.configMode {\n\t\tif validateArgs(this, this.Ui).\n\t\t\trequire(\"-z\").\n\t\t\trequireAdminRights(\"-z\").\n\t\t\tinvalid(args) {\n\t\t\treturn 2\n\t\t}\n\n\t\tzkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))\n\t\tif this.logLevel != \"\" {\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil {\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %s invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"log\/%s\", this.logLevel))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws 
{\n\t\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"log\/%s\", this.logLevel))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif this.resetCounter != \"\" {\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil {\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %d invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"DELETE\", fmt.Sprintf(\"counter\/%s\", this.resetCounter))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws {\n\t\t\t\t\tthis.callKateway(kw, \"DELETE\", fmt.Sprintf(\"counter\/%s\", this.resetCounter))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif this.configOption != \"\" {\n\t\t\tparts := strings.SplitN(this.configOption, \"=\", 2)\n\t\t\tk, v := parts[0], parts[1]\n\t\t\tif this.id != \"\" {\n\t\t\t\tkw := zkzone.KatewayInfoById(this.id)\n\t\t\t\tif kw == nil {\n\t\t\t\t\tpanic(fmt.Sprintf(\"kateway %d invalid entry found in zk\", this.id))\n\t\t\t\t}\n\n\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"options\/%s\/%s\", k, v))\n\t\t\t} else {\n\t\t\t\t\/\/ apply on all kateways\n\t\t\t\tkws, _ := zkzone.KatewayInfos()\n\t\t\t\tfor _, kw := range kws {\n\t\t\t\t\tthis.callKateway(kw, \"PUT\", fmt.Sprintf(\"options\/%s\/%s\", k, v))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n\tif this.checkup {\n\t\tif validateArgs(this, this.Ui).\n\t\t\trequire(\"-z\").\n\t\t\trequireAdminRights(\"-z\").\n\t\t\tinvalid(args) {\n\t\t\treturn 2\n\t\t}\n\n\t\tzkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))\n\t\tthis.runCheckup(zkzone)\n\t\treturn\n\t}\n\n\t\/\/ display mode\n\tforSortedZones(func(zkzone *zk.ZkZone) {\n\t\tif this.zone != \"\" && zkzone.Name() != this.zone {\n\t\t\treturn\n\t\t}\n\n\t\tmysqlDsn, err := zkzone.KatewayMysqlDsn()\n\t\tif err != nil {\n\t\t\tthis.Ui.Error(err.Error())\n\t\t\tthis.Ui.Warn(fmt.Sprintf(\"kateway[%s] mysql DSN not set on zk yet\", this.zone))\n\t\t\tthis.Ui.Output(\"e,g.\")\n\t\t\tthis.Ui.Output(fmt.Sprintf(\"%s pubsub:pubsub@tcp(10.77.135.217:10010)\/pubsub?charset=utf8&timeout=10s\",\n\t\t\t\tzk.KatewayMysqlPath))\n\t\t\treturn\n\t\t}\n\t\tthis.Ui.Output(fmt.Sprintf(\"zone[%s] manager db: %s\", color.Blue(zkzone.Name()), mysqlDsn))\n\n\t\tkateways, err := zkzone.KatewayInfos()\n\t\tif err != nil {\n\t\t\tif err == zklib.ErrNoNode {\n\t\t\t\tthis.Ui.Output(\"no kateway running\")\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tswallow(err)\n\t\t\t}\n\t\t}\n\n\t\tfor _, kw := range kateways {\n\t\t\tif this.id != \"\" && this.id != kw.Id {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tthis.Ui.Info(fmt.Sprintf(\"id:%-2s host:%s cpu:%-2s up:%s\",\n\t\t\t\tkw.Id, kw.Host, kw.Cpu,\n\t\t\t\tgofmt.PrettySince(kw.Ctime)))\n\t\t\tthis.Ui.Output(fmt.Sprintf(\" ver: %s\\n build: %s\\n built: %s\\n log: %s\\n pub: %s\\n sub: %s\\n man: %s\\n dbg: %s\",\n\t\t\t\tkw.Ver,\n\t\t\t\tkw.Build,\n\t\t\t\tkw.BuiltAt,\n\t\t\t\tthis.getKatewayLogLevel(kw.ManAddr),\n\t\t\t\tkw.PubAddr,\n\t\t\t\tkw.SubAddr,\n\t\t\t\tkw.ManAddr,\n\t\t\t\tkw.DebugAddr,\n\t\t\t))\n\n\t\t\tif this.longFmt {\n\t\t\t\tthis.Ui.Output(\" full status:\")\n\t\t\t\tthis.Ui.Output(this.getKatewayStatus(kw.ManAddr))\n\t\t\t}\n\n\t\t\tif this.listClients {\n\t\t\t\tclients := this.getClientsInfo(kw.ManAddr)\n\t\t\t\tthis.Ui.Output(\" pub clients:\")\n\t\t\t\tpubClients := clients[\"pub\"]\n\t\t\t\tsort.Strings(pubClients)\n\t\t\t\tfor _, client := range pubClients {\n\t\t\t\t\t\/\/ pub client in blue\n\t\t\t\t\tthis.Ui.Output(color.Blue(\" %s\", 
client))\n\t\t\t\t}\n\n\t\t\t\tthis.Ui.Output(\" sub clients:\")\n\t\t\t\tsubClients := clients[\"sub\"]\n\t\t\t\tsort.Strings(subClients)\n\t\t\t\tfor _, client := range subClients {\n\t\t\t\t\t\/\/ sub client in\n\t\t\t\t\tthis.Ui.Output(color.Yellow(\" %s\", client))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t})\n\n\treturn\n}\n\nfunc (this *Kateway) getClientsInfo(url string) map[string][]string {\n\turl = fmt.Sprintf(\"http:\/\/%s\/clients\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar v map[string][]string\n\tjson.Unmarshal(body, &v)\n\treturn v\n}\n\nfunc (this Kateway) getKatewayStatus(url string) string {\n\turl = fmt.Sprintf(\"http:\/\/%s\/status\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn string(body)\n}\n\nfunc (this *Kateway) getKatewayLogLevel(url string) string {\n\turl = fmt.Sprintf(\"http:\/\/%s\/status\", url)\n\tbody, err := this.callHttp(url, \"GET\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\tvar v map[string]interface{}\n\tjson.Unmarshal(body, &v)\n\treturn v[\"loglevel\"].(string)\n}\n\nfunc (this *Kateway) callHttp(url string, method string) (body []byte, err error) {\n\tvar req *http.Request\n\treq, err = http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar response *http.Response\n\ttimeout := time.Second * 10\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 1,\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: timeout,\n\t\t\t}).Dial,\n\t\t\tDisableKeepAlives: true,\n\t\t\tResponseHeaderTimeout: timeout,\n\t\t\tTLSHandshakeTimeout: timeout,\n\t\t},\n\t}\n\n\tresponse, err = client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbody, err = ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tthis.Ui.Error(fmt.Sprintf(\"%s %s %s\", url, response.Status, string(body)))\n\t}\n\n\treturn\n}\n\nfunc (this *Kateway) callKateway(kw *zk.KatewayMeta, method string, uri string) (err error) {\n\turl := fmt.Sprintf(\"http:\/\/%s\/%s\", kw.ManAddr, uri)\n\t_, err = this.callHttp(url, method)\n\treturn\n}\n\nfunc (this *Kateway) runCheckup(zkzone *zk.ZkZone) {\n\tvar (\n\t\tmyApp string\n\t\thisApp string\n\t\tsecret string\n\t\tver string = \"v1\"\n\t\ttopic string = \"smoketestonly\"\n\t)\n\tswitch this.zone {\n\tcase \"sit\":\n\t\tmyApp = \"35\"\n\t\thisApp = \"35\"\n\t\tsecret = \"04dd44d8dad048e6a18ffd153eb8f642\"\n\n\tcase \"prod\":\n\t\tmyApp = \"30\"\n\t\thisApp = \"30\"\n\t\tsecret = \"32f02594f55743eeb1efcf75db6dd8a0\"\n\t}\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tkws, err := zkzone.KatewayInfos()\n\tswallow(err)\n\tfor _, kw := range kws {\n\t\tif this.id != \"\" && kw.Id != this.id {\n\t\t\tcontinue\n\t\t}\n\n\t\tcf := api.DefaultConfig()\n\t\tcf.AppId = myApp\n\t\tcf.Debug = false\n\t\tcf.Secret = secret\n\t\tcf.PubEndpoint = fmt.Sprintf(\"http:\/\/%s\", kw.PubAddr)\n\t\tcf.SubEndpoint = fmt.Sprintf(\"http:\/\/%s\", kw.SubAddr)\n\t\tcli := api.NewClient(cf)\n\t\tmsgId := rand.Int()\n\t\tmsg := fmt.Sprintf(\"smoke %d\", msgId)\n\t\tthis.Ui.Output(fmt.Sprintf(\"Pub: %s\", msg))\n\t\terr := cli.Pub(topic, ver, \"\", []byte(msg))\n\t\tswallow(err)\n\n\t\tcli.Sub(hisApp, topic, ver, \"__smoketestonly__\", func(statusCode int, msg []byte) error {\n\t\t\tif statusCode == http.StatusNoContent {\n\t\t\t\tthis.Ui.Output(\"no content, 
sub again\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tthis.Ui.Output(fmt.Sprintf(\"Sub: %s, http:%s\", string(msg),\n\t\t\t\thttp.StatusText(statusCode)))\n\n\t\t\treturn api.ErrSubStop\n\t\t})\n\n\t\tthis.Ui.Info(fmt.Sprintf(\"curl -H'Appid: %s' -H'Subkey: %s' -i http:\/\/%s\/status\/%s\/%s\/%s\",\n\t\t\tmyApp, secret, kw.SubAddr, hisApp, topic, ver))\n\n\t\t\/\/ 1. 查询某个pubsub topic的partition数量\n\t\t\/\/ 2. 查看pubsub系统某个topic的生产、消费状态\n\t\t\/\/ 3. pub\n\t\t\/\/ 4. sub\n\t}\n\n}\n\nfunc (this *Kateway) doVisualize() {\n\tcmd := pipestream.New(\"\/usr\/local\/bin\/logstalgia\", \"-f\", this.visualLog)\n\terr := cmd.Open()\n\tswallow(err)\n\tdefer cmd.Close()\n\n\tscanner := bufio.NewScanner(cmd.Reader())\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t}\n}\n\nfunc (*Kateway) Synopsis() string {\n\treturn \"List\/Config online kateway instances\"\n}\n\nfunc (this *Kateway) Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s kateway -z zone [options]\n\n List\/Config online kateway instances\n\nOptions:\n\n -checkup\n Checkup for online kateway instances\n\n -visualog access log filename\n Visualize the kateway access log with Logstalgia\n You must install Logstalgia beforehand\n\n -id kateway id\n Execute on a single kateway instance. By default, apply on all\n\n -clients\n List online pub\/sub clients\n\n -l\n Use a long listing format\n \n -cf\n Enter config mode\n\n -reset metrics name\n Reset kateway metric counter by name\n\n -loglevel \n Set kateway log level\n \n -option =\n Set kateway options value\n\n`, this.Cmd)\n\treturn strings.TrimSpace(help)\n}\n<|endoftext|>"} {"text":"\/\/ revalidate-bc validates the entire blockchain for a provided\n\/\/ database or target.\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t_ \"github.com\/lib\/pq\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/cos\"\n\t\"chain\/cos\/bc\"\n\t\"chain\/cos\/memstore\"\n\t\"chain\/database\/pg\"\n\t\"chain\/database\/sql\"\n\t\"chain\/env\"\n)\n\nconst (\n\tbatchBlockCount = 50\n)\n\nconst help = `\nUsage:\n\n\trevalidate-bc [-t target] [-d url]\n\nCommand revalidate-bc revalidates the entire blockchain of a\ndatabase or target.\n\nEither the database or the target flag must be specified,\nbut not both.\n`\n\nvar (\n\tflagD = flag.String(\"d\", \"\", \"database\")\n\tflagT = flag.String(\"t\", \"\", \"target\")\n\tflagH = flag.Bool(\"h\", false, \"show help\")\n)\n\nfunc fatalf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n\nfunc main() {\n\tenv.Parse()\n\tlog.SetPrefix(\"appenv: \")\n\tlog.SetFlags(0)\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s [-t target] [-d url]\\n\", os.Args[0])\n\t}\n\tflag.Parse()\n\tif *flagH || (*flagT == \"\") == (*flagD == \"\") {\n\t\tfmt.Println(strings.TrimSpace(help))\n\t\tfmt.Print(\"\\nFlags:\\n\\n\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tvar dbURL string\n\tif *flagD != \"\" {\n\t\tdbURL = *flagD\n\t}\n\tif *flagT != \"\" {\n\t\tvar err error\n\t\tdbURL, err = getTargetDBURL(*flagT)\n\t\tif err != nil {\n\t\t}\n\t}\n\n\t\/\/ Create a database connection.\n\tdb, err := sql.Open(\"postgres\", dbURL)\n\tif err != nil {\n\t\tfatalf(\"unable to get target DB_URL: %v\\n\", err)\n\t}\n\tdefer db.Close()\n\n\tblocksValidated, err := RevalidateBlockchain(db)\n\tif err != nil {\n\t\tfatalf(\"error validating blockchain: %s\\n\", err)\n\t}\n\tfmt.Printf(\"Success: validated %d 
blocks\\n\", blocksValidated)\n}\n\nfunc RevalidateBlockchain(db *sql.DB) (blocksValidated uint64, err error) {\n\tdbCtx, cancel := context.WithCancel(pg.NewContext(context.Background(), db))\n\tblocks := streamBlocks(dbCtx)\n\n\t\/\/ Setup an FC backed with a memstore.\n\t\/\/ TODO(jackson): Don't keep everything in memory so that we can validate\n\t\/\/ larger blockchains in the future.\n\tctx := context.Background()\n\tfc, err := cos.NewFC(ctx, memstore.New(), []*btcec.PublicKey{}, nil)\n\tif err != nil {\n\t\tfatalf(\"unable to construct FC: %s\\n\", err)\n\t}\n\n\tfor b := range blocks {\n\t\terr = fc.AddBlock(ctx, b)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn blocksValidated, fmt.Errorf(\"block %s, height %d: %s\", b.Hash(), b.Height, err)\n\t\t}\n\t\tblocksValidated++\n\t}\n\treturn blocksValidated, nil\n}\n\nfunc streamBlocks(ctx context.Context) <-chan *bc.Block {\n\tconst q = `\n\t\tSELECT data FROM blocks WHERE height>=$1::bigint\n\t\tORDER BY height ASC LIMIT $2\n\t`\n\n\tch := make(chan *bc.Block, batchBlockCount)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar next uint64 = 0\n\t\tfor {\n\t\t\t\/\/ Get a new page of blocks and send them out over the channel.\n\t\t\tvar batch []*bc.Block\n\t\t\terr := pg.ForQueryRows(ctx, q, next, batchBlockCount, func(b bc.Block) {\n\t\t\t\tbatch = append(batch, &b)\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tfatalf(\"error listing blocks from db: %s\\n\", err)\n\t\t\t}\n\n\t\t\tfor _, b := range batch {\n\t\t\t\tselect {\n\t\t\t\tcase ch <- b:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check for an incomplete page, signalling current end of\n\t\t\t\/\/ the blockchain.\n\t\t\tif len(batch) != batchBlockCount {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Set the starting block height for the next iteration.\n\t\t\tnext = batch[len(batch)-1].Height + 1\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc getTargetDBURL(target string) (string, error) {\n\tout, err := exec.Command(\"appenv\", \"-t\", target, \"DB_URL\").CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", errors.New(string(out))\n\t}\n\treturn strings.TrimSpace(string(out)), nil\n}\ncmd\/revalidate-bc: look up db host\/\/ revalidate-bc validates the entire blockchain for a provided\n\/\/ database or target.\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/btcsuite\/btcd\/btcec\"\n\t_ \"github.com\/lib\/pq\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"chain\/cos\"\n\t\"chain\/cos\/bc\"\n\t\"chain\/cos\/memstore\"\n\t\"chain\/database\/pg\"\n\t\"chain\/database\/sql\"\n\t\"chain\/env\"\n)\n\nconst (\n\tbatchBlockCount = 50\n)\n\nconst help = `\nUsage:\n\n\trevalidate-bc [-t target] [-d url]\n\nCommand revalidate-bc revalidates the entire blockchain of a\ndatabase or target.\n\nEither the database or the target flag must be specified,\nbut not both.\n`\n\nvar (\n\tflagD = flag.String(\"d\", \"\", \"database\")\n\tflagT = flag.String(\"t\", \"\", \"target\")\n\tflagH = flag.Bool(\"h\", false, \"show help\")\n)\n\nfunc fatalf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, args...)\n\tos.Exit(1)\n}\n\nfunc main() {\n\tenv.Parse()\n\tlog.SetPrefix(\"appenv: \")\n\tlog.SetFlags(0)\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s [-t target] [-d url]\\n\", os.Args[0])\n\t}\n\tflag.Parse()\n\tif *flagH || (*flagT == \"\") == (*flagD == \"\") 
{\n\t\tfmt.Println(strings.TrimSpace(help))\n\t\tfmt.Print(\"\\nFlags:\\n\\n\")\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\n\tvar dbURL string\n\tif *flagD != \"\" {\n\t\tdbURL = *flagD\n\t}\n\tif *flagT != \"\" {\n\t\tvar err error\n\t\tdbURL, err = getTargetDBURL(*flagT)\n\t\tif err != nil {\n\t\t}\n\t}\n\n\t\/\/ Create a database connection.\n\tsql.Register(\"schemadb\", pg.SchemaDriver(\"revalidate-bc\"))\n\tdb, err := sql.Open(\"schemadb\", dbURL)\n\tif err != nil {\n\t\tfatalf(\"unable to get target DB_URL: %v\\n\", err)\n\t}\n\tdefer db.Close()\n\n\tblocksValidated, err := RevalidateBlockchain(db)\n\tif err != nil {\n\t\tfatalf(\"error validating blockchain: %s\\n\", err)\n\t}\n\tfmt.Printf(\"Success: validated %d blocks\\n\", blocksValidated)\n}\n\nfunc RevalidateBlockchain(db *sql.DB) (blocksValidated uint64, err error) {\n\tdbCtx, cancel := context.WithCancel(pg.NewContext(context.Background(), db))\n\tblocks := streamBlocks(dbCtx)\n\n\t\/\/ Setup an FC backed with a memstore.\n\t\/\/ TODO(jackson): Don't keep everything in memory so that we can validate\n\t\/\/ larger blockchains in the future.\n\tctx := context.Background()\n\tfc, err := cos.NewFC(ctx, memstore.New(), []*btcec.PublicKey{}, nil)\n\tif err != nil {\n\t\tfatalf(\"unable to construct FC: %s\\n\", err)\n\t}\n\n\tfor b := range blocks {\n\t\terr = fc.AddBlock(ctx, b)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn blocksValidated, fmt.Errorf(\"block %s, height %d: %s\", b.Hash(), b.Height, err)\n\t\t}\n\t\tblocksValidated++\n\t}\n\treturn blocksValidated, nil\n}\n\nfunc streamBlocks(ctx context.Context) <-chan *bc.Block {\n\tconst q = `\n\t\tSELECT data FROM blocks WHERE height>=$1::bigint\n\t\tORDER BY height ASC LIMIT $2\n\t`\n\n\tch := make(chan *bc.Block, batchBlockCount)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar next uint64 = 0\n\t\tfor {\n\t\t\t\/\/ Get a new page of blocks and send them out over the channel.\n\t\t\tvar batch []*bc.Block\n\t\t\terr := pg.ForQueryRows(ctx, q, next, batchBlockCount, func(b bc.Block) {\n\t\t\t\tbatch = append(batch, &b)\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tfatalf(\"error listing blocks from db: %s\\n\", err)\n\t\t\t}\n\n\t\t\tfor _, b := range batch {\n\t\t\t\tselect {\n\t\t\t\tcase ch <- b:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check for an incomplete page, signalling current end of\n\t\t\t\/\/ the blockchain.\n\t\t\tif len(batch) != batchBlockCount {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ Set the starting block height for the next iteration.\n\t\t\tnext = batch[len(batch)-1].Height + 1\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc getTargetDBURL(target string) (string, error) {\n\tout, err := exec.Command(\"appenv\", \"-t\", target, \"DB_URL\").CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", errors.New(string(out))\n\t}\n\treturn strings.TrimSpace(string(out)), nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"github.com\/thijzert\/speeldoos\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc init_main(args []string) {\n\tif len(args) == 0 {\n\t\tcroak(fmt.Errorf(\"Specify at least one number of parts\"))\n\t}\n\n\tpfsize := make([]int, 0, len(args))\n\ttotal_tracks := 0\n\n\tfor _, i := range args {\n\t\tn, err := strconv.Atoi(i)\n\t\tcroak(err)\n\t\tif n <= 0 {\n\t\t\tcroak(fmt.Errorf(\"Number of parts must be positive\"))\n\t\t}\n\t\tpfsize = append(pfsize, n)\n\t\ttotal_tracks += n\n\t}\n\n\tdiscsize := []int{total_tracks}\n\tif Config.Init.Discs != \"\" 
{\n\t\tdiscsize = discsize[0:0]\n\t\td_total := 0\n\t\tdds := strings.Split(Config.Init.Discs, \" \")\n\t\tfor _, i := range dds {\n\t\t\tif i == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn, err := strconv.Atoi(i)\n\t\t\tcroak(err)\n\t\t\tif n <= 0 {\n\t\t\t\tcroak(fmt.Errorf(\"Number of tracks must be positive\"))\n\t\t\t}\n\n\t\t\tdiscsize = append(discsize, n)\n\t\t\td_total += n\n\t\t}\n\n\t\tif d_total != total_tracks {\n\t\t\tcroak(fmt.Errorf(\"Total tracks on all cd's (%d) does not match total number of parts (%d).\", d_total, total_tracks))\n\t\t}\n\t}\n\n\tfoo := &speeldoos.Carrier{}\n\n\tfoo.Name = \"2222\"\n\tfoo.ID = \"2222\"\n\tfoo.Source = \"2222\"\n\tfoo.Performances = make([]speeldoos.Performance, 0, len(args))\n\n\tdisc_index := 0\n\ttrack_counter := 1\n\n\tfor _, n := range pfsize {\n\t\tpf := speeldoos.Performance{\n\t\t\tWork: speeldoos.Work{\n\t\t\t\tComposer: speeldoos.Composer{Name: Config.Init.Composer, ID: strings.Replace(Config.Init.Composer, \" \", \"_\", -1)},\n\t\t\t\tTitle: []speeldoos.Title{speeldoos.Title{\"2222\", \"\"}},\n\t\t\t\tOpusNumber: []speeldoos.OpusNumber{speeldoos.OpusNumber{Number: \"2222\"}},\n\t\t\t\tYear: 2222,\n\t\t\t},\n\t\t\tYear: Config.Init.Year,\n\t\t\tPerformers: []speeldoos.Performer{},\n\t\t\tSourceFiles: make([]speeldoos.SourceFile, n),\n\t\t}\n\n\t\tif Config.Init.Soloist != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Soloist, Role: \"soloist\"})\n\t\t}\n\t\tif Config.Init.Orchestra != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Orchestra, Role: \"orchestra\"})\n\t\t}\n\t\tif Config.Init.Ensemble != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Ensemble, Role: \"ensemble\"})\n\t\t}\n\t\tif Config.Init.Conductor != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Conductor, Role: \"conductor\"})\n\t\t}\n\n\t\tif len(pf.Performers) == 0 {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: \"2222\", Role: \"2222\"})\n\t\t}\n\n\t\tif n > 1 {\n\t\t\tpf.Work.Parts = make([]string, n)\n\t\t}\n\t\tfor j := 0; j < n; j++ {\n\t\t\tif n > 1 {\n\t\t\t\tpf.Work.Parts[j] = \"2222\"\n\t\t\t}\n\t\t\tif len(discsize) > 1 {\n\t\t\t\tpf.SourceFiles[j] = speeldoos.SourceFile{\n\t\t\t\t\tFilename: path.Join(fmt.Sprintf(Config.Init.DiscFormat, disc_index+1), fmt.Sprintf(Config.Init.TrackFormat, track_counter)),\n\t\t\t\t\tDisc: disc_index + 1,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpf.SourceFiles[j] = speeldoos.SourceFile{\n\t\t\t\t\tFilename: fmt.Sprintf(Config.Init.TrackFormat, track_counter),\n\t\t\t\t}\n\t\t\t}\n\t\t\ttrack_counter++\n\t\t\tif track_counter > discsize[disc_index] {\n\t\t\t\ttrack_counter = 1\n\t\t\t\tdisc_index++\n\t\t\t}\n\t\t}\n\n\t\tfoo.Performances = append(foo.Performances, pf)\n\t}\n\n\tif Config.Init.OutputFile == \"\" {\n\t\tw := xml.NewEncoder(os.Stdout)\n\t\tw.Indent(\"\", \"\t\")\n\t\tcroak(w.Encode(foo))\n\t} else {\n\t\tcroak(foo.Write(Config.Init.OutputFile))\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Success. If you saved the output of this script somewhere, use your favorite\\n\"+\n\t\t\"text editor to fill in the missing details. 
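// A minimal self-contained sketch of the disc/track rollover used by
// init_main above: the track counter advances within the current disc and
// wraps back to 1 when it passes that disc's size, moving on to the next
// disc. walkTracks and discSizes are illustrative names only, not part of
// the speeldoos package.
func walkTracks(discSizes []int, visit func(disc, track int)) {
	disc, track := 0, 1
	for disc < len(discSizes) {
		visit(disc+1, track) // discs and tracks are numbered from 1, as in the format strings
		track++
		if track > discSizes[disc] {
			track = 1
			disc++
		}
	}
}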
Pro tip: search for '2222' to\\n\"+\n\t\t\"quickly hop between every field that's been left blank.\\n\")\n}\nInit: Prepopulate a sensible Opus Index namepackage main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"github.com\/thijzert\/speeldoos\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar defaultIndexNames = map[string]string{\n\t\"Carl Philipp Emanuel Bach\": \"Wq\",\n\t\"Dieterich Buxtehude\": \"BuxWV\",\n\t\"Franz Schubert\": \"D\",\n\t\"Georg Philipp Telemann\": \"TWV\",\n\t\"Johann Sebastian Bach\": \"BWV\",\n\t\"Wolfgang Amadeus Mozart\": \"K\",\n}\n\nfunc init_main(args []string) {\n\tif len(args) == 0 {\n\t\tcroak(fmt.Errorf(\"Specify at least one number of parts\"))\n\t}\n\n\tpfsize := make([]int, 0, len(args))\n\ttotal_tracks := 0\n\n\tfor _, i := range args {\n\t\tn, err := strconv.Atoi(i)\n\t\tcroak(err)\n\t\tif n <= 0 {\n\t\t\tcroak(fmt.Errorf(\"Number of parts must be positive\"))\n\t\t}\n\t\tpfsize = append(pfsize, n)\n\t\ttotal_tracks += n\n\t}\n\n\tdiscsize := []int{total_tracks}\n\tif Config.Init.Discs != \"\" {\n\t\tdiscsize = discsize[0:0]\n\t\td_total := 0\n\t\tdds := strings.Split(Config.Init.Discs, \" \")\n\t\tfor _, i := range dds {\n\t\t\tif i == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn, err := strconv.Atoi(i)\n\t\t\tcroak(err)\n\t\t\tif n <= 0 {\n\t\t\t\tcroak(fmt.Errorf(\"Number of tracks must be positive\"))\n\t\t\t}\n\n\t\t\tdiscsize = append(discsize, n)\n\t\t\td_total += n\n\t\t}\n\n\t\tif d_total != total_tracks {\n\t\t\tcroak(fmt.Errorf(\"Total tracks on all cd's (%d) does not match total number of parts (%d).\", d_total, total_tracks))\n\t\t}\n\t}\n\n\tfoo := &speeldoos.Carrier{}\n\n\tfoo.Name = \"2222\"\n\tfoo.ID = \"2222\"\n\tfoo.Source = \"2222\"\n\tfoo.Performances = make([]speeldoos.Performance, 0, len(args))\n\n\tindexName := defaultIndexNames[Config.Init.Composer]\n\n\tdisc_index := 0\n\ttrack_counter := 1\n\n\tfor _, n := range pfsize {\n\t\tpf := speeldoos.Performance{\n\t\t\tWork: speeldoos.Work{\n\t\t\t\tComposer: speeldoos.Composer{Name: Config.Init.Composer, ID: strings.Replace(Config.Init.Composer, \" \", \"_\", -1)},\n\t\t\t\tTitle: []speeldoos.Title{speeldoos.Title{\"2222\", \"\"}},\n\t\t\t\tOpusNumber: []speeldoos.OpusNumber{speeldoos.OpusNumber{IndexName: indexName, Number: \"2222\"}},\n\t\t\t\tYear: 2222,\n\t\t\t},\n\t\t\tYear: Config.Init.Year,\n\t\t\tPerformers: []speeldoos.Performer{},\n\t\t\tSourceFiles: make([]speeldoos.SourceFile, n),\n\t\t}\n\n\t\tif Config.Init.Soloist != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Soloist, Role: \"soloist\"})\n\t\t}\n\t\tif Config.Init.Orchestra != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Orchestra, Role: \"orchestra\"})\n\t\t}\n\t\tif Config.Init.Ensemble != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Ensemble, Role: \"ensemble\"})\n\t\t}\n\t\tif Config.Init.Conductor != \"\" {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: Config.Init.Conductor, Role: \"conductor\"})\n\t\t}\n\n\t\tif len(pf.Performers) == 0 {\n\t\t\tpf.Performers = append(pf.Performers, speeldoos.Performer{Name: \"2222\", Role: \"2222\"})\n\t\t}\n\n\t\tif n > 1 {\n\t\t\tpf.Work.Parts = make([]string, n)\n\t\t}\n\t\tfor j := 0; j < n; j++ {\n\t\t\tif n > 1 {\n\t\t\t\tpf.Work.Parts[j] = \"2222\"\n\t\t\t}\n\t\t\tif len(discsize) > 1 {\n\t\t\t\tpf.SourceFiles[j] = speeldoos.SourceFile{\n\t\t\t\t\tFilename: 
path.Join(fmt.Sprintf(Config.Init.DiscFormat, disc_index+1), fmt.Sprintf(Config.Init.TrackFormat, track_counter)),\n\t\t\t\t\tDisc: disc_index + 1,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpf.SourceFiles[j] = speeldoos.SourceFile{\n\t\t\t\t\tFilename: fmt.Sprintf(Config.Init.TrackFormat, track_counter),\n\t\t\t\t}\n\t\t\t}\n\t\t\ttrack_counter++\n\t\t\tif track_counter > discsize[disc_index] {\n\t\t\t\ttrack_counter = 1\n\t\t\t\tdisc_index++\n\t\t\t}\n\t\t}\n\n\t\tfoo.Performances = append(foo.Performances, pf)\n\t}\n\n\tif Config.Init.OutputFile == \"\" {\n\t\tw := xml.NewEncoder(os.Stdout)\n\t\tw.Indent(\"\", \"\t\")\n\t\tcroak(w.Encode(foo))\n\t} else {\n\t\tcroak(foo.Write(Config.Init.OutputFile))\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Success. If you saved the output of this script somewhere, use your favorite\\n\"+\n\t\t\"text editor to fill in the missing details. Pro tip: search for '2222' to\\n\"+\n\t\t\"quickly hop between every field that's been left blank.\\n\")\n}\n<|endoftext|>"} {"text":"package sso\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"reflect\"\n)\n\nvar (\n\t\/\/ TranquilityOAuth root address.\n\tTranquilityOAuth = \"https:\/\/login.eveonline.com\"\n\t\/\/ SingularityOAuth root address.\n\tSingularityOAuth = \"https:\/\/sisilogin.testeveonline.com\"\n\t\/\/ ErrClientID is returned when the client ID is not specified.\n\tErrClientID = errors.New(\"client ID must be set\")\n\t\/\/ ErrClientSecret is returned when the client secret is not specified.\n\tErrClientSecret = errors.New(\"client secret must be set\")\n\t\/\/ ErrCallbackAddress is returned when the callback address is not set.\n\tErrCallbackAddress = errors.New(\"callback address must be set\")\n\t\/\/ ErrBadOAuthAddress is returned when the OAuth address is not equal to\n\t\/\/ TranquilityOAuth or SingularityOAuth. For the sake of the security of users,\n\t\/\/ the client does not support proxies.\n\tErrBadOAuthAddress = errors.New(\"the provided OAuth root address is invalid\")\n\t\/\/ ErrTooManyRequests is returned when EVE SSO responds with HTTP status 409,\n\t\/\/ which more generally means the client has made way too many requests to SSO.\n\t\/\/ If this is ever returned, the client should wait for a few minutes and retry.\n\tErrTooManyRequests = errors.New(\"EVE SSO responded with HTTP status 409 (too many requests)\")\n\t\/\/ ErrParsingResponse is returned when the authorization code exchage\/refresh\n\t\/\/ methods could not parse the JSON response in to the map. Applications\n\t\/\/ should attempt to retry after a few seconds.\n\tErrParsingResponse = errors.New(\"response returned from EVE SSO could not be parsed (do retry)\")\n)\n\n\/\/ Client to EVE Online's Signle Sign-on service.\ntype Client struct {\n\tid string\n\tsecret string\n\toauth string\n\tcallback string\n\thttpClient *http.Client\n}\n\n\/\/ NewClient configures and returns a new client. For bad options, client is\n\/\/ returned as nil with an error.\nfunc NewClient(opts *Options) (client *Client, err error) {\n\tif err = opts.Validate(); err != nil {\n\t\treturn\n\t}\n\tclient = &Client{\n\t\tid: opts.ClientID,\n\t\tsecret: opts.ClientSecret,\n\t\toauth: opts.OAuthRoot,\n\t\tcallback: opts.CallbackAddress,\n\t\thttpClient: new(http.Client),\n\t}\n\treturn\n}\n\n\/\/ Login redirects the client to EVE Online SSO. The state parameter is optional,\n\/\/ however heavily recommened for security purposes. 
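// The state parameter exists to stop CSRF: generate an unpredictable value
// per login attempt, keep it in the user's session, and compare it against
// the state echoed back to the callback. A minimal sketch of generating
// one; newState is an illustrative helper, not part of this package, and
// assumes crypto/rand and encoding/base64 are imported.
func newState() (string, error) {
	b := make([]byte, 16) // 128 bits of entropy
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(b), nil
}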
If no scopes are passed,\n\/\/ then only basic authentication is used.\nfunc (client *Client) Login(w http.ResponseWriter, r *http.Request, state string, scopes ...string) {\n\turl := fmt.Sprintf(\"%v\/oauth\/authorize\/?response_type=code&redirect_uri=%v&client_id=%v&state=%v\", client.oauth, client.callback, client.id, state)\n\tif len(scopes) > 0 {\n\t\turl = fmt.Sprintf(\"%v&scope=%v\", url, formatScopes(scopes...))\n\t}\n\thttp.Redirect(w, r, url, http.StatusFound)\n}\n\n\/\/ Exchange the authorization code for a token.\nfunc (client *Client) Callback(code string) (data map[string]interface{}, err error) {\n\turl := fmt.Sprintf(\"%v\/oauth\/token\/?grant_type=authorization_code&code=%v\", client.oauth, code)\n\tvar req *http.Request\n\tif req, err = http.NewRequest(\"POST\", url, nil); err != nil {\n\t\treturn\n\t}\n\treturn client.doRequest(req)\n}\n\n\/\/ Refresh an old token for a new one.\nfunc (client *Client) Refresh(old map[string]interface{}) (new map[string]interface{}, err error) {\n\trefreshTkn, ok := old[\"refresh_token\"].(string)\n\tif !ok {\n\t\terr = fmt.Errorf(\"bad type for old[\\\"refresh_token\\\"] - want string but got %v\", reflect.TypeOf(old[\"refresh_token\"]).String())\n\t\treturn\n\t}\n\turl := fmt.Sprintf(\"%v\/oauth\/token\/?grant_type=refresh_token&refresh_token=%v\", client.oauth, refreshTkn)\n\tvar req *http.Request\n\tif req, err = http.NewRequest(\"POST\", url, nil); err != nil {\n\t\treturn\n\t}\n\treturn client.doRequest(req)\n}\n\nfunc (client *Client) doRequest(req *http.Request) (data map[string]interface{}, err error) {\n\t\/\/ Sweet mother of nested functions...\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Basic %v\", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(\"%v:%v\", client.id, client.secret)))))\n\tvar resp *http.Response\n\tif resp, err = client.httpClient.Do(req); err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tif resp.StatusCode == http.StatusConflict {\n\t\t\t\/\/ We've made too many requests to SSO.\n\t\t\terr = ErrTooManyRequests\n\t\t\treturn\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"EVE SSO responded with HTTP status %v\", resp.StatusCode)\n\t\t\treturn\n\t\t}\n\t}\n\tvar raw []byte\n\tif raw, err = ioutil.ReadAll(resp.Body); err != nil {\n\t\treturn\n\t}\n\tdata = make(map[string]interface{})\n\tif err = json.Unmarshal(raw, &data); err != nil {\n\t\tdata = nil\n\t\terr = ErrParsingResponse\n\t\treturn\n\t}\n\treturn\n}\n\nfunc formatScopes(scopes ...string) (formated string) {\n\tfor i, s := range scopes {\n\t\tif len(scopes) == i+1 {\n\t\t\t\/\/ Do not append trailing space to last entry.\n\t\t\tformated += s\n\t\t} else {\n\t\t\tformated += s + \" \"\n\t\t}\n\t}\n\treturn\n}\nImplemented Verify method to verify tokenpackage sso\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"reflect\"\n)\n\nvar (\n\t\/\/ TranquilityOAuth root address.\n\tTranquilityOAuth = \"https:\/\/login.eveonline.com\"\n\t\/\/ SingularityOAuth root address.\n\tSingularityOAuth = \"https:\/\/sisilogin.testeveonline.com\"\n\t\/\/ ErrClientID is returned when the client ID is not specified.\n\tErrClientID = errors.New(\"client ID must be set\")\n\t\/\/ ErrClientSecret is returned when the client secret is not specified.\n\tErrClientSecret = errors.New(\"client secret must be set\")\n\t\/\/ ErrCallbackAddress is returned when the callback address is not set.\n\tErrCallbackAddress = errors.New(\"callback address must be 
set\")\n\t\/\/ ErrBadOAuthAddress is returned when the OAuth address is not equal to\n\t\/\/ TranquilityOAuth or SingularityOAuth. For the sake of the security of users,\n\t\/\/ the client does not support proxies.\n\tErrBadOAuthAddress = errors.New(\"the provided OAuth root address is invalid\")\n\t\/\/ ErrTooManyRequests is returned when EVE SSO responds with HTTP status 409,\n\t\/\/ which more generally means the client has made way too many requests to SSO.\n\t\/\/ If this is ever returned, the client should wait for a few minutes and retry.\n\tErrTooManyRequests = errors.New(\"EVE SSO responded with HTTP status 409 (too many requests)\")\n\t\/\/ ErrParsingResponse is returned when the authorization code exchage\/refresh\n\t\/\/ methods could not parse the JSON response in to the map. Applications\n\t\/\/ should attempt to retry after a few seconds.\n\tErrParsingResponse = errors.New(\"response returned from EVE SSO could not be parsed (do retry)\")\n)\n\n\/\/ Client to EVE Online's Signle Sign-on service.\ntype Client struct {\n\tid string\n\tsecret string\n\toauth string\n\tcallback string\n\thttpClient *http.Client\n}\n\n\/\/ NewClient configures and returns a new client. For bad options, client is\n\/\/ returned as nil with an error.\nfunc NewClient(opts *Options) (client *Client, err error) {\n\tif err = opts.Validate(); err != nil {\n\t\treturn\n\t}\n\tclient = &Client{\n\t\tid: opts.ClientID,\n\t\tsecret: opts.ClientSecret,\n\t\toauth: opts.OAuthRoot,\n\t\tcallback: opts.CallbackAddress,\n\t\thttpClient: new(http.Client),\n\t}\n\treturn\n}\n\n\/\/ Login redirects the client to EVE Online SSO. The state parameter is optional,\n\/\/ however heavily recommened for security purposes. If no scopes are passed,\n\/\/ then only basic authentication is used.\nfunc (client *Client) Login(w http.ResponseWriter, r *http.Request, state string, scopes ...string) {\n\turl := fmt.Sprintf(\"%v\/oauth\/authorize\/?response_type=code&redirect_uri=%v&client_id=%v&state=%v\", client.oauth, client.callback, client.id, state)\n\tif len(scopes) > 0 {\n\t\turl = fmt.Sprintf(\"%v&scope=%v\", url, formatScopes(scopes...))\n\t}\n\thttp.Redirect(w, r, url, http.StatusFound)\n}\n\n\/\/ Exchange the authorization code for a token.\nfunc (client *Client) Callback(code string) (token map[string]interface{}, err error) {\n\turl := fmt.Sprintf(\"%v\/oauth\/token\/?grant_type=authorization_code&code=%v\", client.oauth, code)\n\tvar req *http.Request\n\tif req, err = http.NewRequest(\"POST\", url, nil); err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Basic %v\", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(\"%v:%v\", client.id, client.secret)))))\n\treturn client.doRequest(req)\n}\n\n\/\/ Refresh an old token for a new one.\nfunc (client *Client) Refresh(old map[string]interface{}) (new map[string]interface{}, err error) {\n\trefreshTkn, ok := old[\"refresh_token\"].(string)\n\tif !ok {\n\t\terr = fmt.Errorf(\"bad type for old[\\\"refresh_token\\\"] - want string but got %v\", reflect.TypeOf(old[\"refresh_token\"]).String())\n\t\treturn\n\t}\n\turl := fmt.Sprintf(\"%v\/oauth\/token\/?grant_type=refresh_token&refresh_token=%v\", client.oauth, refreshTkn)\n\tvar req *http.Request\n\tif req, err = http.NewRequest(\"POST\", url, nil); err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Basic %v\", base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(\"%v:%v\", client.id, client.secret)))))\n\treturn client.doRequest(req)\n}\n\nfunc (client *Client) 
Verify(token map[string]interface{}) (result map[string]interface{}, err error) {\n\t_, ok := token[\"access_token\"].(string)\n\tif !ok {\n\t\terr = fmt.Errorf(\"bad type for token[\\\"access_token\\\"] - want string but got %v\", reflect.TypeOf(token[\"access_token\"]).String())\n\t\treturn\n\t}\n\turl := fmt.Sprintf(\"%v\/oauth\/verify\", client.oauth)\n\tvar req *http.Request\n\tif req, err = http.NewRequest(\"GET\", url, nil); err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %v\", token[\"access_token\"]))\n\treturn client.doRequest(req)\n}\n\nfunc (client *Client) doRequest(req *http.Request) (data map[string]interface{}, err error) {\n\t\/\/ Sweet mother of nested functions...\n\tvar resp *http.Response\n\tif resp, err = client.httpClient.Do(req); err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tif resp.StatusCode == http.StatusConflict {\n\t\t\t\/\/ We've made too many requests to SSO.\n\t\t\terr = ErrTooManyRequests\n\t\t\treturn\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"EVE SSO responded with HTTP status %v\", resp.StatusCode)\n\t\t\treturn\n\t\t}\n\t}\n\tvar raw []byte\n\tif raw, err = ioutil.ReadAll(resp.Body); err != nil {\n\t\treturn\n\t}\n\tdata = make(map[string]interface{})\n\tif err = json.Unmarshal(raw, &data); err != nil {\n\t\tdata = nil\n\t\terr = ErrParsingResponse\n\t\treturn\n\t}\n\treturn\n}\n\nfunc formatScopes(scopes ...string) (formated string) {\n\tfor i, s := range scopes {\n\t\tif len(scopes) == i+1 {\n\t\t\t\/\/ Do not append trailing space to last entry.\n\t\t\tformated += s\n\t\t} else {\n\t\t\tformated += s + \" \"\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014-2015 The project AUTHORS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\tPackage src provides a set of structures for representing a project with its\n\trelated source code independently of the language. In other words, it\n\tprovides a generic representation (abstraction) of a source code.\n\n\n\tGoal\n\n\tThe goal of this package is to provide a generic representation of a project\n\tthat can be analyzed by the anlzr package as well as an API for\n\tencoding\/decoding it to\/from JSON.\n\n\n\tUsage\n\n\tThere are two kinds of program that interact with a src.Project: language\n\tparsers and VCS support tools. The former visits all source files inside\n\tthe project folder and parse every source file in order to fill the\n\tsrc.Project.Packages field (and few others). The latter read the VCS folder\n\tthat contains VCS data and fill the src.Project.Repo structure. The next\n\ttwo chapters treat about them more in details.\n\n\n\tLanguage parsers\n\n\tTODO\n\n\n\tVCS support tools\n\n\tTODO\n\n\n\tExample\n\n\tTODO\n\n\n\tLines of Code counting\n\n\tThe number of real lines of code must be precomputed by the language\n\tparsers. This is the only \"feature\" that must be precomputed because it may\n\thave multiple usages:\n\n\t1. Eliminate empty projects\n\n\t2. Evalutate project size\n\n\t3. Verify that the decoding is correct\n\n\t4. Normalize various counts\n\n\t5. ...\n\n\tTherefore, this count must be accurate and strictly follow the following\n\trules:\n\n\tWe only count statements and declarations as a line of code. Comments,\n\tpackage declaration, imports, expression, etc. must not be taken into\n\taccount. 
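// A rough self-contained illustration of this counting rule for Go input,
// using the standard go/ast and go/parser packages. This is only a sketch
// of the rule, not the DevMine language parsers themselves: it counts a few
// representative declaration and statement node kinds and ignores the
// package clause, imports and comments. Assumes go/ast, go/parser and
// go/token are imported.
func countLoC(src string) (int, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		return 0, err
	}
	n := 0
	ast.Inspect(f, func(node ast.Node) bool {
		switch node.(type) {
		case *ast.FuncDecl, *ast.ExprStmt, *ast.AssignStmt, *ast.ReturnStmt, *ast.DeclStmt:
			n++ // declarations and statements count as lines of code
		}
		return true
	})
	return n, nil
}
// Applied to the snippet shown in this comment, countLoC returns 2: the
// main function declaration and the fmt.Println call.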
Since an exemple is worth more than a thousand words, let's\n\tcondider the following snippet:\n\n\t \/\/ Package doc (does not count as a line of code)\n\t package main \/\/ does not count as a line of code\n\n\t import \"fmt\" \/\/ does not count as a line of code\n\n\t func main() { \/\/ count as 1 line of code\n\t fmt.Println(\n\t \"Hello, World!\n\t ) \/\/ count as 1 line of code\n\t }\n\n\tThe expected number of lines of code is 2: The main function declaration\n\tand the call to fmt.Println function.\n\n\n\tPerformance\n\n\tDevMine project is dealing with Terabytes of source code, therefore the\n\tJSON decoding must be efficient. That is why we implemented our own JSON\n\tdecoder that focuses on performance. To do so, we had to make some\n\tchoices and add some constraints for language parsers in order to make this\n\tprocess as fast as possible.\n\n\tJSON is usually unpredicatable which forces JSON parsers to be generic to\n\tdeal with every possible kind of input. In DevMine, we have a well defined\n\tstructure, thus instead of writting a generic JSON decoder we wrote one that\n\tdecodes only src.Project objects. This really improves the performances\n\tsince we don't need to use reflextion, generic types (interface{}) and type\n\tassertion. The drawback of this choice is that we have to update the decoder\n\teverytime we modify our structures.\n\n\tMost JSON parsers assume that the JSON input is potentially invalid\n\t(ie. malformed). We don't. Unlike json.Unmarshal, we don't Check for\n\twell-formedness.\n\n\tWe also force the language parsers to put the \"expression_name\" and\n\t\"statement_name\" fields at the beginning of the JSON object. We use that\n\tconvention to decode generic ast.Expr and ast.Stmt without reading the whole\n\tJSON object.\n\n\n\tFurther improvements\n\n\tThe code became quite repetitive. Since most of the logic has been\n\tencapsulated into helper methods, it would be really nice to generate the\n\tdecoding methods using \"go generate\".\n*\/\npackage src\nsrc: fix typo in the doc\/\/ Copyright 2014-2015 The project AUTHORS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\tPackage src provides a set of structures for representing a project with its\n\trelated source code independently of the language. In other words, it\n\tprovides a generic representation (abstraction) of a source code.\n\n\n\tGoal\n\n\tThe goal of this package is to provide a generic representation of a project\n\tthat can be analyzed by the anlzr package as well as an API for\n\tencoding\/decoding it to\/from JSON.\n\n\n\tUsage\n\n\tThere are two kinds of program that interact with a src.Project: language\n\tparsers and VCS support tools. The former visits all source files inside\n\tthe project folder and parse every source file in order to fill the\n\tsrc.Project.Packages field (and few others). The latter read the VCS folder\n\tthat contains VCS data and fill the src.Project.Repo structure. The next\n\ttwo chapters treat about them more in details.\n\n\n\tLanguage parsers\n\n\tTODO\n\n\n\tVCS support tools\n\n\tTODO\n\n\n\tExample\n\n\tTODO\n\n\n\tLines of Code counting\n\n\tThe number of real lines of code must be precomputed by the language\n\tparsers. This is the only \"feature\" that must be precomputed because it may\n\thave multiple usages:\n\n\t1. Eliminate empty projects\n\n\t2. Evalutate project size\n\n\t3. Verify that the decoding is correct\n\n\t4. 
Normalize various counts\n\n\t5. ...\n\n\tTherefore, this count must be accurate and strictly follow the following\n\trules:\n\n\tWe only count statements and declarations as a line of code. Comments,\n\tpackage declaration, imports, expression, etc. must not be taken into\n\taccount. Since an exemple is worth more than a thousand words, let's\n\tconsider the following snippet:\n\n\t \/\/ Package doc (does not count as a line of code)\n\t package main \/\/ does not count as a line of code\n\n\t import \"fmt\" \/\/ does not count as a line of code\n\n\t func main() { \/\/ count as 1 line of code\n\t fmt.Println(\n\t \"Hello, World!\n\t ) \/\/ count as 1 line of code\n\t }\n\n\tThe expected number of lines of code is 2: The main function declaration\n\tand the call to fmt.Println function.\n\n\n\tPerformance\n\n\tDevMine project is dealing with Terabytes of source code, therefore the\n\tJSON decoding must be efficient. That is why we implemented our own JSON\n\tdecoder that focuses on performance. To do so, we had to make some\n\tchoices and add some constraints for language parsers in order to make this\n\tprocess as fast as possible.\n\n\tJSON is usually unpredicatable which forces JSON parsers to be generic to\n\tdeal with every possible kind of input. In DevMine, we have a well defined\n\tstructure, thus instead of writting a generic JSON decoder we wrote one that\n\tdecodes only src.Project objects. This really improves the performances\n\tsince we don't need to use reflextion, generic types (interface{}) and type\n\tassertion. The drawback of this choice is that we have to update the decoder\n\teverytime we modify our structures.\n\n\tMost JSON parsers assume that the JSON input is potentially invalid\n\t(ie. malformed). We don't. Unlike json.Unmarshal, we don't Check for\n\twell-formedness.\n\n\tWe also force the language parsers to put the \"expression_name\" and\n\t\"statement_name\" fields at the beginning of the JSON object. We use that\n\tconvention to decode generic ast.Expr and ast.Stmt without reading the whole\n\tJSON object.\n\n\n\tFurther improvements\n\n\tThe code became quite repetitive. Since most of the logic has been\n\tencapsulated into helper methods, it would be really nice to generate the\n\tdecoding methods using \"go generate\".\n*\/\npackage src\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 tsuru authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testing\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/tsuru\/safe\"\n)\n\ntype FakeConn struct {\n\tBuf *safe.Buffer\n}\n\nfunc (c *FakeConn) Read(b []byte) (int, error) {\n\tif c.Buf != nil {\n\t\treturn c.Buf.Read(b)\n\t}\n\treturn 0, io.EOF\n}\n\nfunc (c *FakeConn) Write(b []byte) (int, error) {\n\tif c.Buf != nil {\n\t\treturn c.Buf.Write(b)\n\t}\n\treturn 0, io.ErrClosedPipe\n}\n\nfunc (c *FakeConn) Close() error {\n\tc.Buf = nil\n\treturn nil\n}\n\nfunc (c *FakeConn) LocalAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *FakeConn) RemoteAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetReadDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\ntype Hijacker struct {\n\thttp.ResponseWriter\n\tConn net.Conn\n\terr error\n}\n\nfunc (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif h.err != nil {\n\t\treturn nil, nil, h.err\n\t}\n\treturn h.Conn, nil, nil\n}\ntesting\/conn: export err var in Hijacker struct\/\/ Copyright 2015 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testing\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/tsuru\/tsuru\/safe\"\n)\n\ntype FakeConn struct {\n\tBuf *safe.Buffer\n}\n\nfunc (c *FakeConn) Read(b []byte) (int, error) {\n\tif c.Buf != nil {\n\t\treturn c.Buf.Read(b)\n\t}\n\treturn 0, io.EOF\n}\n\nfunc (c *FakeConn) Write(b []byte) (int, error) {\n\tif c.Buf != nil {\n\t\treturn c.Buf.Write(b)\n\t}\n\treturn 0, io.ErrClosedPipe\n}\n\nfunc (c *FakeConn) Close() error {\n\tc.Buf = nil\n\treturn nil\n}\n\nfunc (c *FakeConn) LocalAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *FakeConn) RemoteAddr() net.Addr {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetReadDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c *FakeConn) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\ntype Hijacker struct {\n\thttp.ResponseWriter\n\tConn net.Conn\n\tErr error\n}\n\nfunc (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif h.Err != nil {\n\t\treturn nil, nil, h.Err\n\t}\n\treturn h.Conn, nil, nil\n}\n<|endoftext|>"} {"text":"package blobstore\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\ntype FileBlobWriter struct {\n\n\t\/\/ Buffer for storing data before we can hash it\n\tbuffer bytes.Buffer\n\n\t\/\/ Storage object\n\tStorage BlobStorage\n\n\t\/\/ List of partial file blobs\n\tpartialBids, partialKeys []string\n\n\t\/\/ Overall number of bytes written so far\n\ttotalBytes int64\n}\n\n\/\/ Performing a write operation on the file blob\nfunc (f *FileBlobWriter) Write(p []byte) (n int, err error) {\n\n\tbufferSpaceLeft := maxSimpleFileDataSize - f.buffer.Len()\n\twritten := 0\n\tfor len(p) > 0 {\n\n\t\t\/\/ Let's see how much can we chop this time\n\t\tpartialSize := len(p)\n\t\tif partialSize > bufferSpaceLeft {\n\t\t\tpartialSize = bufferSpaceLeft\n\t\t}\n\n\t\t\/\/ Chop off the next part\n\t\tf.buffer.Write(p[:partialSize])\n\t\tp = p[partialSize:]\n\t\tbufferSpaceLeft -= partialSize\n\t\twritten += partialSize\n\n\t\t\/\/ Check out if we should emit next partial 
buffer\n\t\tif bufferSpaceLeft <= 0 {\n\t\t\tif err := f.finalizePartialBuffer(); err != nil {\n\t\t\t\tf.cleanup()\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tbufferSpaceLeft = maxSimpleFileDataSize\n\t\t}\n\t}\n\treturn written, nil\n}\n\nfunc (f *FileBlobWriter) finalizePartialBuffer() error {\n\n\t\/\/ Create the header\n\tvar hdr bytes.Buffer\n\thdr.WriteByte(blobTypeSimpleStaticFile)\n\n\t\/\/ Generate the blob\n\treaderGen := func() io.Reader {\n\t\theaderReader := bytes.NewReader(hdr.Bytes())\n\t\tcontentReader := bytes.NewReader(f.buffer.Bytes())\n\t\treturn io.MultiReader(headerReader, contentReader)\n\t}\n\tbid, key, err := createHashValidatedBlobFromReaderGenerator(readerGen, f.Storage)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Queue the blob on a list of partial blobs\n\tf.addPartialBlob(bid, key)\n\n\t\/\/ Increase the counter of bytes thrown out so far\n\tf.totalBytes += int64(f.buffer.Len())\n\n\t\/\/ Cleanup\n\tf.buffer.Reset()\n\n\treturn nil\n}\n\nfunc (f *FileBlobWriter) addPartialBlob(bid, key string) {\n\tf.partialBids = append(f.partialBids, bid)\n\tf.partialKeys = append(f.partialKeys, key)\n}\n\nfunc (f *FileBlobWriter) Finalize() (bid string, key string, err error) {\n\n\t\/\/ Throw out the last partial if needed\n\tif f.buffer.Len() > 0 || len(f.partialBids) == 0 {\n\t\tif err := f.finalizePartialBuffer(); err != nil {\n\t\t\tf.cleanup()\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ If there's only one partial in the list, we don't have to create\n\t\/\/ any split file blobs\n\tif len(f.partialBids) == 1 {\n\t\treturn f.partialBids[0], f.partialKeys[0], nil\n\t}\n\n\t\/\/ Create split file blob\n\treturn f.finalizeSplitFile()\n}\n\nfunc (f *FileBlobWriter) finalizeSplitFile() (bid string, key string, err error) {\n\tvar b bytes.Buffer\n\n\t\/\/ Blob type id\n\tb.WriteByte(blobTypeSplitStaticFile)\n\n\t\/\/ Total file size\n\tserializeInt(f.totalBytes, &b)\n\n\t\/\/ Number of partial blobs\n\tserializeInt(int64(len(f.partialBids)), &b)\n\n\t\/\/ Partial blobs list\n\tfor i, bid := range f.partialBids {\n\t\tserializeString(bid, &b)\n\t\tserializeString(f.partialKeys[i], &b)\n\t}\n\n\treturn createHashValidatedBlobFromReaderGenerator(\n\t\tfunc() io.Reader { return bytes.NewReader(b.Bytes()) },\n\t\tf.Storage)\n}\n\nfunc (f *FileBlobWriter) cleanup() {\n\t\/\/ TODO: Remove all blobs generated so far\n}\nAdd some commentspackage blobstore\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\n\/\/ Structure used to generate static file blobs\ntype FileBlobWriter struct {\n\n\t\/\/ Buffer for storing data before we can hash it\n\tbuffer bytes.Buffer\n\n\t\/\/ Storage object\n\tStorage BlobStorage\n\n\t\/\/ List of partial file blobs\n\tpartialBids, partialKeys []string\n\n\t\/\/ Overall number of bytes written so far\n\ttotalBytes int64\n}\n\n\/\/ Performing a write operation on the file blob\nfunc (f *FileBlobWriter) Write(p []byte) (n int, err error) {\n\n\tbufferSpaceLeft := maxSimpleFileDataSize - f.buffer.Len()\n\twritten := 0\n\tfor len(p) > 0 {\n\n\t\t\/\/ Let's see how much we can chop this time\n\t\tpartialSize := len(p)\n\t\tif partialSize > bufferSpaceLeft {\n\t\t\tpartialSize = bufferSpaceLeft\n\t\t}\n\n\t\t\/\/ Chop off the next part\n\t\tf.buffer.Write(p[:partialSize])\n\t\tp = p[partialSize:]\n\t\tbufferSpaceLeft -= partialSize\n\t\twritten += partialSize\n\n\t\t\/\/ Check out if we should emit next partial buffer\n\t\tif bufferSpaceLeft <= 0 {\n\t\t\tif err := f.finalizePartialBuffer(); err != nil {\n\t\t\t\tf.cleanup()\n\t\t\t\treturn 0, 
err\n\t\t\t}\n\t\t\tbufferSpaceLeft = maxSimpleFileDataSize\n\t\t}\n\t}\n\treturn written, nil\n}\n\n\/\/ Write the current content of the internal buffer into a blob,\n\/\/ save its id and key in a list of partial blobs\nfunc (f *FileBlobWriter) finalizePartialBuffer() error {\n\n\t\/\/ Create the header\n\tvar hdr bytes.Buffer\n\thdr.WriteByte(blobTypeSimpleStaticFile)\n\n\t\/\/ Generate the blob\n\treaderGen := func() io.Reader {\n\t\theaderReader := bytes.NewReader(hdr.Bytes())\n\t\tcontentReader := bytes.NewReader(f.buffer.Bytes())\n\t\treturn io.MultiReader(headerReader, contentReader)\n\t}\n\tbid, key, err := createHashValidatedBlobFromReaderGenerator(readerGen, f.Storage)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Queue the blob on a list of partial blobs\n\tf.addPartialBlob(bid, key)\n\n\t\/\/ Increase the counter of bytes thrown out so far\n\tf.totalBytes += int64(f.buffer.Len())\n\n\t\/\/ Cleanup\n\tf.buffer.Reset()\n\n\treturn nil\n}\n\n\/\/ Save bid and key into a list of partial blobs\nfunc (f *FileBlobWriter) addPartialBlob(bid, key string) {\n\tf.partialBids = append(f.partialBids, bid)\n\tf.partialKeys = append(f.partialKeys, key)\n}\n\n\/\/ Finalize the generation of this file blob\nfunc (f *FileBlobWriter) Finalize() (bid string, key string, err error) {\n\n\t\/\/ Throw out the last partial if needed\n\tif f.buffer.Len() > 0 || len(f.partialBids) == 0 {\n\t\tif err := f.finalizePartialBuffer(); err != nil {\n\t\t\tf.cleanup()\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t\/\/ If there's only one partial in the list, we don't have to create\n\t\/\/ any split file blobs\n\tif len(f.partialBids) == 1 {\n\t\treturn f.partialBids[0], f.partialKeys[0], nil\n\t}\n\n\t\/\/ Create split file blob\n\treturn f.finalizeSplitFile()\n}\n\n\/\/ Finalize blob generation in case we've created a split file blob\nfunc (f *FileBlobWriter) finalizeSplitFile() (bid string, key string, err error) {\n\tvar b bytes.Buffer\n\n\t\/\/ Blob type id\n\tb.WriteByte(blobTypeSplitStaticFile)\n\n\t\/\/ Total file size\n\tserializeInt(f.totalBytes, &b)\n\n\t\/\/ Number of partial blobs\n\tserializeInt(int64(len(f.partialBids)), &b)\n\n\t\/\/ Partial blobs list\n\tfor i, bid := range f.partialBids {\n\t\tserializeString(bid, &b)\n\t\tserializeString(f.partialKeys[i], &b)\n\t}\n\n\treturn createHashValidatedBlobFromReaderGenerator(\n\t\tfunc() io.Reader { return bytes.NewReader(b.Bytes()) },\n\t\tf.Storage)\n}\n\nfunc (f *FileBlobWriter) cleanup() {\n\t\/\/ TODO: Remove all blobs generated so far\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\ntype ServerMembersCommand struct {\n\tMeta\n\tCmd *exec.Cmd\n}\n\nfunc (c *ServerMembersCommand) Help() string {\n\thelpText := `\nUsage: maya omm-status [options]\n\n Display a list of the known servers and their status. Only Nomad servers are\n able to service this command.\n\nGeneral Options:\n\n ` + generalOptionsUsage() + `\n\nServer Members Options:\n\n -detailed\n Show detailed information about each member. 
This dumps\n a raw set of tags which shows more information than the\n default output format.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *ServerMembersCommand) Synopsis() string {\n\treturn \"Display a list of known servers and their status\"\n}\n\nfunc (c *ServerMembersCommand) Run(args []string) int {\n\tvar detailed bool\n\n\tflags := c.Meta.FlagSet(\"server-members\", FlagSetClient)\n\tflags.Usage = func() { c.Ui.Output(c.Help()) }\n\tflags.BoolVar(&detailed, \"detailed\", false, \"Show detailed output\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Check for extra arguments\n\targs = flags.Args()\n\tif len(args) != 0 {\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\n\t\/\/ Get the HTTP client\n\tclient, err := c.Meta.Client()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing client: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Query the members\n\tsrvMembers, err := client.Agent().Members()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error querying servers: %s\", err))\n\t\treturn 1\n\t}\n\n\tif srvMembers == nil {\n\t\tc.Ui.Error(\"Agent doesn't know about server members\")\n\t\treturn 0\n\t}\n\n\t\/\/ Sort the members\n\tsort.Sort(api.AgentMembersNameSort(srvMembers.Members))\n\n\t\/\/ Determine the leaders per region.\n\tleaders, err := regionLeaders(client, srvMembers.Members)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error determining leaders: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Format the list\n\tvar out []string\n\tif detailed {\n\t\tout = detailedOutput(srvMembers.Members)\n\t} else {\n\t\tout = standardOutput(srvMembers.Members, leaders)\n\t}\n\n\t\/\/ Dump the list\n\tc.Ui.Output(columnize.SimpleFormat(out))\n\tvar runop int\n\tif runop = c.mserverstatus(); runop != 0 {\n\t\treturn runop\n\t}\n\t\/\/fmt.Println(mserverstatus)\n\treturn 0\n}\n\nfunc standardOutput(mem []*api.AgentMember, leaders map[string]string) []string {\n\t\/\/ Format the members list\n\tmembers := make([]string, len(mem)+1)\n\tmembers[0] = \"Name|Address|Port|Status|Leader|Protocol|Build|Datacenter|Region\"\n\tfor i, member := range mem {\n\t\treg := member.Tags[\"region\"]\n\t\tregLeader, ok := leaders[reg]\n\t\tisLeader := false\n\t\tif ok {\n\t\t\tif regLeader == net.JoinHostPort(member.Addr, member.Tags[\"port\"]) {\n\n\t\t\t\tisLeader = true\n\t\t\t}\n\t\t}\n\n\t\tmembers[i+1] = fmt.Sprintf(\"%s|%s|%d|%s|%t|%d|%s|%s|%s\",\n\t\t\tmember.Name,\n\t\t\tmember.Addr,\n\t\t\tmember.Port,\n\t\t\tmember.Status,\n\t\t\tisLeader,\n\t\t\tmember.ProtocolCur,\n\t\t\tmember.Tags[\"build\"],\n\t\t\tmember.Tags[\"dc\"],\n\t\t\tmember.Tags[\"region\"])\n\t}\n\treturn members\n}\n\nfunc detailedOutput(mem []*api.AgentMember) []string {\n\t\/\/ Format the members list\n\tmembers := make([]string, len(mem)+1)\n\tmembers[0] = \"Name|Address|Port|Tags\"\n\tfor i, member := range mem {\n\t\t\/\/ Format the tags\n\t\ttagPairs := make([]string, 0, len(member.Tags))\n\t\tfor k, v := range member.Tags {\n\t\t\ttagPairs = append(tagPairs, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t\ttags := strings.Join(tagPairs, \",\")\n\n\t\tmembers[i+1] = fmt.Sprintf(\"%s|%s|%d|%s\",\n\t\t\tmember.Name,\n\t\t\tmember.Addr,\n\t\t\tmember.Port,\n\t\t\ttags)\n\t}\n\treturn members\n}\n\n\/\/ regionLeaders returns a map of regions to the IP of the member that is the\n\/\/ leader.\nfunc regionLeaders(client *api.Client, mem []*api.AgentMember) (map[string]string, error) {\n\t\/\/ Determine the unique regions.\n\tleaders := make(map[string]string)\n\tregions := 
make(map[string]struct{})\n\tfor _, m := range mem {\n\t\tregions[m.Tags[\"region\"]] = struct{}{}\n\t}\n\n\tif len(regions) == 0 {\n\t\treturn leaders, nil\n\t}\n\n\tstatus := client.Status()\n\tfor reg := range regions {\n\t\tl, err := status.RegionLeader(reg)\n\t\tif err != nil {\n\t\t\t\/\/ This error means that region has no leader.\n\t\t\tif strings.Contains(err.Error(), \"No cluster leader\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tleaders[reg] = l\n\t}\n\n\treturn leaders, nil\n}\n\nfunc (c *ServerMembersCommand) mserverstatus() int {\n\t\/\/\tout, err := exec.Command(\"mayaserver\", \"version\").Output()\n\t\/\/\tif err != nil {\n\t\/\/\t\tlog.Fatal(err)\n\t\/\/\t}\n\t\/\/\tfmt.Printf(\"mayaserver is running: %s\\n\", out)\n\t\/\/\treturn 0\n\n\tvar runop int = 0\n\n\tc.Cmd = exec.Command(\"systemctl\", \"status\", \"mayaserver\")\n\n\tif runop = execute(c.Cmd, c.Ui); runop != 0 {\n\t\tc.Ui.Error(\"mayaserver not running\")\n\t}\n\n\treturn runop\n\n}\nAdding proper commentspackage command\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\ntype ServerMembersCommand struct {\n\tMeta\n\tCmd *exec.Cmd\n}\n\nfunc (c *ServerMembersCommand) Help() string {\n\thelpText := `\nUsage: maya omm-status [options]\n\n Display a list of the known servers and their status. Only Nomad servers are\n able to service this command.\n\nGeneral Options:\n\n ` + generalOptionsUsage() + `\n\nServer Members Options:\n\n -detailed\n Show detailed information about each member. This dumps\n a raw set of tags which shows more information than the\n default output format.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *ServerMembersCommand) Synopsis() string {\n\treturn \"Display a list of known servers and their status\"\n}\n\nfunc (c *ServerMembersCommand) Run(args []string) int {\n\tvar detailed bool\n\n\tflags := c.Meta.FlagSet(\"server-members\", FlagSetClient)\n\tflags.Usage = func() { c.Ui.Output(c.Help()) }\n\tflags.BoolVar(&detailed, \"detailed\", false, \"Show detailed output\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Check for extra arguments\n\targs = flags.Args()\n\tif len(args) != 0 {\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\n\t\/\/ Get the HTTP client\n\tclient, err := c.Meta.Client()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error initializing client: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Query the members\n\tsrvMembers, err := client.Agent().Members()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error querying servers: %s\", err))\n\t\treturn 1\n\t}\n\n\tif srvMembers == nil {\n\t\tc.Ui.Error(\"Agent doesn't know about server members\")\n\t\treturn 0\n\t}\n\n\t\/\/ Sort the members\n\tsort.Sort(api.AgentMembersNameSort(srvMembers.Members))\n\n\t\/\/ Determine the leaders per region.\n\tleaders, err := regionLeaders(client, srvMembers.Members)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error determining leaders: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Format the list\n\tvar out []string\n\tif detailed {\n\t\tout = detailedOutput(srvMembers.Members)\n\t} else {\n\t\tout = standardOutput(srvMembers.Members, leaders)\n\t}\n\n\t\/\/ Dump the list\n\tc.Ui.Output(columnize.SimpleFormat(out))\n\tvar runop int\n\tif runop = c.mserverstatus(); runop != 0 {\n\t\treturn runop\n\t}\n\treturn 0\n}\n\nfunc standardOutput(mem []*api.AgentMember, leaders map[string]string) []string {\n\t\/\/ Format the 
members list\n\tmembers := make([]string, len(mem)+1)\n\tmembers[0] = \"Name|Address|Port|Status|Leader|Protocol|Build|Datacenter|Region\"\n\tfor i, member := range mem {\n\t\treg := member.Tags[\"region\"]\n\t\tregLeader, ok := leaders[reg]\n\t\tisLeader := false\n\t\tif ok {\n\t\t\tif regLeader == net.JoinHostPort(member.Addr, member.Tags[\"port\"]) {\n\n\t\t\t\tisLeader = true\n\t\t\t}\n\t\t}\n\n\t\tmembers[i+1] = fmt.Sprintf(\"%s|%s|%d|%s|%t|%d|%s|%s|%s\",\n\t\t\tmember.Name,\n\t\t\tmember.Addr,\n\t\t\tmember.Port,\n\t\t\tmember.Status,\n\t\t\tisLeader,\n\t\t\tmember.ProtocolCur,\n\t\t\tmember.Tags[\"build\"],\n\t\t\tmember.Tags[\"dc\"],\n\t\t\tmember.Tags[\"region\"])\n\t}\n\treturn members\n}\n\nfunc detailedOutput(mem []*api.AgentMember) []string {\n\t\/\/ Format the members list\n\tmembers := make([]string, len(mem)+1)\n\tmembers[0] = \"Name|Address|Port|Tags\"\n\tfor i, member := range mem {\n\t\t\/\/ Format the tags\n\t\ttagPairs := make([]string, 0, len(member.Tags))\n\t\tfor k, v := range member.Tags {\n\t\t\ttagPairs = append(tagPairs, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t\ttags := strings.Join(tagPairs, \",\")\n\n\t\tmembers[i+1] = fmt.Sprintf(\"%s|%s|%d|%s\",\n\t\t\tmember.Name,\n\t\t\tmember.Addr,\n\t\t\tmember.Port,\n\t\t\ttags)\n\t}\n\treturn members\n}\n\n\/\/ regionLeaders returns a map of regions to the IP of the member that is the\n\/\/ leader.\nfunc regionLeaders(client *api.Client, mem []*api.AgentMember) (map[string]string, error) {\n\t\/\/ Determine the unique regions.\n\tleaders := make(map[string]string)\n\tregions := make(map[string]struct{})\n\tfor _, m := range mem {\n\t\tregions[m.Tags[\"region\"]] = struct{}{}\n\t}\n\n\tif len(regions) == 0 {\n\t\treturn leaders, nil\n\t}\n\n\tstatus := client.Status()\n\tfor reg := range regions {\n\t\tl, err := status.RegionLeader(reg)\n\t\tif err != nil {\n\t\t\t\/\/ This error means that region has no leader.\n\t\t\tif strings.Contains(err.Error(), \"No cluster leader\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tleaders[reg] = l\n\t}\n\n\treturn leaders, nil\n}\n\n\/\/ Get the status of the mayaserver daemon.\n\/\/ TODO: use a proper CLI command once mayaserver has its own\nfunc (c *ServerMembersCommand) mserverstatus() int {\n\tvar runop int = 0\n\n\tc.Cmd = exec.Command(\"systemctl\", \"status\", \"mayaserver\")\n\n\tif runop = execute(c.Cmd, c.Ui); runop != 0 {\n\t\tc.Ui.Error(\"mayaserver not running\")\n\t}\n\n\treturn runop\n\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/git-lfs\/git-lfs\/filepathfilter\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/git-lfs\/git-lfs\/lfs\"\n\t\"github.com\/git-lfs\/git-lfs\/progress\"\n\t\"github.com\/git-lfs\/git-lfs\/tasklog\"\n\t\"github.com\/git-lfs\/git-lfs\/tq\"\n\t\"github.com\/rubyist\/tracerx\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tfetchRecentArg bool\n\tfetchAllArg bool\n\tfetchPruneArg bool\n)\n\nfunc getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {\n\tincludeFlag := cmd.Flag(\"include\")\n\texcludeFlag := cmd.Flag(\"exclude\")\n\tif includeFlag.Changed {\n\t\tinclude = &includeArg\n\t}\n\tif excludeFlag.Changed {\n\t\texclude = &excludeArg\n\t}\n\n\treturn\n}\n\nfunc fetchCommand(cmd *cobra.Command, args []string) {\n\trequireInRepo()\n\n\tvar refs []*git.Ref\n\n\tif len(args) > 0 {\n\t\t\/\/ Remote is first arg\n\t\tif err := cfg.SetValidRemote(args[0]); err != nil {\n\t\t\tExit(\"Invalid remote name %q: %s\", args[0], 
err)\n\t\t}\n\t}\n\n\tif len(args) > 1 {\n\t\tresolvedrefs, err := git.ResolveRefs(args[1:])\n\t\tif err != nil {\n\t\t\tPanic(err, \"Invalid ref argument: %v\", args[1:])\n\t\t}\n\t\trefs = resolvedrefs\n\t} else if !fetchAllArg {\n\t\tref, err := git.CurrentRef()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not fetch\")\n\t\t}\n\t\trefs = []*git.Ref{ref}\n\t}\n\n\tsuccess := true\n\tgitscanner := lfs.NewGitScanner(nil)\n\tdefer gitscanner.Close()\n\n\tinclude, exclude := getIncludeExcludeArgs(cmd)\n\tfetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git)\n\n\tif fetchAllArg {\n\t\tif fetchRecentArg || len(args) > 1 {\n\t\t\tExit(\"Cannot combine --all with ref arguments or --recent\")\n\t\t}\n\t\tif include != nil || exclude != nil {\n\t\t\tExit(\"Cannot combine --all with --include or --exclude\")\n\t\t}\n\t\tif len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {\n\t\t\tPrint(\"Ignoring global include \/ exclude paths to fulfil --all\")\n\t\t}\n\t\tsuccess = fetchAll()\n\n\t} else { \/\/ !all\n\t\tfilter := buildFilepathFilter(cfg, include, exclude)\n\n\t\t\/\/ Fetch refs sequentially per arg order; duplicates in later refs will be ignored\n\t\tfor _, ref := range refs {\n\t\t\tPrint(\"fetch: Fetching reference %s\", ref.Name)\n\t\t\ts := fetchRef(ref.Sha, filter)\n\t\t\tsuccess = success && s\n\t\t}\n\n\t\tif fetchRecentArg || fetchPruneCfg.FetchRecentAlways {\n\t\t\ts := fetchRecent(fetchPruneCfg, refs, filter)\n\t\t\tsuccess = success && s\n\t\t}\n\t}\n\n\tif fetchPruneArg {\n\t\tverify := fetchPruneCfg.PruneVerifyRemoteAlways\n\t\t\/\/ no dry-run or verbose options in fetch, assume false\n\t\tprune(fetchPruneCfg, verify, false, false)\n\t}\n\n\tif !success {\n\t\tc := getAPIClient()\n\t\te := c.Endpoints.Endpoint(\"download\", cfg.Remote())\n\t\tExit(\"error: failed to fetch some objects from '%s'\", e.Url)\n\t}\n}\n\nfunc pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) {\n\tvar pointers []*lfs.WrappedPointer\n\tvar multiErr error\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tif multiErr != nil {\n\t\t\t\tmultiErr = fmt.Errorf(\"%v\\n%v\", multiErr, err)\n\t\t\t} else {\n\t\t\t\tmultiErr = err\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tpointers = append(pointers, p)\n\t})\n\n\ttempgitscanner.Filter = filter\n\n\tif err := tempgitscanner.ScanTree(ref); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempgitscanner.Close()\n\treturn pointers, multiErr\n}\n\n\/\/ Fetch all binaries for a given ref (that we don't have already)\nfunc fetchRef(ref string, filter *filepathfilter.Filter) bool {\n\tpointers, err := pointersToFetchForRef(ref, filter)\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\treturn fetchAndReportToChan(pointers, filter, nil)\n}\n\n\/\/ Fetch all previous versions of objects from since to ref (not including final state at ref)\n\/\/ So this will fetch all the '-' sides of the diff from since to ref\nfunc fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool {\n\tvar pointers []*lfs.WrappedPointer\n\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not scan for Git LFS previous versions\")\n\t\t\treturn\n\t\t}\n\n\t\tpointers = append(pointers, p)\n\t})\n\n\ttempgitscanner.Filter = filter\n\n\tif err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil 
{\n\t\tExitWithError(err)\n\t}\n\n\ttempgitscanner.Close()\n\treturn fetchAndReportToChan(pointers, filter, nil)\n}\n\n\/\/ Fetch recent objects based on config\nfunc fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool {\n\tif fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 {\n\t\treturn true\n\t}\n\n\tok := true\n\t\/\/ Make a list of what unique commits we've already fetched for to avoid duplicating work\n\tuniqueRefShas := make(map[string]string, len(alreadyFetchedRefs))\n\tfor _, ref := range alreadyFetchedRefs {\n\t\tuniqueRefShas[ref.Sha] = ref.Name\n\t}\n\t\/\/ First find any other recent refs\n\tif fetchconf.FetchRecentRefsDays > 0 {\n\t\tPrint(\"fetch: Fetching recent branches within %v days\", fetchconf.FetchRecentRefsDays)\n\t\trefsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)\n\t\trefs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote())\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not scan for recent refs\")\n\t\t}\n\t\tfor _, ref := range refs {\n\t\t\t\/\/ Don't fetch for the same SHA twice\n\t\t\tif prevRefName, ok := uniqueRefShas[ref.Sha]; ok {\n\t\t\t\tif ref.Name != prevRefName {\n\t\t\t\t\ttracerx.Printf(\"Skipping fetch for %v, already fetched via %v\", ref.Name, prevRefName)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tuniqueRefShas[ref.Sha] = ref.Name\n\t\t\t\tPrint(\"fetch: Fetching reference %s\", ref.Name)\n\t\t\t\tk := fetchRef(ref.Sha, filter)\n\t\t\t\tok = ok && k\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ For every unique commit we've fetched, check recent commits too\n\tif fetchconf.FetchRecentCommitsDays > 0 {\n\t\tfor commit, refName := range uniqueRefShas {\n\t\t\t\/\/ We measure from the last commit at the ref\n\t\t\tsumm, err := git.GetCommitSummary(commit)\n\t\t\tif err != nil {\n\t\t\t\tError(\"Couldn't scan commits at %v: %v\", refName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tPrint(\"fetch: Fetching changes within %v days of %v\", fetchconf.FetchRecentCommitsDays, refName)\n\t\t\tcommitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)\n\t\t\tk := fetchPreviousVersions(commit, commitsSince, filter)\n\t\t\tok = ok && k\n\t\t}\n\n\t}\n\treturn ok\n}\n\nfunc fetchAll() bool {\n\tpointers := scanAll()\n\tPrint(\"fetch: Fetching all references...\")\n\treturn fetchAndReportToChan(pointers, nil, nil)\n}\n\nfunc scanAll() []*lfs.WrappedPointer {\n\t\/\/ This could be a long process so use the chan version & report progress\n\ttask := tasklog.NewSimpleTask()\n\tlogger := tasklog.NewLogger(OutputWriter)\n\tlogger.Enqueue(task)\n\tvar numObjs int64\n\n\t\/\/ use temp gitscanner to collect pointers\n\tvar pointers []*lfs.WrappedPointer\n\tvar multiErr error\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tif multiErr != nil {\n\t\t\t\tmultiErr = fmt.Errorf(\"%v\\n%v\", multiErr, err)\n\t\t\t} else {\n\t\t\t\tmultiErr = err\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tnumObjs++\n\t\ttask.Logf(\"fetch: %d object(s) found\", numObjs)\n\t\tpointers = append(pointers, p)\n\t})\n\n\tif err := tempgitscanner.ScanAll(nil); err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\ttempgitscanner.Close()\n\n\tif multiErr != nil {\n\t\tPanic(multiErr, \"Could not scan for Git LFS files\")\n\t}\n\n\ttask.Complete()\n\treturn pointers\n}\n\n\/\/ Fetch and report completion of each OID to a channel (optional, pass nil to skip)\n\/\/ Returns true if all 
completed with no errors, false if errors were written to stderr\/log\nfunc fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool {\n\tready, pointers, meter := readyAndMissingPointers(allpointers, filter)\n\tq := newDownloadQueue(\n\t\tgetTransferManifestOperationRemote(\"download\", cfg.Remote()),\n\t\tcfg.Remote(), tq.WithProgress(meter),\n\t)\n\n\tif out != nil {\n\t\t\/\/ If we already have it, or it won't be fetched\n\t\t\/\/ report it to chan immediately to support pull\/checkout\n\t\tfor _, p := range ready {\n\t\t\tout <- p\n\t\t}\n\n\t\tdlwatch := q.Watch()\n\n\t\tgo func() {\n\t\t\t\/\/ fetch only reports single OID, but OID *might* be referenced by multiple\n\t\t\t\/\/ WrappedPointers if same content is at multiple paths, so map oid->slice\n\t\t\toidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))\n\t\t\tfor _, pointer := range pointers {\n\t\t\t\tplist := oidToPointers[pointer.Oid]\n\t\t\t\toidToPointers[pointer.Oid] = append(plist, pointer)\n\t\t\t}\n\n\t\t\tfor t := range dlwatch {\n\t\t\t\tplist, ok := oidToPointers[t.Oid]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, p := range plist {\n\t\t\t\t\tout <- p\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(out)\n\t\t}()\n\t}\n\n\tfor _, p := range pointers {\n\t\ttracerx.Printf(\"fetch %v [%v]\", p.Name, p.Oid)\n\n\t\tq.Add(downloadTransfer(p))\n\t}\n\n\tprocessQueue := time.Now()\n\tq.Wait()\n\ttracerx.PerformanceSince(\"process queue\", processQueue)\n\n\tok := true\n\tfor _, err := range q.Errors() {\n\t\tok = false\n\t\tFullError(err)\n\t}\n\treturn ok\n}\n\nfunc readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *progress.ProgressMeter) {\n\tlogger := tasklog.NewLogger(os.Stdout)\n\tmeter := buildProgressMeter(false)\n\tlogger.Enqueue(meter)\n\n\tseen := make(map[string]bool, len(allpointers))\n\tmissing := make([]*lfs.WrappedPointer, 0, len(allpointers))\n\tready := make([]*lfs.WrappedPointer, 0, len(allpointers))\n\n\tfor _, p := range allpointers {\n\t\t\/\/ no need to download the same object multiple times\n\t\tif seen[p.Oid] {\n\t\t\tcontinue\n\t\t}\n\n\t\tseen[p.Oid] = true\n\n\t\t\/\/ no need to download objects that exist locally already\n\t\tlfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size)\n\t\tif cfg.LFSObjectExists(p.Oid, p.Size) {\n\t\t\tready = append(ready, p)\n\t\t\tcontinue\n\t\t}\n\n\t\tmissing = append(missing, p)\n\t\tmeter.Add(p.Size)\n\t}\n\n\treturn ready, missing, meter\n}\n\nfunc init() {\n\tRegisterCommand(\"fetch\", fetchCommand, func(cmd *cobra.Command) {\n\t\tcmd.Flags().StringVarP(&includeArg, \"include\", \"I\", \"\", \"Include a list of paths\")\n\t\tcmd.Flags().StringVarP(&excludeArg, \"exclude\", \"X\", \"\", \"Exclude a list of paths\")\n\t\tcmd.Flags().BoolVarP(&fetchRecentArg, \"recent\", \"r\", false, \"Fetch recent refs & commits\")\n\t\tcmd.Flags().BoolVarP(&fetchAllArg, \"all\", \"a\", false, \"Fetch all LFS files ever referenced\")\n\t\tcmd.Flags().BoolVarP(&fetchPruneArg, \"prune\", \"p\", false, \"After fetching, prune old data\")\n\t})\n}\ncommands\/fetch: use refspec in fetchpackage commands\n\nimport 
(\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/git-lfs\/git-lfs\/filepathfilter\"\n\t\"github.com\/git-lfs\/git-lfs\/git\"\n\t\"github.com\/git-lfs\/git-lfs\/lfs\"\n\t\"github.com\/git-lfs\/git-lfs\/progress\"\n\t\"github.com\/git-lfs\/git-lfs\/tasklog\"\n\t\"github.com\/git-lfs\/git-lfs\/tq\"\n\t\"github.com\/rubyist\/tracerx\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tfetchRecentArg bool\n\tfetchAllArg bool\n\tfetchPruneArg bool\n)\n\nfunc getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {\n\tincludeFlag := cmd.Flag(\"include\")\n\texcludeFlag := cmd.Flag(\"exclude\")\n\tif includeFlag.Changed {\n\t\tinclude = &includeArg\n\t}\n\tif excludeFlag.Changed {\n\t\texclude = &excludeArg\n\t}\n\n\treturn\n}\n\nfunc fetchCommand(cmd *cobra.Command, args []string) {\n\trequireInRepo()\n\n\tvar refs []*git.Ref\n\n\tif len(args) > 0 {\n\t\t\/\/ Remote is first arg\n\t\tif err := cfg.SetValidRemote(args[0]); err != nil {\n\t\t\tExit(\"Invalid remote name %q: %s\", args[0], err)\n\t\t}\n\t}\n\n\tif len(args) > 1 {\n\t\tresolvedrefs, err := git.ResolveRefs(args[1:])\n\t\tif err != nil {\n\t\t\tPanic(err, \"Invalid ref argument: %v\", args[1:])\n\t\t}\n\t\trefs = resolvedrefs\n\t} else if !fetchAllArg {\n\t\tref, err := git.CurrentRef()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not fetch\")\n\t\t}\n\t\trefs = []*git.Ref{ref}\n\t}\n\n\tsuccess := true\n\tgitscanner := lfs.NewGitScanner(nil)\n\tdefer gitscanner.Close()\n\n\tinclude, exclude := getIncludeExcludeArgs(cmd)\n\tfetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git)\n\n\tif fetchAllArg {\n\t\tif fetchRecentArg || len(args) > 1 {\n\t\t\tExit(\"Cannot combine --all with ref arguments or --recent\")\n\t\t}\n\t\tif include != nil || exclude != nil {\n\t\t\tExit(\"Cannot combine --all with --include or --exclude\")\n\t\t}\n\t\tif len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {\n\t\t\tPrint(\"Ignoring global include \/ exclude paths to fulfil --all\")\n\t\t}\n\t\tsuccess = fetchAll()\n\n\t} else { \/\/ !all\n\t\tfilter := buildFilepathFilter(cfg, include, exclude)\n\n\t\t\/\/ Fetch refs sequentially per arg order; duplicates in later refs will be ignored\n\t\tfor _, ref := range refs {\n\t\t\tPrint(\"fetch: Fetching reference %s\", ref.Refspec())\n\t\t\ts := fetchRef(ref.Sha, filter)\n\t\t\tsuccess = success && s\n\t\t}\n\n\t\tif fetchRecentArg || fetchPruneCfg.FetchRecentAlways {\n\t\t\ts := fetchRecent(fetchPruneCfg, refs, filter)\n\t\t\tsuccess = success && s\n\t\t}\n\t}\n\n\tif fetchPruneArg {\n\t\tverify := fetchPruneCfg.PruneVerifyRemoteAlways\n\t\t\/\/ no dry-run or verbose options in fetch, assume false\n\t\tprune(fetchPruneCfg, verify, false, false)\n\t}\n\n\tif !success {\n\t\tc := getAPIClient()\n\t\te := c.Endpoints.Endpoint(\"download\", cfg.Remote())\n\t\tExit(\"error: failed to fetch some objects from '%s'\", e.Url)\n\t}\n}\n\nfunc pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) {\n\tvar pointers []*lfs.WrappedPointer\n\tvar multiErr error\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tif multiErr != nil {\n\t\t\t\tmultiErr = fmt.Errorf(\"%v\\n%v\", multiErr, err)\n\t\t\t} else {\n\t\t\t\tmultiErr = err\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tpointers = append(pointers, p)\n\t})\n\n\ttempgitscanner.Filter = filter\n\n\tif err := tempgitscanner.ScanTree(ref); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempgitscanner.Close()\n\treturn pointers, multiErr\n}\n\n\/\/ Fetch all 
binaries for a given ref (that we don't have already)\nfunc fetchRef(ref string, filter *filepathfilter.Filter) bool {\n\tpointers, err := pointersToFetchForRef(ref, filter)\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\treturn fetchAndReportToChan(pointers, filter, nil)\n}\n\n\/\/ Fetch all previous versions of objects from since to ref (not including final state at ref)\n\/\/ So this will fetch all the '-' sides of the diff from since to ref\nfunc fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool {\n\tvar pointers []*lfs.WrappedPointer\n\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not scan for Git LFS previous versions\")\n\t\t\treturn\n\t\t}\n\n\t\tpointers = append(pointers, p)\n\t})\n\n\ttempgitscanner.Filter = filter\n\n\tif err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil {\n\t\tExitWithError(err)\n\t}\n\n\ttempgitscanner.Close()\n\treturn fetchAndReportToChan(pointers, filter, nil)\n}\n\n\/\/ Fetch recent objects based on config\nfunc fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool {\n\tif fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 {\n\t\treturn true\n\t}\n\n\tok := true\n\t\/\/ Make a list of what unique commits we've already fetched for to avoid duplicating work\n\tuniqueRefShas := make(map[string]string, len(alreadyFetchedRefs))\n\tfor _, ref := range alreadyFetchedRefs {\n\t\tuniqueRefShas[ref.Sha] = ref.Name\n\t}\n\t\/\/ First find any other recent refs\n\tif fetchconf.FetchRecentRefsDays > 0 {\n\t\tPrint(\"fetch: Fetching recent branches within %v days\", fetchconf.FetchRecentRefsDays)\n\t\trefsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)\n\t\trefs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote())\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not scan for recent refs\")\n\t\t}\n\t\tfor _, ref := range refs {\n\t\t\t\/\/ Don't fetch for the same SHA twice\n\t\t\tif prevRefName, ok := uniqueRefShas[ref.Sha]; ok {\n\t\t\t\tif ref.Name != prevRefName {\n\t\t\t\t\ttracerx.Printf(\"Skipping fetch for %v, already fetched via %v\", ref.Name, prevRefName)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tuniqueRefShas[ref.Sha] = ref.Name\n\t\t\t\tPrint(\"fetch: Fetching reference %s\", ref.Name)\n\t\t\t\tk := fetchRef(ref.Sha, filter)\n\t\t\t\tok = ok && k\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ For every unique commit we've fetched, check recent commits too\n\tif fetchconf.FetchRecentCommitsDays > 0 {\n\t\tfor commit, refName := range uniqueRefShas {\n\t\t\t\/\/ We measure from the last commit at the ref\n\t\t\tsumm, err := git.GetCommitSummary(commit)\n\t\t\tif err != nil {\n\t\t\t\tError(\"Couldn't scan commits at %v: %v\", refName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tPrint(\"fetch: Fetching changes within %v days of %v\", fetchconf.FetchRecentCommitsDays, refName)\n\t\t\tcommitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)\n\t\t\tk := fetchPreviousVersions(commit, commitsSince, filter)\n\t\t\tok = ok && k\n\t\t}\n\n\t}\n\treturn ok\n}\n\nfunc fetchAll() bool {\n\tpointers := scanAll()\n\tPrint(\"fetch: Fetching all references...\")\n\treturn fetchAndReportToChan(pointers, nil, nil)\n}\n\nfunc scanAll() []*lfs.WrappedPointer {\n\t\/\/ This could be a long process so use the chan version & report progress\n\ttask := 
tasklog.NewSimpleTask()\n\tlogger := tasklog.NewLogger(OutputWriter)\n\tlogger.Enqueue(task)\n\tvar numObjs int64\n\n\t\/\/ use temp gitscanner to collect pointers\n\tvar pointers []*lfs.WrappedPointer\n\tvar multiErr error\n\ttempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {\n\t\tif err != nil {\n\t\t\tif multiErr != nil {\n\t\t\t\tmultiErr = fmt.Errorf(\"%v\\n%v\", multiErr, err)\n\t\t\t} else {\n\t\t\t\tmultiErr = err\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tnumObjs++\n\t\ttask.Logf(\"fetch: %d object(s) found\", numObjs)\n\t\tpointers = append(pointers, p)\n\t})\n\n\tif err := tempgitscanner.ScanAll(nil); err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\ttempgitscanner.Close()\n\n\tif multiErr != nil {\n\t\tPanic(multiErr, \"Could not scan for Git LFS files\")\n\t}\n\n\ttask.Complete()\n\treturn pointers\n}\n\n\/\/ Fetch and report completion of each OID to a channel (optional, pass nil to skip)\n\/\/ Returns true if all completed with no errors, false if errors were written to stderr\/log\nfunc fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool {\n\tready, pointers, meter := readyAndMissingPointers(allpointers, filter)\n\tq := newDownloadQueue(\n\t\tgetTransferManifestOperationRemote(\"download\", cfg.Remote()),\n\t\tcfg.Remote(), tq.WithProgress(meter),\n\t)\n\n\tif out != nil {\n\t\t\/\/ If we already have it, or it won't be fetched\n\t\t\/\/ report it to chan immediately to support pull\/checkout\n\t\tfor _, p := range ready {\n\t\t\tout <- p\n\t\t}\n\n\t\tdlwatch := q.Watch()\n\n\t\tgo func() {\n\t\t\t\/\/ fetch only reports single OID, but OID *might* be referenced by multiple\n\t\t\t\/\/ WrappedPointers if same content is at multiple paths, so map oid->slice\n\t\t\toidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))\n\t\t\tfor _, pointer := range pointers {\n\t\t\t\tplist := oidToPointers[pointer.Oid]\n\t\t\t\toidToPointers[pointer.Oid] = append(plist, pointer)\n\t\t\t}\n\n\t\t\tfor t := range dlwatch {\n\t\t\t\tplist, ok := oidToPointers[t.Oid]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, p := range plist {\n\t\t\t\t\tout <- p\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(out)\n\t\t}()\n\t}\n\n\tfor _, p := range pointers {\n\t\ttracerx.Printf(\"fetch %v [%v]\", p.Name, p.Oid)\n\n\t\tq.Add(downloadTransfer(p))\n\t}\n\n\tprocessQueue := time.Now()\n\tq.Wait()\n\ttracerx.PerformanceSince(\"process queue\", processQueue)\n\n\tok := true\n\tfor _, err := range q.Errors() {\n\t\tok = false\n\t\tFullError(err)\n\t}\n\treturn ok\n}\n\nfunc readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *progress.ProgressMeter) {\n\tlogger := tasklog.NewLogger(os.Stdout)\n\tmeter := buildProgressMeter(false)\n\tlogger.Enqueue(meter)\n\n\tseen := make(map[string]bool, len(allpointers))\n\tmissing := make([]*lfs.WrappedPointer, 0, len(allpointers))\n\tready := make([]*lfs.WrappedPointer, 0, len(allpointers))\n\n\tfor _, p := range allpointers {\n\t\t\/\/ no need to download the same object multiple times\n\t\tif seen[p.Oid] {\n\t\t\tcontinue\n\t\t}\n\n\t\tseen[p.Oid] = true\n\n\t\t\/\/ no need to download objects that exist locally already\n\t\tlfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size)\n\t\tif cfg.LFSObjectExists(p.Oid, p.Size) {\n\t\t\tready = append(ready, p)\n\t\t\tcontinue\n\t\t}\n\n\t\tmissing = append(missing, p)\n\t\tmeter.Add(p.Size)\n\t}\n\n\treturn 
ready, missing, meter\n}\n\nfunc init() {\n\tRegisterCommand(\"fetch\", fetchCommand, func(cmd *cobra.Command) {\n\t\tcmd.Flags().StringVarP(&includeArg, \"include\", \"I\", \"\", \"Include a list of paths\")\n\t\tcmd.Flags().StringVarP(&excludeArg, \"exclude\", \"X\", \"\", \"Exclude a list of paths\")\n\t\tcmd.Flags().BoolVarP(&fetchRecentArg, \"recent\", \"r\", false, \"Fetch recent refs & commits\")\n\t\tcmd.Flags().BoolVarP(&fetchAllArg, \"all\", \"a\", false, \"Fetch all LFS files ever referenced\")\n\t\tcmd.Flags().BoolVarP(&fetchPruneArg, \"prune\", \"p\", false, \"After fetching, prune old data\")\n\t})\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n)\n\nvar (\n\ttrackCmd = &cobra.Command{\n\t\tUse: \"track\",\n\t\tShort: \"Manipulate .gitattributes\",\n\t\tRun: trackCommand,\n\t}\n)\n\nfunc trackCommand(cmd *cobra.Command, args []string) {\n\tif lfs.LocalGitDir == \"\" {\n\t\tPrint(\"Not a git repository.\")\n\t\tos.Exit(128)\n\t}\n\n\tlfs.InstallHooks(false)\n\tknownPaths := findPaths()\n\n\tif len(args) == 0 {\n\t\tPrint(\"Listing tracked paths\")\n\t\tfor _, t := range knownPaths {\n\t\t\tPrint(\" %s (%s)\", t.Path, t.Source)\n\t\t}\n\t\treturn\n\t}\n\n\taddTrailingLinebreak := needsTrailingLinebreak(\".gitattributes\")\n\tattributesFile, err := os.OpenFile(\".gitattributes\", os.O_RDWR|os.O_APPEND|os.O_CREATE, 0660)\n\tif err != nil {\n\t\tPrint(\"Error opening .gitattributes file\")\n\t\treturn\n\t}\n\tdefer attributesFile.Close()\n\n\tif addTrailingLinebreak {\n\t\tif _, err := attributesFile.WriteString(\"\\n\"); err != nil {\n\t\t\tPrint(\"Error writing to .gitattributes\")\n\t\t}\n\t}\n\n\twd, _ := os.Getwd()\n\nArgsLoop:\n\tfor _, t := range args {\n\t\tabsT, relT := absRelPath(t, wd)\n\t\tfor _, k := range knownPaths {\n\t\t\tabsK, _ := absRelPath(k.Path, filepath.Join(wd, filepath.Dir(k.Source)))\n\t\t\tif absT == absK {\n\t\t\t\tPrint(\"%s already supported\", t)\n\t\t\t\tcontinue ArgsLoop\n\t\t\t}\n\t\t}\n\n\t\tencodedArg := strings.Replace(relT, \" \", \"[[:space:]]\", -1)\n\t\t_, err := attributesFile.WriteString(fmt.Sprintf(\"%s filter=lfs diff=lfs merge=lfs -crlf\\n\", encodedArg))\n\t\tif err != nil {\n\t\t\tPrint(\"Error adding path %s\", t)\n\t\t\tcontinue\n\t\t}\n\t\tPrint(\"Tracking %s\", t)\n\t}\n}\n\ntype mediaPath struct {\n\tPath string\n\tSource string\n}\n\nfunc findPaths() []mediaPath {\n\tpaths := make([]mediaPath, 0)\n\twd, _ := os.Getwd()\n\n\tfor _, path := range findAttributeFiles() {\n\t\tattributes, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tscanner := bufio.NewScanner(attributes)\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tif strings.Contains(line, \"filter=lfs\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\trelPath, _ := filepath.Rel(wd, path)\n\t\t\t\tpaths = append(paths, mediaPath{Path: fields[0], Source: relPath})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn paths\n}\n\nfunc findAttributeFiles() []string {\n\tpaths := make([]string, 0)\n\n\trepoAttributes := filepath.Join(lfs.LocalGitDir, \"info\", \"attributes\")\n\tif info, err := os.Stat(repoAttributes); err == nil && !info.IsDir() {\n\t\tpaths = append(paths, repoAttributes)\n\t}\n\n\tfilepath.Walk(lfs.LocalWorkingDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif 
!info.IsDir() && (filepath.Base(path) == \".gitattributes\") {\n\t\t\tpaths = append(paths, path)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn paths\n}\n\nfunc needsTrailingLinebreak(filename string) bool {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer file.Close()\n\n\tbuf := make([]byte, 16384)\n\tbytesRead := 0\n\tfor {\n\t\tn, err := file.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn false\n\t\t}\n\t\tbytesRead = n\n\t}\n\n\treturn !strings.HasSuffix(string(buf[0:bytesRead]), \"\\n\")\n}\n\n\/\/ absRelPath takes a path and a working directory and\n\/\/ returns an absolute and a relative representation of path based on the working directory\nfunc absRelPath(path, wd string) (string, string) {\n\tif filepath.IsAbs(path) {\n\t\trelPath, _ := filepath.Rel(wd, path)\n\t\treturn path, relPath\n\t}\n\n\tabsPath := filepath.Join(wd, path)\n\treturn absPath, path\n}\n\nfunc init() {\n\tRootCmd.AddCommand(trackCmd)\n}\nアー アアアア アーアーpackage commands\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n)\n\nvar (\n\ttrackCmd = &cobra.Command{\n\t\tUse: \"track\",\n\t\tShort: \"Manipulate .gitattributes\",\n\t\tRun: trackCommand,\n\t}\n)\n\nfunc trackCommand(cmd *cobra.Command, args []string) {\n\tif lfs.LocalGitDir == \"\" {\n\t\tPrint(\"Not a git repository.\")\n\t\tos.Exit(128)\n\t}\n\n\tlfs.InstallHooks(false)\n\tknownPaths := findPaths()\n\n\tif len(args) == 0 {\n\t\tPrint(\"Listing tracked paths\")\n\t\tfor _, t := range knownPaths {\n\t\t\tPrint(\" %s (%s)\", t.Path, t.Source)\n\t\t}\n\t\treturn\n\t}\n\n\taddTrailingLinebreak := needsTrailingLinebreak(\".gitattributes\")\n\tattributesFile, err := os.OpenFile(\".gitattributes\", os.O_RDWR|os.O_APPEND|os.O_CREATE, 0660)\n\tif err != nil {\n\t\tPrint(\"Error opening .gitattributes file\")\n\t\treturn\n\t}\n\tdefer attributesFile.Close()\n\n\tif addTrailingLinebreak {\n\t\tif _, err := attributesFile.WriteString(\"\\n\"); err != nil {\n\t\t\tPrint(\"Error writing to .gitattributes\")\n\t\t}\n\t}\n\n\twd, _ := os.Getwd()\n\nArgsLoop:\n\tfor _, t := range args {\n\t\tabsT, relT := absRelPath(t, wd)\n\n\t\tif !filepath.HasPrefix(absT, lfs.LocalWorkingDir) {\n\t\t\tPrint(\"%s is outside repository\", t)\n\t\t\tos.Exit(128)\n\t\t}\n\n\t\tfor _, k := range knownPaths {\n\t\t\tabsK, _ := absRelPath(k.Path, filepath.Join(wd, filepath.Dir(k.Source)))\n\t\t\tif absT == absK {\n\t\t\t\tPrint(\"%s already supported\", t)\n\t\t\t\tcontinue ArgsLoop\n\t\t\t}\n\t\t}\n\n\t\tencodedArg := strings.Replace(relT, \" \", \"[[:space:]]\", -1)\n\t\t_, err := attributesFile.WriteString(fmt.Sprintf(\"%s filter=lfs diff=lfs merge=lfs -crlf\\n\", encodedArg))\n\t\tif err != nil {\n\t\t\tPrint(\"Error adding path %s\", t)\n\t\t\tcontinue\n\t\t}\n\t\tPrint(\"Tracking %s\", t)\n\t}\n}\n\ntype mediaPath struct {\n\tPath string\n\tSource string\n}\n\nfunc findPaths() []mediaPath {\n\tpaths := make([]mediaPath, 0)\n\twd, _ := os.Getwd()\n\n\tfor _, path := range findAttributeFiles() {\n\t\tattributes, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tscanner := bufio.NewScanner(attributes)\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tif strings.Contains(line, \"filter=lfs\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\trelPath, _ := filepath.Rel(wd, path)\n\t\t\t\tpaths = append(paths, mediaPath{Path: 
fields[0], Source: relPath})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn paths\n}\n\nfunc findAttributeFiles() []string {\n\tpaths := make([]string, 0)\n\n\trepoAttributes := filepath.Join(lfs.LocalGitDir, \"info\", \"attributes\")\n\tif info, err := os.Stat(repoAttributes); err == nil && !info.IsDir() {\n\t\tpaths = append(paths, repoAttributes)\n\t}\n\n\tfilepath.Walk(lfs.LocalWorkingDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !info.IsDir() && (filepath.Base(path) == \".gitattributes\") {\n\t\t\tpaths = append(paths, path)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn paths\n}\n\nfunc needsTrailingLinebreak(filename string) bool {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer file.Close()\n\n\tbuf := make([]byte, 16384)\n\tbytesRead := 0\n\tfor {\n\t\tn, err := file.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn false\n\t\t}\n\t\tbytesRead = n\n\t}\n\n\treturn !strings.HasSuffix(string(buf[0:bytesRead]), \"\\n\")\n}\n\n\/\/ absRelPath takes a path and a working directory and\n\/\/ returns an absolute and a relative representation of path based on the working directory\nfunc absRelPath(path, wd string) (string, string) {\n\tif filepath.IsAbs(path) {\n\t\trelPath, _ := filepath.Rel(wd, path)\n\t\treturn path, relPath\n\t}\n\n\tabsPath := filepath.Join(wd, path)\n\treturn absPath, path\n}\n\nfunc init() {\n\tRootCmd.AddCommand(trackCmd)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2018 The Doctl Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com\/digitalocean\/doctl\"\n\t\"github.com\/digitalocean\/doctl\/do\"\n\tdomocks \"github.com\/digitalocean\/doctl\/do\/mocks\"\n\t\"github.com\/digitalocean\/doctl\/pkg\/runner\"\n\t\"github.com\/digitalocean\/doctl\/pkg\/ssh\"\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\ttestDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 1,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"a-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"8.8.8.8\", Type: \"public\"},\n\t\t\t\t\t{IPAddress: \"172.16.1.2\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: &godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\tanotherTestDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 3,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"another-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"8.8.8.9\", Type: \"public\"},\n\t\t\t\t\t{IPAddress: \"172.16.1.4\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: 
&godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttestPrivateDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 1,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"a-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"172.16.1.2\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: &godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttestDropletList = do.Droplets{testDroplet, anotherTestDroplet}\n\ttestPrivateDropletList = do.Droplets{testPrivateDroplet}\n\ttestKernel = do.Kernel{Kernel: &godo.Kernel{ID: 1}}\n\ttestKernelList = do.Kernels{testKernel}\n\ttestFloatingIP = do.FloatingIP{\n\t\tFloatingIP: &godo.FloatingIP{\n\t\t\tDroplet: testDroplet.Droplet,\n\t\t\tRegion: testDroplet.Region,\n\t\t\tIP: \"127.0.0.1\",\n\t\t},\n\t}\n\ttestFloatingIPList = do.FloatingIPs{testFloatingIP}\n\n\ttestSnapshot = do.Snapshot{\n\t\tSnapshot: &godo.Snapshot{\n\t\t\tID: \"1\",\n\t\t\tName: \"test-snapshot\",\n\t\t\tRegions: []string{\"dev0\"},\n\t\t},\n\t}\n\ttestSnapshotSecondary = do.Snapshot{\n\t\tSnapshot: &godo.Snapshot{\n\t\t\tID: \"2\",\n\t\t\tName: \"test-snapshot-2\",\n\t\t\tRegions: []string{\"dev1\", \"dev2\"},\n\t\t},\n\t}\n\n\ttestSnapshotList = do.Snapshots{testSnapshot, testSnapshotSecondary}\n)\n\nfunc assertCommandNames(t *testing.T, cmd *Command, expected ...string) {\n\tvar names []string\n\n\tfor _, c := range cmd.Commands() {\n\t\tnames = append(names, c.Name())\n\t\tif c.Name() == \"list\" {\n\t\t\tassert.Contains(t, c.Aliases, \"ls\", \"Missing 'ls' alias for 'list' command.\")\n\t\t}\n\t}\n\n\tsort.Strings(expected)\n\tsort.Strings(names)\n\tassert.Equal(t, expected, names)\n}\n\ntype testFn func(c *CmdConfig, tm *tcMocks)\n\ntype tcMocks struct {\n\tkeys domocks.KeysService\n\tsizes domocks.SizesService\n\tregions domocks.RegionsService\n\timages domocks.ImagesService\n\timageActions domocks.ImageActionsService\n\tfloatingIPs domocks.FloatingIPsService\n\tfloatingIPActions domocks.FloatingIPActionsService\n\tdroplets domocks.DropletsService\n\tdropletActions domocks.DropletActionsService\n\tdomains domocks.DomainsService\n\tvolumes domocks.VolumesService\n\tvolumeActions domocks.VolumeActionsService\n\tactions domocks.ActionsService\n\taccount domocks.AccountService\n\ttags domocks.TagsService\n\tsnapshots domocks.SnapshotsService\n\tcertificates domocks.CertificatesService\n\tloadBalancers domocks.LoadBalancersService\n\tfirewalls domocks.FirewallsService\n\tcdns domocks.CDNsService\n\tprojects domocks.ProjectsService\n\tkubernetes domocks.KubernetesService\n\tdatabases domocks.DatabasesService\n}\n\nfunc withTestClient(t *testing.T, tFn testFn) {\n\togConfig := doctl.DoitConfig\n\tdefer func() {\n\t\tdoctl.DoitConfig = ogConfig\n\t}()\n\n\tcfg := NewTestConfig()\n\tdoctl.DoitConfig = cfg\n\n\ttm := &tcMocks{}\n\n\tconfig := &CmdConfig{\n\t\tNS: \"test\",\n\t\tDoit: cfg,\n\t\tOut: ioutil.Discard,\n\n\t\t\/\/ can stub this out, since the return is dictated by the mocks.\n\t\tinitServices: func(c *CmdConfig) error { return nil },\n\n\t\tgetContextAccessToken: func() string {\n\t\t\treturn viper.GetString(doctl.ArgAccessToken)\n\t\t},\n\n\t\tsetContextAccessToken: func(token string) {},\n\n\t\tKeys: func() do.KeysService { return &tm.keys },\n\t\tSizes: func() do.SizesService { return &tm.sizes },\n\t\tRegions: func() do.RegionsService { return 
&tm.regions },\n\t\tImages: func() do.ImagesService { return &tm.images },\n\t\tImageActions: func() do.ImageActionsService { return &tm.imageActions },\n\t\tFloatingIPs: func() do.FloatingIPsService { return &tm.floatingIPs },\n\t\tFloatingIPActions: func() do.FloatingIPActionsService { return &tm.floatingIPActions },\n\t\tDroplets: func() do.DropletsService { return &tm.droplets },\n\t\tDropletActions: func() do.DropletActionsService { return &tm.dropletActions },\n\t\tDomains: func() do.DomainsService { return &tm.domains },\n\t\tActions: func() do.ActionsService { return &tm.actions },\n\t\tAccount: func() do.AccountService { return &tm.account },\n\t\tTags: func() do.TagsService { return &tm.tags },\n\t\tVolumes: func() do.VolumesService { return &tm.volumes },\n\t\tVolumeActions: func() do.VolumeActionsService { return &tm.volumeActions },\n\t\tSnapshots: func() do.SnapshotsService { return &tm.snapshots },\n\t\tCertificates: func() do.CertificatesService { return &tm.certificates },\n\t\tLoadBalancers: func() do.LoadBalancersService { return &tm.loadBalancers },\n\t\tFirewalls: func() do.FirewallsService { return &tm.firewalls },\n\t\tCDNs: func() do.CDNsService { return &tm.cdns },\n\t\tProjects: func() do.ProjectsService { return &tm.projects },\n\t\tKubernetes: func() do.KubernetesService { return &tm.kubernetes },\n\t\tDatabases: func() do.DatabasesService { return &tm.databases },\n\t}\n\n\ttFn(config, tm)\n\n\tassert.True(t, tm.account.AssertExpectations(t))\n\tassert.True(t, tm.actions.AssertExpectations(t))\n\tassert.True(t, tm.certificates.AssertExpectations(t))\n\tassert.True(t, tm.domains.AssertExpectations(t))\n\tassert.True(t, tm.dropletActions.AssertExpectations(t))\n\tassert.True(t, tm.droplets.AssertExpectations(t))\n\tassert.True(t, tm.floatingIPActions.AssertExpectations(t))\n\tassert.True(t, tm.floatingIPs.AssertExpectations(t))\n\tassert.True(t, tm.imageActions.AssertExpectations(t))\n\tassert.True(t, tm.images.AssertExpectations(t))\n\tassert.True(t, tm.regions.AssertExpectations(t))\n\tassert.True(t, tm.sizes.AssertExpectations(t))\n\tassert.True(t, tm.keys.AssertExpectations(t))\n\tassert.True(t, tm.tags.AssertExpectations(t))\n\tassert.True(t, tm.volumes.AssertExpectations(t))\n\tassert.True(t, tm.volumeActions.AssertExpectations(t))\n\tassert.True(t, tm.snapshots.AssertExpectations(t))\n\tassert.True(t, tm.loadBalancers.AssertExpectations(t))\n\tassert.True(t, tm.firewalls.AssertExpectations(t))\n\tassert.True(t, tm.cdns.AssertExpectations(t))\n\tassert.True(t, tm.projects.AssertExpectations(t))\n\tassert.True(t, tm.kubernetes.AssertExpectations(t))\n\tassert.True(t, tm.databases.AssertExpectations(t))\n}\n\ntype TestConfig struct {\n\tSSHFn func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner\n\tv *viper.Viper\n\tIsSetMap map[string]bool\n}\n\nvar _ doctl.Config = &TestConfig{}\n\nfunc NewTestConfig() *TestConfig {\n\treturn &TestConfig{\n\t\tSSHFn: func(u, h, kp string, p int, opts ssh.Options) runner.Runner {\n\t\t\treturn &doctl.MockRunner{}\n\t\t},\n\t\tv: viper.New(),\n\t\tIsSetMap: make(map[string]bool),\n\t}\n}\n\nvar _ doctl.Config = &TestConfig{}\n\nfunc (c *TestConfig) GetGodoClient(trace bool, accessToken string) (*godo.Client, error) {\n\treturn &godo.Client{}, nil\n}\n\nfunc (c *TestConfig) SSH(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {\n\treturn c.SSHFn(user, host, keyPath, port, opts)\n}\n\nfunc (c *TestConfig) Set(ns, key string, val interface{}) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, 
key)\n\tc.v.Set(nskey, val)\n\tc.IsSetMap[key] = true\n}\n\nfunc (c *TestConfig) IsSet(key string) bool {\n\treturn c.IsSetMap[key]\n}\n\nfunc (c *TestConfig) GetString(ns, key string) (string, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetString(nskey), nil\n}\n\nfunc (c *TestConfig) GetInt(ns, key string) (int, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetInt(nskey), nil\n}\n\nfunc (c *TestConfig) GetStringSlice(ns, key string) ([]string, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetStringSlice(nskey), nil\n}\n\nfunc (c *TestConfig) GetBool(ns, key string) (bool, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetBool(nskey), nil\n}\nTest that all 'get' and 'list' commands support the 'format' flag.\/*\nCopyright 2018 The Doctl Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com\/digitalocean\/doctl\"\n\t\"github.com\/digitalocean\/doctl\/do\"\n\tdomocks \"github.com\/digitalocean\/doctl\/do\/mocks\"\n\t\"github.com\/digitalocean\/doctl\/pkg\/runner\"\n\t\"github.com\/digitalocean\/doctl\/pkg\/ssh\"\n\t\"github.com\/digitalocean\/godo\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\ttestDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 1,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"a-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"8.8.8.8\", Type: \"public\"},\n\t\t\t\t\t{IPAddress: \"172.16.1.2\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: &godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\tanotherTestDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 3,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"another-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"8.8.8.9\", Type: \"public\"},\n\t\t\t\t\t{IPAddress: \"172.16.1.4\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: &godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttestPrivateDroplet = do.Droplet{\n\t\tDroplet: &godo.Droplet{\n\t\t\tID: 1,\n\t\t\tImage: &godo.Image{\n\t\t\t\tID: 1,\n\t\t\t\tName: \"an-image\",\n\t\t\t\tDistribution: \"DOOS\",\n\t\t\t},\n\t\t\tName: \"a-droplet\",\n\t\t\tNetworks: &godo.Networks{\n\t\t\t\tV4: []godo.NetworkV4{\n\t\t\t\t\t{IPAddress: \"172.16.1.2\", Type: \"private\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: &godo.Region{\n\t\t\t\tSlug: \"test0\",\n\t\t\t\tName: \"test 0\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttestDropletList = do.Droplets{testDroplet, anotherTestDroplet}\n\ttestPrivateDropletList = do.Droplets{testPrivateDroplet}\n\ttestKernel = do.Kernel{Kernel: 
&godo.Kernel{ID: 1}}\n\ttestKernelList = do.Kernels{testKernel}\n\ttestFloatingIP = do.FloatingIP{\n\t\tFloatingIP: &godo.FloatingIP{\n\t\t\tDroplet: testDroplet.Droplet,\n\t\t\tRegion: testDroplet.Region,\n\t\t\tIP: \"127.0.0.1\",\n\t\t},\n\t}\n\ttestFloatingIPList = do.FloatingIPs{testFloatingIP}\n\n\ttestSnapshot = do.Snapshot{\n\t\tSnapshot: &godo.Snapshot{\n\t\t\tID: \"1\",\n\t\t\tName: \"test-snapshot\",\n\t\t\tRegions: []string{\"dev0\"},\n\t\t},\n\t}\n\ttestSnapshotSecondary = do.Snapshot{\n\t\tSnapshot: &godo.Snapshot{\n\t\t\tID: \"2\",\n\t\t\tName: \"test-snapshot-2\",\n\t\t\tRegions: []string{\"dev1\", \"dev2\"},\n\t\t},\n\t}\n\n\ttestSnapshotList = do.Snapshots{testSnapshot, testSnapshotSecondary}\n)\n\nfunc assertCommandNames(t *testing.T, cmd *Command, expected ...string) {\n\tvar names []string\n\n\tfor _, c := range cmd.Commands() {\n\t\tnames = append(names, c.Name())\n\t\tif c.Name() == \"list\" {\n\t\t\tassert.Contains(t, c.Aliases, \"ls\", \"Missing 'ls' alias for 'list' command.\")\n\t\t\tassert.NotNil(t, c.Flags().Lookup(\"format\"), \"Missing 'format' flag for 'list' command.\")\n\t\t}\n\t\tif c.Name() == \"get\" {\n\t\t\tassert.NotNil(t, c.Flags().Lookup(\"format\"), \"Missing 'format' flag for 'get' command.\")\n\t\t}\n\t}\n\n\tsort.Strings(expected)\n\tsort.Strings(names)\n\tassert.Equal(t, expected, names)\n}\n\ntype testFn func(c *CmdConfig, tm *tcMocks)\n\ntype tcMocks struct {\n\tkeys domocks.KeysService\n\tsizes domocks.SizesService\n\tregions domocks.RegionsService\n\timages domocks.ImagesService\n\timageActions domocks.ImageActionsService\n\tfloatingIPs domocks.FloatingIPsService\n\tfloatingIPActions domocks.FloatingIPActionsService\n\tdroplets domocks.DropletsService\n\tdropletActions domocks.DropletActionsService\n\tdomains domocks.DomainsService\n\tvolumes domocks.VolumesService\n\tvolumeActions domocks.VolumeActionsService\n\tactions domocks.ActionsService\n\taccount domocks.AccountService\n\ttags domocks.TagsService\n\tsnapshots domocks.SnapshotsService\n\tcertificates domocks.CertificatesService\n\tloadBalancers domocks.LoadBalancersService\n\tfirewalls domocks.FirewallsService\n\tcdns domocks.CDNsService\n\tprojects domocks.ProjectsService\n\tkubernetes domocks.KubernetesService\n\tdatabases domocks.DatabasesService\n}\n\nfunc withTestClient(t *testing.T, tFn testFn) {\n\togConfig := doctl.DoitConfig\n\tdefer func() {\n\t\tdoctl.DoitConfig = ogConfig\n\t}()\n\n\tcfg := NewTestConfig()\n\tdoctl.DoitConfig = cfg\n\n\ttm := &tcMocks{}\n\n\tconfig := &CmdConfig{\n\t\tNS: \"test\",\n\t\tDoit: cfg,\n\t\tOut: ioutil.Discard,\n\n\t\t\/\/ can stub this out, since the return is dictated by the mocks.\n\t\tinitServices: func(c *CmdConfig) error { return nil },\n\n\t\tgetContextAccessToken: func() string {\n\t\t\treturn viper.GetString(doctl.ArgAccessToken)\n\t\t},\n\n\t\tsetContextAccessToken: func(token string) {},\n\n\t\tKeys: func() do.KeysService { return &tm.keys },\n\t\tSizes: func() do.SizesService { return &tm.sizes },\n\t\tRegions: func() do.RegionsService { return &tm.regions },\n\t\tImages: func() do.ImagesService { return &tm.images },\n\t\tImageActions: func() do.ImageActionsService { return &tm.imageActions },\n\t\tFloatingIPs: func() do.FloatingIPsService { return &tm.floatingIPs },\n\t\tFloatingIPActions: func() do.FloatingIPActionsService { return &tm.floatingIPActions },\n\t\tDroplets: func() do.DropletsService { return &tm.droplets },\n\t\tDropletActions: func() do.DropletActionsService { return &tm.dropletActions },\n\t\tDomains: func() 
do.DomainsService { return &tm.domains },\n\t\tActions: func() do.ActionsService { return &tm.actions },\n\t\tAccount: func() do.AccountService { return &tm.account },\n\t\tTags: func() do.TagsService { return &tm.tags },\n\t\tVolumes: func() do.VolumesService { return &tm.volumes },\n\t\tVolumeActions: func() do.VolumeActionsService { return &tm.volumeActions },\n\t\tSnapshots: func() do.SnapshotsService { return &tm.snapshots },\n\t\tCertificates: func() do.CertificatesService { return &tm.certificates },\n\t\tLoadBalancers: func() do.LoadBalancersService { return &tm.loadBalancers },\n\t\tFirewalls: func() do.FirewallsService { return &tm.firewalls },\n\t\tCDNs: func() do.CDNsService { return &tm.cdns },\n\t\tProjects: func() do.ProjectsService { return &tm.projects },\n\t\tKubernetes: func() do.KubernetesService { return &tm.kubernetes },\n\t\tDatabases: func() do.DatabasesService { return &tm.databases },\n\t}\n\n\ttFn(config, tm)\n\n\tassert.True(t, tm.account.AssertExpectations(t))\n\tassert.True(t, tm.actions.AssertExpectations(t))\n\tassert.True(t, tm.certificates.AssertExpectations(t))\n\tassert.True(t, tm.domains.AssertExpectations(t))\n\tassert.True(t, tm.dropletActions.AssertExpectations(t))\n\tassert.True(t, tm.droplets.AssertExpectations(t))\n\tassert.True(t, tm.floatingIPActions.AssertExpectations(t))\n\tassert.True(t, tm.floatingIPs.AssertExpectations(t))\n\tassert.True(t, tm.imageActions.AssertExpectations(t))\n\tassert.True(t, tm.images.AssertExpectations(t))\n\tassert.True(t, tm.regions.AssertExpectations(t))\n\tassert.True(t, tm.sizes.AssertExpectations(t))\n\tassert.True(t, tm.keys.AssertExpectations(t))\n\tassert.True(t, tm.tags.AssertExpectations(t))\n\tassert.True(t, tm.volumes.AssertExpectations(t))\n\tassert.True(t, tm.volumeActions.AssertExpectations(t))\n\tassert.True(t, tm.snapshots.AssertExpectations(t))\n\tassert.True(t, tm.loadBalancers.AssertExpectations(t))\n\tassert.True(t, tm.firewalls.AssertExpectations(t))\n\tassert.True(t, tm.cdns.AssertExpectations(t))\n\tassert.True(t, tm.projects.AssertExpectations(t))\n\tassert.True(t, tm.kubernetes.AssertExpectations(t))\n\tassert.True(t, tm.databases.AssertExpectations(t))\n}\n\ntype TestConfig struct {\n\tSSHFn func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner\n\tv *viper.Viper\n\tIsSetMap map[string]bool\n}\n\nvar _ doctl.Config = &TestConfig{}\n\nfunc NewTestConfig() *TestConfig {\n\treturn &TestConfig{\n\t\tSSHFn: func(u, h, kp string, p int, opts ssh.Options) runner.Runner {\n\t\t\treturn &doctl.MockRunner{}\n\t\t},\n\t\tv: viper.New(),\n\t\tIsSetMap: make(map[string]bool),\n\t}\n}\n\nvar _ doctl.Config = &TestConfig{}\n\nfunc (c *TestConfig) GetGodoClient(trace bool, accessToken string) (*godo.Client, error) {\n\treturn &godo.Client{}, nil\n}\n\nfunc (c *TestConfig) SSH(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {\n\treturn c.SSHFn(user, host, keyPath, port, opts)\n}\n\nfunc (c *TestConfig) Set(ns, key string, val interface{}) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\tc.v.Set(nskey, val)\n\tc.IsSetMap[key] = true\n}\n\nfunc (c *TestConfig) IsSet(key string) bool {\n\treturn c.IsSetMap[key]\n}\n\nfunc (c *TestConfig) GetString(ns, key string) (string, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetString(nskey), nil\n}\n\nfunc (c *TestConfig) GetInt(ns, key string) (int, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetInt(nskey), nil\n}\n\nfunc (c *TestConfig) GetStringSlice(ns, key string) ([]string, 
error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetStringSlice(nskey), nil\n}\n\nfunc (c *TestConfig) GetBool(ns, key string) (bool, error) {\n\tnskey := fmt.Sprintf(\"%s-%s\", ns, key)\n\treturn c.v.GetBool(nskey), nil\n}\n<|endoftext|>"} {"text":"package std\n\nimport (\n\t\"math\"\n\n\t\"github.com\/DeedleFake\/wdte\"\n)\n\nfunc save(f wdte.Func, saved ...wdte.Func) wdte.Func {\n\treturn wdte.GoFunc(func(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\t\treturn f.Call(frame, append(saved, args...)...)\n\t})\n}\n\n\/\/ Add returns the sum of its arguments. If called with only 1\n\/\/ argument, it returns a function which adds arguments given to that\n\/\/ one argument.\nfunc Add(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Add)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Add), args[0])\n\t}\n\n\tvar sum wdte.Number\n\tfor _, arg := range args {\n\t\tsum += arg.Call(frame).(wdte.Number)\n\t}\n\treturn sum\n}\n\n\/\/ Sub returns args[0] - args[1]. If called with only 1 argument, it\n\/\/ returns a function which returns that argument minus the argument\n\/\/ given.\nfunc Sub(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Sub)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Sub), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 - a2\n}\n\n\/\/ Mult returns the product of its arguments. If called with only 1\n\/\/ argument, it returns a function that multiplies that argument by\n\/\/ its own arguments.\nfunc Mult(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Mult)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Mult), args[0])\n\t}\n\n\tp := wdte.Number(1)\n\tfor _, arg := range args {\n\t\tp *= arg.Call(frame).(wdte.Number)\n\t}\n\treturn p\n}\n\n\/\/ Div returns args[0] \/ args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Div(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Div)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Div), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 \/ a2\n}\n\n\/\/ Mod returns args[0] % args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Mod(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Mod)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Mod), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn wdte.Number(math.Mod(float64(a1), float64(a2)))\n}\n\n\/\/ Insert adds the functions in this package to m. It maps them to the\n\/\/ corresponding mathematical operators. 
For example, Add() becomes\n\/\/ `+`, Sub() becomes `-`, and so on.\nfunc Insert(m *wdte.Module) {\n\tm.Funcs[\"+\"] = wdte.GoFunc(Add)\n\tm.Funcs[\"-\"] = wdte.GoFunc(Sub)\n\tm.Funcs[\"*\"] = wdte.GoFunc(Mult)\n\tm.Funcs[\"\/\"] = wdte.GoFunc(Div)\n\tm.Funcs[\"%\"] = wdte.GoFunc(Mod)\n}\nstd: Update std to use Frame.package std\n\nimport (\n\t\"math\"\n\n\t\"github.com\/DeedleFake\/wdte\"\n)\n\nfunc save(f wdte.Func, saved ...wdte.Func) wdte.Func {\n\treturn wdte.GoFunc(func(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\t\treturn f.Call(frame, append(saved, args...)...)\n\t})\n}\n\n\/\/ Add returns the sum of its arguments. If called with only 1\n\/\/ argument, it returns a function which adds arguments given to that\n\/\/ one argument.\nfunc Add(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Add)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Add), args[0])\n\t}\n\n\tvar sum wdte.Number\n\tfor _, arg := range args {\n\t\tsum += arg.Call(frame).(wdte.Number)\n\t}\n\treturn sum\n}\n\n\/\/ Sub returns args[0] - args[1]. If called with only 1 argument, it\n\/\/ returns a function which returns that argument minus the argument\n\/\/ given.\nfunc Sub(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Sub)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Sub), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 - a2\n}\n\n\/\/ Mult returns the product of its arguments. If called with only 1\n\/\/ argument, it returns a function that multiplies that argument by\n\/\/ its own arguments.\nfunc Mult(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Mult)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Mult), args[0])\n\t}\n\n\tp := wdte.Number(1)\n\tfor _, arg := range args {\n\t\tp *= arg.Call(frame).(wdte.Number)\n\t}\n\treturn p\n}\n\n\/\/ Div returns args[0] \/ args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Div(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Div)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Div), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 \/ a2\n}\n\n\/\/ Mod returns args[0] % args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Mod(frame wdte.Frame, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Mod)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Mod), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn wdte.Number(math.Mod(float64(a1), float64(a2)))\n}\n\n\/\/ Insert adds the functions in this package to m. It maps them to the\n\/\/ corresponding mathematical operators. 
For example, Add() becomes\n\/\/ `+`, Sub() becomes `-`, and so on.\nfunc Insert(m *wdte.Module) {\n\tm.Funcs[\"+\"] = wdte.GoFunc(Add)\n\tm.Funcs[\"-\"] = wdte.GoFunc(Sub)\n\tm.Funcs[\"*\"] = wdte.GoFunc(Mult)\n\tm.Funcs[\"\/\"] = wdte.GoFunc(Div)\n\tm.Funcs[\"%\"] = wdte.GoFunc(Mod)\n}\n<|endoftext|>"} {"text":"package hpkp\n\nimport (\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ MemStorage is threadsafe hpkp host storage backed by an in-memory map\ntype MemStorage struct {\n\tdomains map[string]Header\n\tmutex sync.Mutex\n}\n\n\/\/ NewMemStorage initializes hpkp in-memory datastructure\nfunc NewMemStorage() *MemStorage {\n\tm := &MemStorage{}\n\tm.domains = make(map[string]Header)\n\treturn m\n}\n\n\/\/ Lookup returns the corresponding hpkp header information for a given host\nfunc (s *MemStorage) Lookup(host string) *Header {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\td, ok := s.domains[host]\n\tif ok {\n\t\treturn copy(d)\n\t}\n\n\t\/\/ is h a subdomain of an hpkp domain, walk the domain to see if it is a sub\n\t\/\/ sub ... sub domain of a domain that has the `includeSubDomains` rule\n\tl := len(host)\n\tfor l > 0 {\n\t\ti := strings.Index(host, \".\")\n\t\tif i > 0 {\n\t\t\thost = host[i+1:]\n\t\t\td, ok := s.domains[host]\n\t\t\tif ok {\n\t\t\t\tif d.IncludeSubDomains {\n\t\t\t\t\treturn copy(d)\n\t\t\t\t}\n\t\t\t}\n\t\t\tl = len(host)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc copy(h Header) *Header {\n\td := h\n\treturn &d\n}\n\n\/\/ Add a domain to hpkp storage\nfunc (s *MemStorage) Add(host string, d *Header) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif s.domains == nil {\n\t\ts.domains = make(map[string]Header)\n\t}\n\n\tif d.MaxAge == 0 && !d.Permanent {\n\t\tcheck, ok := s.domains[host]\n\t\tif ok {\n\t\t\tif !check.Permanent {\n\t\t\t\tdelete(s.domains, host)\n\t\t\t}\n\t\t}\n\t} else {\n\t\ts.domains[host] = *d\n\t}\n}\nstorage: construct to return interface instead of structpackage hpkp\n\nimport (\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ MemStorage is threadsafe hpkp host storage backed by an in-memory map\ntype MemStorage struct {\n\tdomains map[string]Header\n\tmutex sync.Mutex\n}\n\n\/\/ NewMemStorage initializes hpkp in-memory datastructure\nfunc NewMemStorage() Storage {\n\tm := &MemStorage{}\n\tm.domains = make(map[string]Header)\n\treturn m\n}\n\n\/\/ Lookup returns the corresponding hpkp header information for a given host\nfunc (s *MemStorage) Lookup(host string) *Header {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\td, ok := s.domains[host]\n\tif ok {\n\t\treturn copy(d)\n\t}\n\n\t\/\/ is h a subdomain of an hpkp domain, walk the domain to see if it is a sub\n\t\/\/ sub ... 
sub domain of a domain that has the `includeSubDomains` rule\n\tl := len(host)\n\tfor l > 0 {\n\t\ti := strings.Index(host, \".\")\n\t\tif i > 0 {\n\t\t\thost = host[i+1:]\n\t\t\td, ok := s.domains[host]\n\t\t\tif ok {\n\t\t\t\tif d.IncludeSubDomains {\n\t\t\t\t\treturn copy(d)\n\t\t\t\t}\n\t\t\t}\n\t\t\tl = len(host)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc copy(h Header) *Header {\n\td := h\n\treturn &d\n}\n\n\/\/ Add a domain to hpkp storage\nfunc (s *MemStorage) Add(host string, d *Header) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif s.domains == nil {\n\t\ts.domains = make(map[string]Header)\n\t}\n\n\tif d.MaxAge == 0 && !d.Permanent {\n\t\tcheck, ok := s.domains[host]\n\t\tif ok {\n\t\t\tif !check.Permanent {\n\t\t\t\tdelete(s.domains, host)\n\t\t\t}\n\t\t}\n\t} else {\n\t\ts.domains[host] = *d\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2018 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage devicescale\n\nimport (\n\t\"sync\"\n)\n\ntype pos struct {\n\tx, y int\n}\n\nvar (\n\tm sync.Mutex\n\tcache = map[pos]float64{}\n)\n\n\/\/ GetAt returns the device scale at (x, y).\n\/\/ x and y are in device-dependent pixels.\nfunc GetAt(x, y int) float64 {\n\tm.Lock()\n\tdefer m.Unlock()\n\tif s, ok := cache[pos{x, y}]; ok {\n\t\treturn s\n\t}\n\ts := impl(x, y)\n\tcache[pos{x, y}] = s\n\treturn s\n}\ninternal\/devicescale: Add comment about #1573\/\/ Copyright 2018 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage devicescale\n\nimport (\n\t\"sync\"\n)\n\ntype pos struct {\n\tx, y int\n}\n\nvar (\n\tm sync.Mutex\n\tcache = map[pos]float64{}\n)\n\n\/\/ GetAt returns the device scale at (x, y).\n\/\/ x and y are in device-dependent pixels.\nfunc GetAt(x, y int) float64 {\n\tm.Lock()\n\tdefer m.Unlock()\n\tif s, ok := cache[pos{x, y}]; ok {\n\t\treturn s\n\t}\n\ts := impl(x, y)\n\tcache[pos{x, y}] = s\n\n\t\/\/ TODO: Provide a way to invalidate the cache, or move the cache.\n\t\/\/ The device scale can vary even for the same monitor.\n\t\/\/ The only known case is when the application works on macOS, with OpenGL, with a wider screen mode,\n\t\/\/ and in the fullscreen mode (#1573).\n\n\treturn s\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage entry\n\nimport (\n\tstdregexp 
\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/issue9\/assert\"\n\t\"github.com\/issue9\/mux\/internal\/syntax\"\n)\n\nvar _ Entry = ®exp{}\n\nfunc TestNewRegexp(t *testing.T) {\n\ta := assert.New(t)\n\n\tpattern := \"\/posts\/{id:\\\\d+}\"\n\tr, err := newRegexp(&syntax.Syntax{\n\t\tPattern: pattern,\n\t\tHasParams: true,\n\t\tType: syntax.TypeRegexp,\n\t\tPatterns: []string{\"\/posts\/\", \"(?P\\\\d+)\"},\n\t})\n\ta.NotError(err).NotNil(r)\n\ta.Equal(r.pattern, pattern)\n\ta.Equal(r.expr.String(), \"\/posts\/(?P\\\\d+)\")\n\n\tpattern = \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\"\n\tr, err = newRegexp(&syntax.Syntax{\n\t\tPattern: pattern,\n\t\tHasParams: true,\n\t\tType: syntax.TypeRegexp,\n\t\tPatterns: []string{\"\/posts\/\", \"(?P[^\/]+)\", \"\/page\/\", \"(?P\\\\d+)\", \"\/size\/\", \"(\\\\d+)\"},\n\t})\n\ta.NotError(err).NotNil(r)\n\ta.Equal(r.pattern, pattern)\n\ta.Equal(r.expr.String(), \"\/posts\/(?P[^\/]+)\/page\/(?P\\\\d+)\/size\/(\\\\d+)\")\n}\n\nfunc TestRegexp_Match(t *testing.T) {\n\ta := assert.New(t)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}\").\n\t\tTrue(\"\/posts\/1\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tFalse(\"\/posts\/id\", nil).\n\t\tFalse(\"\/posts\/id.html\/\", nil).\n\t\tFalse(\"\/posts\/id.html\/page\", nil).\n\t\tFalse(\"\/post\/id\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}.html\").\n\t\tTrue(\"\/posts\/1.html\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tFalse(\"\/posts\/id\", nil).\n\t\tFalse(\"\/posts\/id.html\", nil).\n\t\tFalse(\"\/posts\/id.html\/page\", nil).\n\t\tFalse(\"\/post\/id\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id:[^\/]+}.html\").\n\t\tTrue(\"\/posts\/a.b.html\", map[string]string{\"id\": \"a.b\"})\n\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\").\n\t\tTrue(\"\/posts\/1\/page\/1\", map[string]string{\"id\": \"1\", \"page\": \"1\"}).\n\t\tTrue(\"\/posts\/1.html\/page\/1\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\", nil).\n\t\tFalse(\"\/posts\/id-1\/page\/1\/\", nil).\n\t\tFalse(\"\/posts\/id-1\/page\/1\/size\/1\", nil)\n\n\t\/\/ size 为未命名参数\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\").\n\t\tTrue(\"\/posts\/1.html\/page\/1\/size\/11\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\/size\/11\", nil)\n\n\tnewMatcher(a, \"\/users\/{user:\\\\w+}\/{repos}\/pulls\").\n\t\tFalse(\"\/users\/user\/repos\/pulls\/number\", nil)\n}\n\nfunc TestRegexp_match_wildcard(t *testing.T) {\n\ta := assert.New(t)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tTrue(\"\/posts\/1\/\", map[string]string{\"id\": \"1\"}).\n\t\tTrue(\"\/posts\/1\/index.html\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/id.html\/page\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1\/page\/1\", nil).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/index.html\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\/index.html\", nil)\n\n\t\/\/ size 为未命名参数\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1.html\/page\/1\/size\/1\", 
nil).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/size\/1\/index.html\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"})\n}\n\nfunc TestRegexp_URL(t *testing.T) {\n\ta := assert.New(t)\n\tn, err := New(\"\/posts\/{id:[^\/]+}\")\n\ta.NotError(err).NotNil(n)\n\turl, err := n.URL(map[string]string{\"id\": \"5.html\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\/\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/\")\n\n\tn, err = New(\"\/posts\/{id:[^\/]+}\/page\/{page}\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\")\n\n\t\/\/ 少参数\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\"}, \"path\")\n\ta.Error(err).Equal(url, \"\")\n\n\t\/\/ 带有未命名参数\n\tn, err = New(\"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/size\/[0-9]+\")\n\n\t\/\/ 带通配符\n\tn, err = New(\"\/posts\/{id:[^\/]+}\/page\/{page}\/*\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/path\")\n\n\t\/\/ 指定了空的 path\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ 以下为一个性能测试用,用于验证将一个正则表达式折分成多个\n\/\/ 和不折分,哪个性能下高一点\n\n\/\/ 测试用内容,键名为正则,键值为或匹配的值\nvar regexpStrs = map[string]string{\n\t\"\/blog\/posts\/\": \"\/blog\/posts\/\",\n\t\"(?P\\\\d+)\": \"100\",\n\t\"\/page\/\": \"\/page\/\",\n\t\"(?P\\\\d+)\": \"100\",\n\t\"\/size\/\": \"\/size\/\",\n\t\"(?P\\\\d+)\": \"100\",\n}\n\n\/\/ 将所有的内容当作一条正则进行处理\nfunc BenchmarkRegexp_One(b *testing.B) {\n\ta := assert.New(b)\n\n\tregstr := \"\"\n\tmatch := \"\"\n\tfor k, v := range regexpStrs {\n\t\tregstr += k\n\t\tmatch += v\n\t}\n\n\texpr, err := stdregexp.Compile(regstr)\n\ta.NotError(err).NotNil(expr)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tloc := expr.FindStringIndex(match)\n\t\tif loc == nil || loc[0] != 0 {\n\t\t\tb.Error(\"BenchmarkBasic_Match:error\")\n\t\t}\n\t}\n}\n\n\/\/ 将内容细分,仅将其中的正则部分处理成正则表达式,其它的仍然以字符串作比较\n\/\/\n\/\/ 目前看来,仅在只有一条正则夹在其中的时候,才有一占点优势,否则可能更慢。\nfunc BenchmarkRegexp_Mult(b *testing.B) {\n\ttype item struct {\n\t\tpattern string\n\t\texpr *stdregexp.Regexp\n\t}\n\n\titems := make([]*item, 0, len(regexpStrs))\n\n\tmatch := \"\"\n\tfor k, v := range regexpStrs {\n\t\tif strings.IndexByte(k, '?') >= 0 {\n\t\t\titems = append(items, &item{expr: stdregexp.MustCompile(k)})\n\t\t} else {\n\t\t\titems = append(items, &item{pattern: k})\n\t\t}\n\t\tmatch += v\n\t}\n\n\ttest := func(path string) bool {\n\t\tfor _, i := range items {\n\t\t\tif i.expr == nil {\n\t\t\t\tif !strings.HasPrefix(path, i.pattern) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tpath = path[len(i.pattern):]\n\t\t\t} else {\n\t\t\t\tloc := i.expr.FindStringIndex(path)\n\t\t\t\tif loc == nil || loc[0] != 0 {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tpath = path[loc[1]:]\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif !test(match) {\n\t\t\tb.Error(\"er\")\n\t\t}\n\t}\n}\n[internal\/entry] 添加测试内容\/\/ Copyright 2017 by caixw, All rights reserved.\n\/\/ Use of 
this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage entry\n\nimport (\n\tstdregexp \"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/issue9\/assert\"\n\t\"github.com\/issue9\/mux\/internal\/syntax\"\n)\n\nvar _ Entry = &regexp{}\n\nfunc TestNewRegexp(t *testing.T) {\n\ta := assert.New(t)\n\n\tpattern := \"\/posts\/{id:\\\\d+}\"\n\tr, err := newRegexp(&syntax.Syntax{\n\t\tPattern: pattern,\n\t\tHasParams: true,\n\t\tType: syntax.TypeRegexp,\n\t\tPatterns: []string{\"\/posts\/\", \"(?P<id>\\\\d+)\"},\n\t})\n\ta.NotError(err).NotNil(r)\n\ta.Equal(r.pattern, pattern)\n\ta.Equal(r.expr.String(), \"\/posts\/(?P<id>\\\\d+)\")\n\n\tpattern = \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\"\n\tr, err = newRegexp(&syntax.Syntax{\n\t\tPattern: pattern,\n\t\tHasParams: true,\n\t\tType: syntax.TypeRegexp,\n\t\tPatterns: []string{\"\/posts\/\", \"(?P<id>[^\/]+)\", \"\/page\/\", \"(?P<page>\\\\d+)\", \"\/size\/\", \"(\\\\d+)\"},\n\t})\n\ta.NotError(err).NotNil(r)\n\ta.Equal(r.pattern, pattern)\n\ta.Equal(r.expr.String(), \"\/posts\/(?P<id>[^\/]+)\/page\/(?P<page>\\\\d+)\/size\/(\\\\d+)\")\n}\n\nfunc TestRegexp_Match(t *testing.T) {\n\ta := assert.New(t)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}\").\n\t\tTrue(\"\/posts\/1\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tFalse(\"\/posts\/id\", nil).\n\t\tFalse(\"\/posts\/id.html\/\", nil).\n\t\tFalse(\"\/posts\/id.html\/page\", nil).\n\t\tFalse(\"\/post\/id\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}.html\").\n\t\tTrue(\"\/posts\/1.html\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tFalse(\"\/posts\/id\", nil).\n\t\tFalse(\"\/posts\/id.html\", nil).\n\t\tFalse(\"\/posts\/id.html\/page\", nil).\n\t\tFalse(\"\/post\/id\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id:[^\/]+}.html\").\n\t\tTrue(\"\/posts\/a.b.html\", map[string]string{\"id\": \"a.b\"})\n\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\").\n\t\tTrue(\"\/posts\/1\/page\/1\", map[string]string{\"id\": \"1\", \"page\": \"1\"}).\n\t\tTrue(\"\/posts\/1.html\/page\/1\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\", nil).\n\t\tFalse(\"\/posts\/id-1\/page\/1\/\", nil).\n\t\tFalse(\"\/posts\/id-1\/page\/1\/size\/1\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\w+}{page:\\\\d+}\").\n\t\tTrue(\"\/posts\/aa1\", map[string]string{\"id\": \"aa\", \"page\": \"1\"})\n\n\t\/\/ size is an unnamed parameter\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\").\n\t\tTrue(\"\/posts\/1.html\/page\/1\/size\/11\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\/size\/11\", nil)\n\n\tnewMatcher(a, \"\/users\/{user:\\\\w+}\/{repos}\/pulls\").\n\t\tFalse(\"\/users\/user\/repos\/pulls\/number\", nil)\n}\n\nfunc TestRegexp_match_wildcard(t *testing.T) {\n\ta := assert.New(t)\n\n\tnewMatcher(a, \"\/posts\/{id:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1\", nil).\n\t\tFalse(\"\/posts\", nil).\n\t\tTrue(\"\/posts\/1\/\", map[string]string{\"id\": \"1\"}).\n\t\tTrue(\"\/posts\/1\/index.html\", map[string]string{\"id\": \"1\"}).\n\t\tFalse(\"\/posts\/id.html\/page\", nil)\n\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1\/page\/1\", nil).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/index.html\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"}).\n\t\tFalse(\"\/posts\/1.html\/page\/x\/index.html\", nil)\n\n\t\/\/ size is an unnamed parameter\n\tnewMatcher(a, \"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\/*\").\n\t\tFalse(\"\/posts\/1.html\/page\/1\/size\/1\", nil).\n\t\tTrue(\"\/posts\/1.html\/page\/1\/size\/1\/index.html\", map[string]string{\"id\": \"1.html\", \"page\": \"1\"})\n}\n\nfunc TestRegexp_URL(t *testing.T) {\n\ta := assert.New(t)\n\tn, err := New(\"\/posts\/{id:[^\/]+}\")\n\ta.NotError(err).NotNil(n)\n\turl, err := n.URL(map[string]string{\"id\": \"5.html\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\/\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/\")\n\n\tn, err = New(\"\/posts\/{id:[^\/]+}\/page\/{page}\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\")\n\n\t\/\/ missing parameter\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\"}, \"path\")\n\ta.Error(err).Equal(url, \"\")\n\n\t\/\/ with an unnamed parameter\n\tn, err = New(\"\/posts\/{id}\/page\/{page:\\\\d+}\/size\/{:\\\\d+}\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/size\/[0-9]+\")\n\n\t\/\/ with a wildcard\n\tn, err = New(\"\/posts\/{id:[^\/]+}\/page\/{page}\/*\")\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"path\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/path\")\n\n\t\/\/ an empty path is specified\n\turl, err = n.URL(map[string]string{\"id\": \"5.html\", \"page\": \"1\"}, \"\")\n\ta.NotError(err).Equal(url, \"\/posts\/5.html\/page\/1\/\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ The following is a benchmark used to verify which performs better:\n\/\/ splitting one regular expression into several pieces, or keeping it whole\n\n\/\/ Test data: the keys are regular expressions and the values are strings they match\nvar regexpStrs = map[string]string{\n\t\"\/blog\/posts\/\": \"\/blog\/posts\/\",\n\t\"(?P<id>\\\\d+)\": \"100\",\n\t\"\/page\/\": \"\/page\/\",\n\t\"(?P<page>\\\\d+)\": \"100\",\n\t\"\/size\/\": \"\/size\/\",\n\t\"(?P<size>\\\\d+)\": \"100\",\n}\n\n\/\/ Treat all of the content as a single regular expression\nfunc BenchmarkRegexp_One(b *testing.B) {\n\ta := assert.New(b)\n\n\tregstr := \"\"\n\tmatch := \"\"\n\tfor k, v := range regexpStrs {\n\t\tregstr += k\n\t\tmatch += v\n\t}\n\n\texpr, err := stdregexp.Compile(regstr)\n\ta.NotError(err).NotNil(expr)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tloc := expr.FindStringIndex(match)\n\t\tif loc == nil || loc[0] != 0 {\n\t\t\tb.Error(\"BenchmarkBasic_Match:error\")\n\t\t}\n\t}\n}\n\n\/\/ Split the content into pieces, compiling only the regexp parts as regular\n\/\/ expressions and comparing the rest as plain strings.\n\/\/\n\/\/ So far this only has a slight edge when a single regexp is embedded in the\n\/\/ pattern; otherwise it may be slower.\nfunc BenchmarkRegexp_Mult(b *testing.B) {\n\ttype item struct {\n\t\tpattern string\n\t\texpr *stdregexp.Regexp\n\t}\n\n\titems := make([]*item, 0, len(regexpStrs))\n\n\tmatch := \"\"\n\tfor k, v := range regexpStrs {\n\t\tif strings.IndexByte(k, '?') >= 0 {\n\t\t\titems = append(items, &item{expr: stdregexp.MustCompile(k)})\n\t\t} else {\n\t\t\titems = append(items, &item{pattern: k})\n\t\t}\n\t\tmatch += v\n\t}\n\n\ttest := func(path string) bool {\n\t\tfor _, i := range items {\n\t\t\tif i.expr == nil {\n\t\t\t\tif !strings.HasPrefix(path, i.pattern) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tpath = path[len(i.pattern):]\n\t\t\t} else {\n\t\t\t\tloc := i.expr.FindStringIndex(path)\n\t\t\t\tif loc == nil || loc[0] != 0 {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tpath = path[loc[1]:]\n\t\t\t}\n\t\t}\n\n\t\treturn 
true\n\t}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif !test(match) {\n\t\t\tb.Error(\"er\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage postgres\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/lib\/pq\"\n\t\"golang.org\/x\/discovery\/internal\"\n\t\"golang.org\/x\/discovery\/internal\/database\"\n\t\"golang.org\/x\/discovery\/internal\/derrors\"\n\t\"golang.org\/x\/discovery\/internal\/log\"\n\t\"golang.org\/x\/discovery\/internal\/stdlib\"\n\t\"golang.org\/x\/discovery\/internal\/version\"\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/mod\/semver\"\n)\n\n\/\/ InsertVersion inserts a version into the database using\n\/\/ db.saveVersion, along with a search document corresponding to each of its\n\/\/ packages.\nfunc (db *DB) InsertVersion(ctx context.Context, v *internal.Version) (err error) {\n\tdefer func() {\n\t\tif v == nil {\n\t\t\tderrors.Wrap(&err, \"DB.InsertVersion(ctx, nil)\")\n\t\t} else {\n\t\t\tderrors.Wrap(&err, \"DB.InsertVersion(ctx, Version(%q, %q))\", v.ModulePath, v.Version)\n\t\t}\n\t}()\n\n\tif err := validateVersion(v); err != nil {\n\t\treturn fmt.Errorf(\"validateVersion: %v: %w\", err, derrors.InvalidArgument)\n\t}\n\tremoveNonDistributableData(v)\n\n\tif err := db.saveVersion(ctx, v); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If there is a more recent version of this module that has an alternative\n\t\/\/ module path, then do not insert its packages into search_documents. This\n\t\/\/ happens when a module that initially does not have a go.mod file is\n\t\/\/ forked or fetched via some non-canonical path (such as an alternative\n\t\/\/ capitalization), and then in a later version acquires a go.mod file.\n\t\/\/\n\t\/\/ To take an actual example: github.com\/sirupsen\/logrus@v1.1.0 has a go.mod\n\t\/\/ file that establishes that path as canonical. But v1.0.6 does not have a\n\t\/\/ go.mod file. So the miscapitalized path github.com\/Sirupsen\/logrus at\n\t\/\/ v1.1.0 is marked as an alternative path (code 491) by\n\t\/\/ internal\/fetch.FetchVersion and is not inserted into the DB, but at\n\t\/\/ v1.0.6 it is considered valid, and we end up here. We still insert\n\t\/\/ github.com\/Sirupsen\/logrus@v1.0.6 in the versions table and friends so\n\t\/\/ that users who import it can find information about it, but we don't want\n\t\/\/ it showing up in search results.\n\t\/\/\n\t\/\/ Note that we end up here only if we first saw the alternative version\n\t\/\/ (github.com\/Sirupsen\/logrus@v1.1.0 in the example) and then see the valid\n\t\/\/ one. 
The \"if code == 491\" section of internal\/etl.fetchAndUpdateState\n\t\/\/ handles the case where we fetch the versions in the other order.\n\trow := db.db.QueryRow(ctx, `\n\t\t\tSELECT 1 FROM module_version_states\n\t\t\tWHERE module_path = $1 AND sort_version > $2 and status = 491`,\n\t\tv.ModulePath, version.ForSorting(v.Version))\n\tvar x int\n\tif err := row.Scan(&x); err != sql.ErrNoRows {\n\t\tlog.Infof(ctx, \"%s@%s: not inserting into search documents\", v.ModulePath, v.Version)\n\t\treturn err\n\t}\n\n\t\/\/ Insert the module's packages into search_documents.\n\tfor _, pkg := range v.Packages {\n\t\tif err := db.UpsertSearchDocument(ctx, pkg.Path); err != nil && !errors.Is(err, derrors.InvalidArgument) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ saveVersion inserts a Version into the database along with its packages,\n\/\/ imports, and licenses. If any of these rows already exist, the version and\n\/\/ corresponding will be deleted and reinserted.\n\/\/ If the version is malformed then insertion will fail.\n\/\/\n\/\/ A derrors.InvalidArgument error will be returned if the given version and\n\/\/ licenses are invalid.\nfunc (db *DB) saveVersion(ctx context.Context, v *internal.Version) error {\n\tif v.ReadmeContents == internal.StringFieldMissing {\n\t\treturn errors.New(\"saveVersion: version missing ReadmeContents\")\n\t}\n\t\/\/ Sort to ensure proper lock ordering, avoiding deadlocks. See\n\t\/\/ b\/141164828#comment8. The only deadlocks we've actually seen are on\n\t\/\/ imports_unique, because they can occur when processing two versions of\n\t\/\/ the same module, which happens regularly. But if we were ever to process\n\t\/\/ the same module and version twice, we could see deadlocks in the other\n\t\/\/ bulk inserts.\n\tsort.Slice(v.Packages, func(i, j int) bool {\n\t\treturn v.Packages[i].Path < v.Packages[j].Path\n\t})\n\tsort.Slice(v.Licenses, func(i, j int) bool {\n\t\treturn v.Licenses[i].FilePath < v.Licenses[j].FilePath\n\t})\n\tfor _, p := range v.Packages {\n\t\tsort.Strings(p.Imports)\n\t}\n\n\terr := db.db.Transact(func(tx *sql.Tx) error {\n\t\t\/\/ If the version exists, delete it to force an overwrite. 
This allows us\n\t\t\/\/ to selectively repopulate data after a code change.\n\t\tif err := db.DeleteVersion(ctx, tx, v.ModulePath, v.Version); err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting existing versions: %v\", err)\n\t\t}\n\n\t\tsourceInfoJSON, err := json.Marshal(v.SourceInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := database.ExecTx(ctx, tx,\n\t\t\t`INSERT INTO versions(\n\t\t\t\tmodule_path,\n\t\t\t\tversion,\n\t\t\t\tcommit_time,\n\t\t\t\treadme_file_path,\n\t\t\t\treadme_contents,\n\t\t\t\tsort_version,\n\t\t\t\tversion_type,\n\t\t\t\tseries_path,\n\t\t\t\tsource_info,\n\t\t\t\tredistributable,\n\t\t\t\thas_go_mod)\n\t\t\tVALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10, $11) ON CONFLICT DO NOTHING`,\n\t\t\tv.ModulePath,\n\t\t\tv.Version,\n\t\t\tv.CommitTime,\n\t\t\tv.ReadmeFilePath,\n\t\t\tv.ReadmeContents,\n\t\t\tversion.ForSorting(v.Version),\n\t\t\tv.VersionType,\n\t\t\tv.SeriesPath(),\n\t\t\tsourceInfoJSON,\n\t\t\tv.IsRedistributable,\n\t\t\tv.HasGoMod,\n\t\t); err != nil {\n\t\t\treturn fmt.Errorf(\"error inserting version: %v\", err)\n\t\t}\n\n\t\tvar licenseValues []interface{}\n\t\tfor _, l := range v.Licenses {\n\t\t\tcovJSON, err := json.Marshal(l.Coverage)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"marshalling %+v: %v\", l.Coverage, err)\n\t\t\t}\n\t\t\tlicenseValues = append(licenseValues, v.ModulePath, v.Version,\n\t\t\t\tl.FilePath, makeValidUnicode(l.Contents), pq.Array(l.Types), covJSON)\n\t\t}\n\t\tif len(licenseValues) > 0 {\n\t\t\tlicenseCols := []string{\n\t\t\t\t\"module_path\",\n\t\t\t\t\"version\",\n\t\t\t\t\"file_path\",\n\t\t\t\t\"contents\",\n\t\t\t\t\"types\",\n\t\t\t\t\"coverage\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"licenses\", licenseCols, licenseValues,\n\t\t\t\tdatabase.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We only insert into imports_unique if this is the latest version of the module.\n\t\tisLatest, err := isLatestVersion(ctx, tx, v.ModulePath, v.Version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isLatest {\n\t\t\t\/\/ Remove the previous rows for this module. 
We'll replace them with\n\t\t\t\/\/ new ones below.\n\t\t\tif _, err := database.ExecTx(ctx, tx,\n\t\t\t\t`DELETE FROM imports_unique WHERE from_module_path = $1`,\n\t\t\t\tv.ModulePath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tvar pkgValues, importValues, importUniqueValues []interface{}\n\t\tfor _, p := range v.Packages {\n\t\t\tif p.DocumentationHTML == internal.StringFieldMissing {\n\t\t\t\treturn errors.New(\"saveVersion: package missing DocumentationHTML\")\n\t\t\t}\n\t\t\tvar licenseTypes, licensePaths []string\n\t\t\tfor _, l := range p.Licenses {\n\t\t\t\tif len(l.Types) == 0 {\n\t\t\t\t\t\/\/ If a license file has no detected license types, we still need to\n\t\t\t\t\t\/\/ record it as applicable to the package, because we want to fail\n\t\t\t\t\t\/\/ closed (meaning if there is a LICENSE file containing unknown\n\t\t\t\t\t\/\/ licenses, we assume them not to be permissive of redistribution.)\n\t\t\t\t\tlicenseTypes = append(licenseTypes, \"\")\n\t\t\t\t\tlicensePaths = append(licensePaths, l.FilePath)\n\t\t\t\t} else {\n\t\t\t\t\tfor _, typ := range l.Types {\n\t\t\t\t\t\tlicenseTypes = append(licenseTypes, typ)\n\t\t\t\t\t\tlicensePaths = append(licensePaths, l.FilePath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tpkgValues = append(pkgValues,\n\t\t\t\tp.Path,\n\t\t\t\tp.Synopsis,\n\t\t\t\tp.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.ModulePath,\n\t\t\t\tp.V1Path,\n\t\t\t\tp.IsRedistributable,\n\t\t\t\tp.DocumentationHTML,\n\t\t\t\tpq.Array(licenseTypes),\n\t\t\t\tpq.Array(licensePaths),\n\t\t\t\tp.GOOS,\n\t\t\t\tp.GOARCH,\n\t\t\t\tv.CommitTime,\n\t\t\t)\n\t\t\tfor _, i := range p.Imports {\n\t\t\t\timportValues = append(importValues, p.Path, v.ModulePath, v.Version, i)\n\t\t\t\tif isLatest {\n\t\t\t\t\timportUniqueValues = append(importUniqueValues, p.Path, v.ModulePath, i)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(pkgValues) > 0 {\n\t\t\tpkgCols := []string{\n\t\t\t\t\"path\",\n\t\t\t\t\"synopsis\",\n\t\t\t\t\"name\",\n\t\t\t\t\"version\",\n\t\t\t\t\"module_path\",\n\t\t\t\t\"v1_path\",\n\t\t\t\t\"redistributable\",\n\t\t\t\t\"documentation\",\n\t\t\t\t\"license_types\",\n\t\t\t\t\"license_paths\",\n\t\t\t\t\"goos\",\n\t\t\t\t\"goarch\",\n\t\t\t\t\"commit_time\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"packages\", pkgCols, pkgValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(importValues) > 0 {\n\t\t\timportCols := []string{\n\t\t\t\t\"from_path\",\n\t\t\t\t\"from_module_path\",\n\t\t\t\t\"from_version\",\n\t\t\t\t\"to_path\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"imports\", importCols, importValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(importUniqueValues) > 0 {\n\t\t\t\timportUniqueCols := []string{\n\t\t\t\t\t\"from_path\",\n\t\t\t\t\t\"from_module_path\",\n\t\t\t\t\t\"to_path\",\n\t\t\t\t}\n\t\t\t\tif err := database.BulkInsert(ctx, tx, \"imports_unique\", importUniqueCols, importUniqueValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"DB.saveVersion(ctx, Version(%q, %q)): %w\", v.ModulePath, v.Version, err)\n\t}\n\treturn nil\n}\n\n\/\/ isLatestVersion reports whether version is the latest version of the module.\nfunc isLatestVersion(ctx context.Context, tx *sql.Tx, modulePath, version string) (_ bool, err error) {\n\tdefer derrors.Wrap(&err, \"latestVersion(ctx, tx, %q)\", modulePath)\n\n\trow := tx.QueryRowContext(ctx, 
`\n\t\tSELECT version FROM versions WHERE module_path = $1\n\t\tORDER BY version_type = 'release' DESC, sort_version DESC\n\t\tLIMIT 1`,\n\t\tmodulePath)\n\tvar v string\n\tif err := row.Scan(&v); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn true, nil \/\/ It's the only version, so it's also the latest.\n\t\t}\n\t\treturn false, err\n\t}\n\treturn semver.Compare(version, v) >= 0, nil\n}\n\n\/\/ validateVersion checks that fields needed to insert a version into the\n\/\/ database are present. Otherwise, it returns an error listing the reasons the\n\/\/ version cannot be inserted.\nfunc validateVersion(v *internal.Version) error {\n\tif v == nil {\n\t\treturn fmt.Errorf(\"nil version\")\n\t}\n\n\tvar errReasons []string\n\tif !utf8.ValidString(v.ReadmeContents) {\n\t\terrReasons = append(errReasons, fmt.Sprintf(\"readme %q is not valid UTF-8\", v.ReadmeFilePath))\n\t}\n\tfor _, l := range v.Licenses {\n\t\tif !utf8.ValidString(string(l.Contents)) {\n\t\t\terrReasons = append(errReasons, fmt.Sprintf(\"license %q contains invalid UTF-8\", l.FilePath))\n\t\t}\n\t}\n\tif v.Version == \"\" {\n\t\terrReasons = append(errReasons, \"no specified version\")\n\t}\n\tif v.ModulePath == \"\" {\n\t\terrReasons = append(errReasons, \"no module path\")\n\t}\n\tif v.ModulePath != stdlib.ModulePath {\n\t\tif err := module.CheckPath(v.ModulePath); err != nil {\n\t\t\terrReasons = append(errReasons, fmt.Sprintf(\"invalid module path (%s)\", err))\n\t\t}\n\t\tif !semver.IsValid(v.Version) {\n\t\t\terrReasons = append(errReasons, \"invalid version\")\n\t\t}\n\t}\n\tif len(v.Packages) == 0 {\n\t\terrReasons = append(errReasons, \"module does not have any packages\")\n\t}\n\tif v.CommitTime.IsZero() {\n\t\terrReasons = append(errReasons, \"empty commit time\")\n\t}\n\tif len(errReasons) == 0 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot insert version %q: %s\", v.Version, strings.Join(errReasons, \", \"))\n}\n\n\/\/ removeNonDistributableData removes any information from the version payload,\n\/\/ after checking licenses.\nfunc removeNonDistributableData(v *internal.Version) {\n\tfor _, p := range v.Packages {\n\t\tif !p.IsRedistributable {\n\t\t\t\/\/ Prune derived information that can't be stored.\n\t\t\tp.Synopsis = \"\"\n\t\t\tp.DocumentationHTML = \"\"\n\t\t}\n\t}\n\tif !v.IsRedistributable {\n\t\tv.ReadmeFilePath = \"\"\n\t\tv.ReadmeContents = \"\"\n\t}\n}\n\n\/\/ DeleteVersion deletes a Version from the database.\n\/\/ If tx is non-nil, it will be used to execute the statement.\n\/\/ Otherwise the statement will be run outside of a transaction.\nfunc (db *DB) DeleteVersion(ctx context.Context, tx *sql.Tx, modulePath, version string) (err error) {\n\tdefer derrors.Wrap(&err, \"DB.DeleteVersion(ctx, tx, %q, %q)\", modulePath, version)\n\n\t\/\/ We only need to delete from the versions table. 
Thanks to ON DELETE\n\t\/\/ CASCADE constraints, that will trigger deletions from all other tables.\n\tconst stmt = `DELETE FROM versions WHERE module_path=$1 AND version=$2`\n\tif tx == nil {\n\t\t_, err = db.db.Exec(ctx, stmt, modulePath, version)\n\t} else {\n\t\t_, err = database.ExecTx(ctx, tx, stmt, modulePath, version)\n\t}\n\treturn err\n}\n\n\/\/ makeValidUnicode removes null runes from license contents, because pq doesn't like them.\nfunc makeValidUnicode(bs []byte) string {\n\ts := string(bs)\n\tvar b strings.Builder\n\tfor _, r := range s {\n\t\tif r != 0 {\n\t\t\tb.WriteRune(r)\n\t\t}\n\t}\n\treturn b.String()\n}\ninternal\/postgres: determine latest version correctly\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage postgres\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/lib\/pq\"\n\t\"golang.org\/x\/discovery\/internal\"\n\t\"golang.org\/x\/discovery\/internal\/database\"\n\t\"golang.org\/x\/discovery\/internal\/derrors\"\n\t\"golang.org\/x\/discovery\/internal\/log\"\n\t\"golang.org\/x\/discovery\/internal\/stdlib\"\n\t\"golang.org\/x\/discovery\/internal\/version\"\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/mod\/semver\"\n)\n\n\/\/ InsertVersion inserts a version into the database using\n\/\/ db.saveVersion, along with a search document corresponding to each of its\n\/\/ packages.\nfunc (db *DB) InsertVersion(ctx context.Context, v *internal.Version) (err error) {\n\tdefer func() {\n\t\tif v == nil {\n\t\t\tderrors.Wrap(&err, \"DB.InsertVersion(ctx, nil)\")\n\t\t} else {\n\t\t\tderrors.Wrap(&err, \"DB.InsertVersion(ctx, Version(%q, %q))\", v.ModulePath, v.Version)\n\t\t}\n\t}()\n\n\tif err := validateVersion(v); err != nil {\n\t\treturn fmt.Errorf(\"validateVersion: %v: %w\", err, derrors.InvalidArgument)\n\t}\n\tremoveNonDistributableData(v)\n\n\tif err := db.saveVersion(ctx, v); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If there is a more recent version of this module that has an alternative\n\t\/\/ module path, then do not insert its packages into search_documents. This\n\t\/\/ happens when a module that initially does not have a go.mod file is\n\t\/\/ forked or fetched via some non-canonical path (such as an alternative\n\t\/\/ capitalization), and then in a later version acquires a go.mod file.\n\t\/\/\n\t\/\/ To take an actual example: github.com\/sirupsen\/logrus@v1.1.0 has a go.mod\n\t\/\/ file that establishes that path as canonical. But v1.0.6 does not have a\n\t\/\/ go.mod file. So the miscapitalized path github.com\/Sirupsen\/logrus at\n\t\/\/ v1.1.0 is marked as an alternative path (code 491) by\n\t\/\/ internal\/fetch.FetchVersion and is not inserted into the DB, but at\n\t\/\/ v1.0.6 it is considered valid, and we end up here. We still insert\n\t\/\/ github.com\/Sirupsen\/logrus@v1.0.6 in the versions table and friends so\n\t\/\/ that users who import it can find information about it, but we don't want\n\t\/\/ it showing up in search results.\n\t\/\/\n\t\/\/ Note that we end up here only if we first saw the alternative version\n\t\/\/ (github.com\/Sirupsen\/logrus@v1.1.0 in the example) and then see the valid\n\t\/\/ one. 
The \"if code == 491\" section of internal\/etl.fetchAndUpdateState\n\t\/\/ handles the case where we fetch the versions in the other order.\n\trow := db.db.QueryRow(ctx, `\n\t\t\tSELECT 1 FROM module_version_states\n\t\t\tWHERE module_path = $1 AND sort_version > $2 and status = 491`,\n\t\tv.ModulePath, version.ForSorting(v.Version))\n\tvar x int\n\tif err := row.Scan(&x); err != sql.ErrNoRows {\n\t\tlog.Infof(ctx, \"%s@%s: not inserting into search documents\", v.ModulePath, v.Version)\n\t\treturn err\n\t}\n\n\t\/\/ Insert the module's packages into search_documents.\n\tfor _, pkg := range v.Packages {\n\t\tif err := db.UpsertSearchDocument(ctx, pkg.Path); err != nil && !errors.Is(err, derrors.InvalidArgument) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ saveVersion inserts a Version into the database along with its packages,\n\/\/ imports, and licenses. If any of these rows already exist, the version and\n\/\/ corresponding will be deleted and reinserted.\n\/\/ If the version is malformed then insertion will fail.\n\/\/\n\/\/ A derrors.InvalidArgument error will be returned if the given version and\n\/\/ licenses are invalid.\nfunc (db *DB) saveVersion(ctx context.Context, v *internal.Version) error {\n\tif v.ReadmeContents == internal.StringFieldMissing {\n\t\treturn errors.New(\"saveVersion: version missing ReadmeContents\")\n\t}\n\t\/\/ Sort to ensure proper lock ordering, avoiding deadlocks. See\n\t\/\/ b\/141164828#comment8. The only deadlocks we've actually seen are on\n\t\/\/ imports_unique, because they can occur when processing two versions of\n\t\/\/ the same module, which happens regularly. But if we were ever to process\n\t\/\/ the same module and version twice, we could see deadlocks in the other\n\t\/\/ bulk inserts.\n\tsort.Slice(v.Packages, func(i, j int) bool {\n\t\treturn v.Packages[i].Path < v.Packages[j].Path\n\t})\n\tsort.Slice(v.Licenses, func(i, j int) bool {\n\t\treturn v.Licenses[i].FilePath < v.Licenses[j].FilePath\n\t})\n\tfor _, p := range v.Packages {\n\t\tsort.Strings(p.Imports)\n\t}\n\n\terr := db.db.Transact(func(tx *sql.Tx) error {\n\t\t\/\/ If the version exists, delete it to force an overwrite. 
This allows us\n\t\t\/\/ to selectively repopulate data after a code change.\n\t\tif err := db.DeleteVersion(ctx, tx, v.ModulePath, v.Version); err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting existing versions: %v\", err)\n\t\t}\n\n\t\tsourceInfoJSON, err := json.Marshal(v.SourceInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := database.ExecTx(ctx, tx,\n\t\t\t`INSERT INTO versions(\n\t\t\t\tmodule_path,\n\t\t\t\tversion,\n\t\t\t\tcommit_time,\n\t\t\t\treadme_file_path,\n\t\t\t\treadme_contents,\n\t\t\t\tsort_version,\n\t\t\t\tversion_type,\n\t\t\t\tseries_path,\n\t\t\t\tsource_info,\n\t\t\t\tredistributable,\n\t\t\t\thas_go_mod)\n\t\t\tVALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10, $11) ON CONFLICT DO NOTHING`,\n\t\t\tv.ModulePath,\n\t\t\tv.Version,\n\t\t\tv.CommitTime,\n\t\t\tv.ReadmeFilePath,\n\t\t\tv.ReadmeContents,\n\t\t\tversion.ForSorting(v.Version),\n\t\t\tv.VersionType,\n\t\t\tv.SeriesPath(),\n\t\t\tsourceInfoJSON,\n\t\t\tv.IsRedistributable,\n\t\t\tv.HasGoMod,\n\t\t); err != nil {\n\t\t\treturn fmt.Errorf(\"error inserting version: %v\", err)\n\t\t}\n\n\t\tvar licenseValues []interface{}\n\t\tfor _, l := range v.Licenses {\n\t\t\tcovJSON, err := json.Marshal(l.Coverage)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"marshalling %+v: %v\", l.Coverage, err)\n\t\t\t}\n\t\t\tlicenseValues = append(licenseValues, v.ModulePath, v.Version,\n\t\t\t\tl.FilePath, makeValidUnicode(l.Contents), pq.Array(l.Types), covJSON)\n\t\t}\n\t\tif len(licenseValues) > 0 {\n\t\t\tlicenseCols := []string{\n\t\t\t\t\"module_path\",\n\t\t\t\t\"version\",\n\t\t\t\t\"file_path\",\n\t\t\t\t\"contents\",\n\t\t\t\t\"types\",\n\t\t\t\t\"coverage\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"licenses\", licenseCols, licenseValues,\n\t\t\t\tdatabase.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We only insert into imports_unique if this is the latest version of the module.\n\t\tisLatest, err := isLatestVersion(ctx, tx, v.ModulePath, v.Version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isLatest {\n\t\t\t\/\/ Remove the previous rows for this module. 
We'll replace them with\n\t\t\t\/\/ new ones below.\n\t\t\tif _, err := database.ExecTx(ctx, tx,\n\t\t\t\t`DELETE FROM imports_unique WHERE from_module_path = $1`,\n\t\t\t\tv.ModulePath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tvar pkgValues, importValues, importUniqueValues []interface{}\n\t\tfor _, p := range v.Packages {\n\t\t\tif p.DocumentationHTML == internal.StringFieldMissing {\n\t\t\t\treturn errors.New(\"saveVersion: package missing DocumentationHTML\")\n\t\t\t}\n\t\t\tvar licenseTypes, licensePaths []string\n\t\t\tfor _, l := range p.Licenses {\n\t\t\t\tif len(l.Types) == 0 {\n\t\t\t\t\t\/\/ If a license file has no detected license types, we still need to\n\t\t\t\t\t\/\/ record it as applicable to the package, because we want to fail\n\t\t\t\t\t\/\/ closed (meaning if there is a LICENSE file containing unknown\n\t\t\t\t\t\/\/ licenses, we assume them not to be permissive of redistribution.)\n\t\t\t\t\tlicenseTypes = append(licenseTypes, \"\")\n\t\t\t\t\tlicensePaths = append(licensePaths, l.FilePath)\n\t\t\t\t} else {\n\t\t\t\t\tfor _, typ := range l.Types {\n\t\t\t\t\t\tlicenseTypes = append(licenseTypes, typ)\n\t\t\t\t\t\tlicensePaths = append(licensePaths, l.FilePath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tpkgValues = append(pkgValues,\n\t\t\t\tp.Path,\n\t\t\t\tp.Synopsis,\n\t\t\t\tp.Name,\n\t\t\t\tv.Version,\n\t\t\t\tv.ModulePath,\n\t\t\t\tp.V1Path,\n\t\t\t\tp.IsRedistributable,\n\t\t\t\tp.DocumentationHTML,\n\t\t\t\tpq.Array(licenseTypes),\n\t\t\t\tpq.Array(licensePaths),\n\t\t\t\tp.GOOS,\n\t\t\t\tp.GOARCH,\n\t\t\t\tv.CommitTime,\n\t\t\t)\n\t\t\tfor _, i := range p.Imports {\n\t\t\t\timportValues = append(importValues, p.Path, v.ModulePath, v.Version, i)\n\t\t\t\tif isLatest {\n\t\t\t\t\timportUniqueValues = append(importUniqueValues, p.Path, v.ModulePath, i)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(pkgValues) > 0 {\n\t\t\tpkgCols := []string{\n\t\t\t\t\"path\",\n\t\t\t\t\"synopsis\",\n\t\t\t\t\"name\",\n\t\t\t\t\"version\",\n\t\t\t\t\"module_path\",\n\t\t\t\t\"v1_path\",\n\t\t\t\t\"redistributable\",\n\t\t\t\t\"documentation\",\n\t\t\t\t\"license_types\",\n\t\t\t\t\"license_paths\",\n\t\t\t\t\"goos\",\n\t\t\t\t\"goarch\",\n\t\t\t\t\"commit_time\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"packages\", pkgCols, pkgValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(importValues) > 0 {\n\t\t\timportCols := []string{\n\t\t\t\t\"from_path\",\n\t\t\t\t\"from_module_path\",\n\t\t\t\t\"from_version\",\n\t\t\t\t\"to_path\",\n\t\t\t}\n\t\t\tif err := database.BulkInsert(ctx, tx, \"imports\", importCols, importValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(importUniqueValues) > 0 {\n\t\t\t\timportUniqueCols := []string{\n\t\t\t\t\t\"from_path\",\n\t\t\t\t\t\"from_module_path\",\n\t\t\t\t\t\"to_path\",\n\t\t\t\t}\n\t\t\t\tif err := database.BulkInsert(ctx, tx, \"imports_unique\", importUniqueCols, importUniqueValues, database.OnConflictDoNothing); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"DB.saveVersion(ctx, Version(%q, %q)): %w\", v.ModulePath, v.Version, err)\n\t}\n\treturn nil\n}\n\n\/\/ isLatestVersion reports whether version is the latest version of the module.\nfunc isLatestVersion(ctx context.Context, tx *sql.Tx, modulePath, version string) (_ bool, err error) {\n\tdefer derrors.Wrap(&err, \"latestVersion(ctx, tx, %q)\", modulePath)\n\n\trow := tx.QueryRowContext(ctx, 
`\n\t\tSELECT version FROM versions WHERE module_path = $1\n\t\tORDER BY version_type = 'release' DESC, sort_version DESC\n\t\tLIMIT 1`,\n\t\tmodulePath)\n\tvar v string\n\tif err := row.Scan(&v); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn true, nil \/\/ It's the only version, so it's also the latest.\n\t\t}\n\t\treturn false, err\n\t}\n\treturn version == v, nil\n}\n\n\/\/ validateVersion checks that fields needed to insert a version into the\n\/\/ database are present. Otherwise, it returns an error listing the reasons the\n\/\/ version cannot be inserted.\nfunc validateVersion(v *internal.Version) error {\n\tif v == nil {\n\t\treturn fmt.Errorf(\"nil version\")\n\t}\n\n\tvar errReasons []string\n\tif !utf8.ValidString(v.ReadmeContents) {\n\t\terrReasons = append(errReasons, fmt.Sprintf(\"readme %q is not valid UTF-8\", v.ReadmeFilePath))\n\t}\n\tfor _, l := range v.Licenses {\n\t\tif !utf8.ValidString(string(l.Contents)) {\n\t\t\terrReasons = append(errReasons, fmt.Sprintf(\"license %q contains invalid UTF-8\", l.FilePath))\n\t\t}\n\t}\n\tif v.Version == \"\" {\n\t\terrReasons = append(errReasons, \"no specified version\")\n\t}\n\tif v.ModulePath == \"\" {\n\t\terrReasons = append(errReasons, \"no module path\")\n\t}\n\tif v.ModulePath != stdlib.ModulePath {\n\t\tif err := module.CheckPath(v.ModulePath); err != nil {\n\t\t\terrReasons = append(errReasons, fmt.Sprintf(\"invalid module path (%s)\", err))\n\t\t}\n\t\tif !semver.IsValid(v.Version) {\n\t\t\terrReasons = append(errReasons, \"invalid version\")\n\t\t}\n\t}\n\tif len(v.Packages) == 0 {\n\t\terrReasons = append(errReasons, \"module does not have any packages\")\n\t}\n\tif v.CommitTime.IsZero() {\n\t\terrReasons = append(errReasons, \"empty commit time\")\n\t}\n\tif len(errReasons) == 0 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot insert version %q: %s\", v.Version, strings.Join(errReasons, \", \"))\n}\n\n\/\/ removeNonDistributableData removes any information from the version payload,\n\/\/ after checking licenses.\nfunc removeNonDistributableData(v *internal.Version) {\n\tfor _, p := range v.Packages {\n\t\tif !p.IsRedistributable {\n\t\t\t\/\/ Prune derived information that can't be stored.\n\t\t\tp.Synopsis = \"\"\n\t\t\tp.DocumentationHTML = \"\"\n\t\t}\n\t}\n\tif !v.IsRedistributable {\n\t\tv.ReadmeFilePath = \"\"\n\t\tv.ReadmeContents = \"\"\n\t}\n}\n\n\/\/ DeleteVersion deletes a Version from the database.\n\/\/ If tx is non-nil, it will be used to execute the statement.\n\/\/ Otherwise the statement will be run outside of a transaction.\nfunc (db *DB) DeleteVersion(ctx context.Context, tx *sql.Tx, modulePath, version string) (err error) {\n\tdefer derrors.Wrap(&err, \"DB.DeleteVersion(ctx, tx, %q, %q)\", modulePath, version)\n\n\t\/\/ We only need to delete from the versions table. 
Thanks to ON DELETE\n\t\/\/ CASCADE constraints, that will trigger deletions from all other tables.\n\tconst stmt = `DELETE FROM versions WHERE module_path=$1 AND version=$2`\n\tif tx == nil {\n\t\t_, err = db.db.Exec(ctx, stmt, modulePath, version)\n\t} else {\n\t\t_, err = database.ExecTx(ctx, tx, stmt, modulePath, version)\n\t}\n\treturn err\n}\n\n\/\/ makeValidUnicode removes null runes from license contents, because pq doesn't like them.\nfunc makeValidUnicode(bs []byte) string {\n\ts := string(bs)\n\tvar b strings.Builder\n\tfor _, r := range s {\n\t\tif r != 0 {\n\t\t\tb.WriteRune(r)\n\t\t}\n\t}\n\treturn b.String()\n}\n<|endoftext|>"} {"text":"package xid\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst strInvalidID = \"xid: invalid ID\"\n\ntype IDParts struct {\n\tid ID\n\ttimestamp int64\n\tmachine []byte\n\tpid uint16\n\tcounter int32\n}\n\nvar IDs = []IDParts{\n\tIDParts{\n\t\tID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9},\n\t\t1300816219,\n\t\t[]byte{0x60, 0xf4, 0x86},\n\t\t0xe428,\n\t\t4271561,\n\t},\n\tIDParts{\n\t\tID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},\n\t\t0,\n\t\t[]byte{0x00, 0x00, 0x00},\n\t\t0x0000,\n\t\t0,\n\t},\n\tIDParts{\n\t\tID{0x00, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x00, 0x00, 0x01},\n\t\t0,\n\t\t[]byte{0xaa, 0xbb, 0xcc},\n\t\t0xddee,\n\t\t1,\n\t},\n}\n\nfunc TestIDPartsExtraction(t *testing.T) {\n\tfor i, v := range IDs {\n\t\tassert.Equal(t, v.id.Time(), time.Unix(v.timestamp, 0), \"#%d timestamp\", i)\n\t\tassert.Equal(t, v.id.Machine(), v.machine, \"#%d machine\", i)\n\t\tassert.Equal(t, v.id.Pid(), v.pid, \"#%d pid\", i)\n\t\tassert.Equal(t, v.id.Counter(), v.counter, \"#%d counter\", i)\n\t}\n}\n\nfunc TestNew(t *testing.T) {\n\t\/\/ Generate 10 ids\n\tids := make([]ID, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tids[i] = New()\n\t}\n\tfor i := 1; i < 10; i++ {\n\t\tprevID := ids[i-1]\n\t\tid := ids[i]\n\t\t\/\/ Test for uniqueness among all other 9 generated ids\n\t\tfor j, tid := range ids {\n\t\t\tif j != i {\n\t\t\t\tassert.NotEqual(t, id, tid, \"Generated ID is not unique\")\n\t\t\t}\n\t\t}\n\t\t\/\/ Check that timestamp was incremented and is within 30 seconds of the previous one\n\t\tsecs := id.Time().Sub(prevID.Time()).Seconds()\n\t\tassert.Equal(t, (secs >= 0 && secs <= 30), true, \"Wrong timestamp in generated ID\")\n\t\t\/\/ Check that machine ids are the same\n\t\tassert.Equal(t, id.Machine(), prevID.Machine())\n\t\t\/\/ Check that pids are the same\n\t\tassert.Equal(t, id.Pid(), prevID.Pid())\n\t\t\/\/ Test for proper increment\n\t\tdelta := int(id.Counter() - prevID.Counter())\n\t\tassert.Equal(t, delta, 1, \"Wrong increment in generated ID\")\n\t}\n}\n\nfunc TestIDString(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tassert.Equal(t, \"9m4e2mr0ui3e8a215n4g\", id.String())\n}\n\nfunc TestFromString(t *testing.T) {\n\tid, err := FromString(\"9m4e2mr0ui3e8a215n4g\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc TestFromStringInvalid(t *testing.T) {\n\tid, err := FromString(\"invalid\")\n\tassert.EqualError(t, err, strInvalidID)\n\tassert.Equal(t, ID{}, id)\n}\n\ntype jsonType struct {\n\tID *ID\n\tStr string\n}\n\nfunc TestIDJSONMarshaling(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tv := jsonType{ID: 
&id, Str: \"test\"}\n\tdata, err := json.Marshal(&v)\n\tassert.NoError(t, err)\n\tassert.Equal(t, `{\"ID\":\"9m4e2mr0ui3e8a215n4g\",\"Str\":\"test\"}`, string(data))\n}\n\nfunc TestIDJSONUnmarshaling(t *testing.T) {\n\tdata := []byte(`{\"ID\":\"9m4e2mr0ui3e8a215n4g\",\"Str\":\"test\"}`)\n\tv := jsonType{}\n\terr := json.Unmarshal(data, &v)\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, *v.ID)\n}\n\nfunc TestIDJSONUnmarshalingError(t *testing.T) {\n\tv := jsonType{}\n\terr := json.Unmarshal([]byte(`{\"ID\":\"9M4E2MR0UI3E8A215N4G\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n\terr = json.Unmarshal([]byte(`{\"ID\":\"TYjhW2D0huQoQS\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n\terr = json.Unmarshal([]byte(`{\"ID\":\"TYjhW2D0huQoQS3kdk\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n}\n\nfunc TestIDDriverValue(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tdata, err := id.Value()\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"9m4e2mr0ui3e8a215n4g\", data)\n}\n\nfunc TestIDDriverScan(t *testing.T) {\n\tid := ID{}\n\terr := id.Scan(\"9m4e2mr0ui3e8a215n4g\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc TestIDDriverScanError(t *testing.T) {\n\tid := ID{}\n\terr := id.Scan(0)\n\tassert.EqualError(t, err, \"xid: scanning unsupported type: int\")\n\terr = id.Scan(\"0\")\n\tassert.EqualError(t, err, strInvalidID)\n}\n\nfunc TestIDDriverScanByteFromDatabase(t *testing.T) {\n\tid := ID{}\n\tbs := []byte(\"9m4e2mr0ui3e8a215n4g\")\n\terr := id.Scan(bs)\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc BenchmarkNew(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_ = New()\n\t\t}\n\t})\n}\n\nfunc BenchmarkNewString(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_ = New().String()\n\t\t}\n\t})\n}\n\n\/\/ func BenchmarkUUIDv1(b *testing.B) {\n\/\/ \tb.RunParallel(func(pb *testing.PB) {\n\/\/ \t\tfor pb.Next() {\n\/\/ \t\t\t_ = uuid.NewV1().String()\n\/\/ \t\t}\n\/\/ \t})\n\/\/ }\n\n\/\/ func BenchmarkUUIDv4(b *testing.B) {\n\/\/ \tb.RunParallel(func(pb *testing.PB) {\n\/\/ \t\tfor pb.Next() {\n\/\/ \t\t\t_ = uuid.NewV4().String()\n\/\/ \t\t}\n\/\/ \t})\n\/\/ }\nAdd a FromString benchmarkpackage xid\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nconst strInvalidID = \"xid: invalid ID\"\n\ntype IDParts struct {\n\tid ID\n\ttimestamp int64\n\tmachine []byte\n\tpid uint16\n\tcounter int32\n}\n\nvar IDs = []IDParts{\n\tIDParts{\n\t\tID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9},\n\t\t1300816219,\n\t\t[]byte{0x60, 0xf4, 0x86},\n\t\t0xe428,\n\t\t4271561,\n\t},\n\tIDParts{\n\t\tID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},\n\t\t0,\n\t\t[]byte{0x00, 0x00, 0x00},\n\t\t0x0000,\n\t\t0,\n\t},\n\tIDParts{\n\t\tID{0x00, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x00, 0x00, 0x01},\n\t\t0,\n\t\t[]byte{0xaa, 0xbb, 0xcc},\n\t\t0xddee,\n\t\t1,\n\t},\n}\n\nfunc TestIDPartsExtraction(t *testing.T) {\n\tfor i, v := range IDs {\n\t\tassert.Equal(t, v.id.Time(), time.Unix(v.timestamp, 0), \"#%d timestamp\", i)\n\t\tassert.Equal(t, v.id.Machine(), v.machine, \"#%d machine\", i)\n\t\tassert.Equal(t, v.id.Pid(), 
v.pid, \"#%d pid\", i)\n\t\tassert.Equal(t, v.id.Counter(), v.counter, \"#%d counter\", i)\n\t}\n}\n\nfunc TestNew(t *testing.T) {\n\t\/\/ Generate 10 ids\n\tids := make([]ID, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tids[i] = New()\n\t}\n\tfor i := 1; i < 10; i++ {\n\t\tprevID := ids[i-1]\n\t\tid := ids[i]\n\t\t\/\/ Test for uniqueness among all other 9 generated ids\n\t\tfor j, tid := range ids {\n\t\t\tif j != i {\n\t\t\t\tassert.NotEqual(t, id, tid, \"Generated ID is not unique\")\n\t\t\t}\n\t\t}\n\t\t\/\/ Check that timestamp was incremented and is within 30 seconds of the previous one\n\t\tsecs := id.Time().Sub(prevID.Time()).Seconds()\n\t\tassert.Equal(t, (secs >= 0 && secs <= 30), true, \"Wrong timestamp in generated ID\")\n\t\t\/\/ Check that machine ids are the same\n\t\tassert.Equal(t, id.Machine(), prevID.Machine())\n\t\t\/\/ Check that pids are the same\n\t\tassert.Equal(t, id.Pid(), prevID.Pid())\n\t\t\/\/ Test for proper increment\n\t\tdelta := int(id.Counter() - prevID.Counter())\n\t\tassert.Equal(t, delta, 1, \"Wrong increment in generated ID\")\n\t}\n}\n\nfunc TestIDString(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tassert.Equal(t, \"9m4e2mr0ui3e8a215n4g\", id.String())\n}\n\nfunc TestFromString(t *testing.T) {\n\tid, err := FromString(\"9m4e2mr0ui3e8a215n4g\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc TestFromStringInvalid(t *testing.T) {\n\tid, err := FromString(\"invalid\")\n\tassert.EqualError(t, err, strInvalidID)\n\tassert.Equal(t, ID{}, id)\n}\n\ntype jsonType struct {\n\tID *ID\n\tStr string\n}\n\nfunc TestIDJSONMarshaling(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tv := jsonType{ID: &id, Str: \"test\"}\n\tdata, err := json.Marshal(&v)\n\tassert.NoError(t, err)\n\tassert.Equal(t, `{\"ID\":\"9m4e2mr0ui3e8a215n4g\",\"Str\":\"test\"}`, string(data))\n}\n\nfunc TestIDJSONUnmarshaling(t *testing.T) {\n\tdata := []byte(`{\"ID\":\"9m4e2mr0ui3e8a215n4g\",\"Str\":\"test\"}`)\n\tv := jsonType{}\n\terr := json.Unmarshal(data, &v)\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, *v.ID)\n}\n\nfunc TestIDJSONUnmarshalingError(t *testing.T) {\n\tv := jsonType{}\n\terr := json.Unmarshal([]byte(`{\"ID\":\"9M4E2MR0UI3E8A215N4G\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n\terr = json.Unmarshal([]byte(`{\"ID\":\"TYjhW2D0huQoQS\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n\terr = json.Unmarshal([]byte(`{\"ID\":\"TYjhW2D0huQoQS3kdk\"}`), &v)\n\tassert.EqualError(t, err, strInvalidID)\n}\n\nfunc TestIDDriverValue(t *testing.T) {\n\tid := ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}\n\tdata, err := id.Value()\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"9m4e2mr0ui3e8a215n4g\", data)\n}\n\nfunc TestIDDriverScan(t *testing.T) {\n\tid := ID{}\n\terr := id.Scan(\"9m4e2mr0ui3e8a215n4g\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc TestIDDriverScanError(t *testing.T) {\n\tid := ID{}\n\terr := id.Scan(0)\n\tassert.EqualError(t, err, \"xid: scanning unsupported type: int\")\n\terr = id.Scan(\"0\")\n\tassert.EqualError(t, err, strInvalidID)\n}\n\nfunc TestIDDriverScanByteFromDatabase(t *testing.T) {\n\tid := ID{}\n\tbs := []byte(\"9m4e2mr0ui3e8a215n4g\")\n\terr := 
id.Scan(bs)\n\tassert.NoError(t, err)\n\tassert.Equal(t, ID{0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9}, id)\n}\n\nfunc BenchmarkNew(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_ = New()\n\t\t}\n\t})\n}\n\nfunc BenchmarkNewString(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_ = New().String()\n\t\t}\n\t})\n}\n\nfunc BenchmarkFromString(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_, _ = FromString(\"9m4e2mr0ui3e8a215n4g\")\n\t\t}\n\t})\n}\n\n\/\/ func BenchmarkUUIDv1(b *testing.B) {\n\/\/ \tb.RunParallel(func(pb *testing.PB) {\n\/\/ \t\tfor pb.Next() {\n\/\/ \t\t\t_ = uuid.NewV1().String()\n\/\/ \t\t}\n\/\/ \t})\n\/\/ }\n\n\/\/ func BenchmarkUUIDv4(b *testing.B) {\n\/\/ \tb.RunParallel(func(pb *testing.PB) {\n\/\/ \t\tfor pb.Next() {\n\/\/ \t\t\t_ = uuid.NewV4().String()\n\/\/ \t\t}\n\/\/ \t})\n\/\/ }\n<|endoftext|>"} {"text":"package module\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\tversion \"github.com\/hashicorp\/go-version\"\n\t\"github.com\/hashicorp\/terraform\/registry\/regsrc\"\n\t\"github.com\/hashicorp\/terraform\/registry\/response\"\n)\n\n\/\/ Map of module names and location of test modules.\n\/\/ Only one version for now, as we only lookup latest from the registry.\ntype testMod struct {\n\tlocation string\n\tversion string\n}\n\nconst (\n\ttestCredentials = \"a9564ebc3289b7a14551baf8ad5ec60a\"\n)\n\n\/\/ All the locations from the mockRegistry start with a file:\/\/ scheme. If\n\/\/ the location string here doesn't have a scheme, the mockRegistry will\n\/\/ find the absolute path and return a complete URL.\nvar testMods = map[string][]testMod{\n\t\"registry\/foo\/bar\": {{\n\t\tlocation: \"file:\/\/\/download\/registry\/foo\/bar\/0.2.3\/\/*?archive=tar.gz\",\n\t\tversion: \"0.2.3\",\n\t}},\n\t\"registry\/foo\/baz\": {{\n\t\tlocation: \"file:\/\/\/download\/registry\/foo\/baz\/1.10.0\/\/*?archive=tar.gz\",\n\t\tversion: \"1.10.0\",\n\t}},\n\t\"registry\/local\/sub\": {{\n\t\tlocation: \"test-fixtures\/registry-tar-subdir\/foo.tgz\/\/*?archive=tar.gz\",\n\t\tversion: \"0.1.2\",\n\t}},\n\t\"exists-in-registry\/identifier\/provider\": {{\n\t\tlocation: \"file:\/\/\/registry\/exists\",\n\t\tversion: \"0.2.0\",\n\t}},\n\t\"test-versions\/name\/provider\": {\n\t\t{version: \"2.2.0\"},\n\t\t{version: \"2.1.1\"},\n\t\t{version: \"1.2.2\"},\n\t\t{version: \"1.2.1\"},\n\t},\n\t\"private\/name\/provider\": {\n\t\t{version: \"1.0.0\"},\n\t},\n}\n\nfunc latestVersion(versions []string) string {\n\tvar col version.Collection\n\tfor _, v := range versions {\n\t\tver, err := version.NewVersion(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tcol = append(col, ver)\n\t}\n\n\tsort.Sort(col)\n\treturn col[len(col)-1].String()\n}\n\n
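\/\/ A quick illustrative check of the latestVersion helper above (not part of\n\/\/ the original suite; the version strings are hypothetical). Collection sorts\n\/\/ ascending, so the last element is the newest:\nfunc TestLatestVersion(t *testing.T) {\n\tif got := latestVersion([]string{\"1.2.1\", \"2.2.0\", \"2.1.1\"}); got != \"2.2.0\" {\n\t\tt.Fatalf(\"expected 2.2.0, got %s\", got)\n\t}\n}\n\nfunc mockRegHandler() http.Handler {\n\tmux := http.NewServeMux()\n\n\tdownload := func(w http.ResponseWriter, r *http.Request) {\n\t\tp := strings.TrimLeft(r.URL.Path, \"\/\")\n\t\t\/\/ handle download request\n\t\tre := regexp.MustCompile(`^([-a-z]+\/\\w+\/\\w+).*\/download$`)\n\t\t\/\/ download lookup\n\t\tmatches := re.FindStringSubmatch(p)\n\t\tif len(matches) != 2 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ check for auth\n\t\tif strings.Contains(matches[0], \"private\/\") {\n\t\t\tif 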
!strings.Contains(r.Header.Get(\"Authorization\"), testCredentials) {\n\t\t\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\t\t}\n\t\t}\n\n\t\tversions, ok := testMods[matches[1]]\n\t\tif !ok {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tmod := versions[0]\n\n\t\tlocation := mod.location\n\t\tif !strings.HasPrefix(location, \"file:\/\/\/\") {\n\t\t\t\/\/ we can't use filepath.Abs because it will clean `\/\/`\n\t\t\twd, _ := os.Getwd()\n\t\t\tlocation = fmt.Sprintf(\"file:\/\/%s\/%s\", wd, location)\n\t\t}\n\n\t\tw.Header().Set(\"X-Terraform-Get\", location)\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\/\/ no body\n\t\treturn\n\t}\n\n\tversions := func(w http.ResponseWriter, r *http.Request) {\n\t\tp := strings.TrimLeft(r.URL.Path, \"\/\")\n\t\tre := regexp.MustCompile(`^([-a-z]+\/\\w+\/\\w+)\/versions$`)\n\t\tmatches := re.FindStringSubmatch(p)\n\t\tif len(matches) != 2 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ check for auth\n\t\tif strings.Contains(matches[1], \"private\/\") {\n\t\t\tif !strings.Contains(r.Header.Get(\"Authorization\"), testCredentials) {\n\t\t\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\t\t}\n\t\t}\n\n\t\tname := matches[1]\n\t\tversions, ok := testMods[name]\n\t\tif !ok {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ only adding the single requested module for now\n\t\t\/\/ this is the minimum that any registry is expected to support\n\t\tmpvs := &response.ModuleProviderVersions{\n\t\t\tSource: name,\n\t\t}\n\n\t\tfor _, v := range versions {\n\t\t\tmv := &response.ModuleVersion{\n\t\t\t\tVersion: v.version,\n\t\t\t}\n\t\t\tmpvs.Versions = append(mpvs.Versions, mv)\n\t\t}\n\n\t\tresp := response.ModuleVersions{\n\t\t\tModules: []*response.ModuleProviderVersions{mpvs},\n\t\t}\n\n\t\tjs, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(js)\n\t}\n\n\tmux.Handle(\"\/v1\/modules\/\",\n\t\thttp.StripPrefix(\"\/v1\/modules\/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif strings.HasSuffix(r.URL.Path, \"\/download\") {\n\t\t\t\tdownload(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.HasSuffix(r.URL.Path, \"\/versions\") {\n\t\t\t\tversions(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thttp.NotFound(w, r)\n\t\t})),\n\t)\n\n\tmux.HandleFunc(\"\/.well-known\/terraform.json\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tio.WriteString(w, `{\"modules.v1\":\"http:\/\/localhost\/v1\/modules\/\"}`)\n\t})\n\treturn mux\n}\n\n\/\/ Just enough like a registry to exercise our code.\n\/\/ Returns the location of the latest version\nfunc mockRegistry() *httptest.Server {\n\tserver := httptest.NewServer(mockRegHandler())\n\treturn server\n}\n\n\/\/ GitHub archives always contain the module source in a single subdirectory,\n\/\/ so the registry will return a path with a `\/\/*` suffix. 
We need to make\n\/\/ sure this doesn't intefere with our internal handling of `\/\/` subdir.\nfunc TestRegistryGitHubArchive(t *testing.T) {\n\tserver := mockRegistry()\n\tdefer server.Close()\n\n\tdisco := testDisco(server)\n\tstorage := testStorage(t, disco)\n\n\ttree := NewTree(\"\", testConfig(t, \"registry-tar-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\t\/\/ stop the registry server, and make sure that we don't need to call out again\n\tserver.Close()\n\ttree = NewTree(\"\", testConfig(t, \"registry-tar-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tactual := strings.TrimSpace(tree.String())\n\texpected := strings.TrimSpace(treeLoadSubdirStr)\n\tif actual != expected {\n\t\tt.Fatalf(\"got: \\n\\n%s\\nexpected: \\n\\n%s\", actual, expected)\n\t}\n}\n\n\/\/ Test that the \/\/subdir notation can be used with registry modules\nfunc TestRegisryModuleSubdir(t *testing.T) {\n\tserver := mockRegistry()\n\tdefer server.Close()\n\n\tdisco := testDisco(server)\n\tstorage := testStorage(t, disco)\n\ttree := NewTree(\"\", testConfig(t, \"registry-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactual := strings.TrimSpace(tree.String())\n\texpected := strings.TrimSpace(treeLoadRegistrySubdirStr)\n\tif actual != expected {\n\t\tt.Fatalf(\"got: \\n\\n%s\\nexpected: \\n\\n%s\", actual, expected)\n\t}\n}\n\nfunc TestAccRegistryDiscover(t *testing.T) {\n\tif os.Getenv(\"TF_ACC\") == \"\" {\n\t\tt.Skip(\"skipping ACC test\")\n\t}\n\n\t\/\/ simply check that we get a valid github URL for this from the registry\n\tmodule, err := regsrc.ParseModuleSource(\"hashicorp\/consul\/aws\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts := NewStorage(\"\/tmp\", nil, nil)\n\tloc, err := s.lookupModuleLocation(module, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err := url.Parse(loc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !strings.HasSuffix(u.Host, \"github.com\") {\n\t\tt.Fatalf(\"expected host 'github.com', got: %q\", u.Host)\n\t}\n\n\tif !strings.Contains(u.String(), \"consul\") {\n\t\tt.Fatalf(\"url doesn't contain 'consul': %s\", u.String())\n\t}\n}\n\nfunc TestAccRegistryLoad(t *testing.T) {\n\tif os.Getenv(\"TF_ACC\") == \"\" {\n\t\tt.Skip(\"skipping ACC test\")\n\t}\n\n\tstorage := testStorage(t, nil)\n\ttree := NewTree(\"\", testConfig(t, \"registry-load\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\t\/\/ TODO expand this further by fetching some metadata from the registry\n\tactual := strings.TrimSpace(tree.String())\n\tif !strings.Contains(actual, \"(path: vault)\") {\n\t\tt.Fatal(\"missing vault module, got:\\n\", actual)\n\t}\n}\nmake testCredentials token obviously 
fake\npackage module\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\tversion \"github.com\/hashicorp\/go-version\"\n\t\"github.com\/hashicorp\/terraform\/registry\/regsrc\"\n\t\"github.com\/hashicorp\/terraform\/registry\/response\"\n)\n\n\/\/ Map of module names and location of test modules.\n\/\/ Only one version for now, as we only lookup latest from the registry.\ntype testMod struct {\n\tlocation string\n\tversion string\n}\n\nconst (\n\ttestCredentials = \"test-auth-token\"\n)\n\n\/\/ All the locations from the mockRegistry start with a file:\/\/ scheme. If\n\/\/ the location string here doesn't have a scheme, the mockRegistry will\n\/\/ find the absolute path and return a complete URL.\nvar testMods = map[string][]testMod{\n\t\"registry\/foo\/bar\": {{\n\t\tlocation: \"file:\/\/\/download\/registry\/foo\/bar\/0.2.3\/\/*?archive=tar.gz\",\n\t\tversion: \"0.2.3\",\n\t}},\n\t\"registry\/foo\/baz\": {{\n\t\tlocation: \"file:\/\/\/download\/registry\/foo\/baz\/1.10.0\/\/*?archive=tar.gz\",\n\t\tversion: \"1.10.0\",\n\t}},\n\t\"registry\/local\/sub\": {{\n\t\tlocation: \"test-fixtures\/registry-tar-subdir\/foo.tgz\/\/*?archive=tar.gz\",\n\t\tversion: \"0.1.2\",\n\t}},\n\t\"exists-in-registry\/identifier\/provider\": {{\n\t\tlocation: \"file:\/\/\/registry\/exists\",\n\t\tversion: \"0.2.0\",\n\t}},\n\t\"test-versions\/name\/provider\": {\n\t\t{version: \"2.2.0\"},\n\t\t{version: \"2.1.1\"},\n\t\t{version: \"1.2.2\"},\n\t\t{version: \"1.2.1\"},\n\t},\n\t\"private\/name\/provider\": {\n\t\t{version: \"1.0.0\"},\n\t},\n}\n\nfunc latestVersion(versions []string) string {\n\tvar col version.Collection\n\tfor _, v := range versions {\n\t\tver, err := version.NewVersion(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tcol = append(col, ver)\n\t}\n\n\tsort.Sort(col)\n\treturn col[len(col)-1].String()\n}\n\nfunc mockRegHandler() http.Handler {\n\tmux := http.NewServeMux()\n\n\tdownload := func(w http.ResponseWriter, r *http.Request) {\n\t\tp := strings.TrimLeft(r.URL.Path, \"\/\")\n\t\t\/\/ handle download request\n\t\tre := regexp.MustCompile(`^([-a-z]+\/\\w+\/\\w+).*\/download$`)\n\t\t\/\/ download lookup\n\t\tmatches := re.FindStringSubmatch(p)\n\t\tif len(matches) != 2 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ check for auth\n\t\tif strings.Contains(matches[0], \"private\/\") {\n\t\t\tif !strings.Contains(r.Header.Get(\"Authorization\"), testCredentials) {\n\t\t\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\t\t}\n\t\t}\n\n\t\tversions, ok := testMods[matches[1]]\n\t\tif !ok {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tmod := versions[0]\n\n\t\tlocation := mod.location\n\t\tif !strings.HasPrefix(location, \"file:\/\/\/\") {\n\t\t\t\/\/ we can't use filepath.Abs because it will clean `\/\/`\n\t\t\twd, _ := os.Getwd()\n\t\t\tlocation = fmt.Sprintf(\"file:\/\/%s\/%s\", wd, location)\n\t\t}\n\n\t\tw.Header().Set(\"X-Terraform-Get\", location)\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\/\/ no body\n\t\treturn\n\t}\n\n\tversions := func(w http.ResponseWriter, r *http.Request) {\n\t\tp := strings.TrimLeft(r.URL.Path, \"\/\")\n\t\tre := regexp.MustCompile(`^([-a-z]+\/\\w+\/\\w+)\/versions$`)\n\t\tmatches := re.FindStringSubmatch(p)\n\t\tif len(matches) != 2 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ check for auth\n\t\tif strings.Contains(matches[1], \"private\/\") 
{\n\t\t\tif !strings.Contains(r.Header.Get(\"Authorization\"), testCredentials) {\n\t\t\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\t\t}\n\t\t}\n\n\t\tname := matches[1]\n\t\tversions, ok := testMods[name]\n\t\tif !ok {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ only adding the single requested module for now\n\t\t\/\/ this is the minimum that any registry is expected to support\n\t\tmpvs := &response.ModuleProviderVersions{\n\t\t\tSource: name,\n\t\t}\n\n\t\tfor _, v := range versions {\n\t\t\tmv := &response.ModuleVersion{\n\t\t\t\tVersion: v.version,\n\t\t\t}\n\t\t\tmpvs.Versions = append(mpvs.Versions, mv)\n\t\t}\n\n\t\tresp := response.ModuleVersions{\n\t\t\tModules: []*response.ModuleProviderVersions{mpvs},\n\t\t}\n\n\t\tjs, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tw.Write(js)\n\t}\n\n\tmux.Handle(\"\/v1\/modules\/\",\n\t\thttp.StripPrefix(\"\/v1\/modules\/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif strings.HasSuffix(r.URL.Path, \"\/download\") {\n\t\t\t\tdownload(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif strings.HasSuffix(r.URL.Path, \"\/versions\") {\n\t\t\t\tversions(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thttp.NotFound(w, r)\n\t\t})),\n\t)\n\n\tmux.HandleFunc(\"\/.well-known\/terraform.json\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\tio.WriteString(w, `{\"modules.v1\":\"http:\/\/localhost\/v1\/modules\/\"}`)\n\t})\n\treturn mux\n}\n\n\/\/ Just enough like a registry to exercise our code.\n\/\/ Returns the location of the latest version\nfunc mockRegistry() *httptest.Server {\n\tserver := httptest.NewServer(mockRegHandler())\n\treturn server\n}\n\n
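\/\/ A small illustrative check (not in the original suite) that the mock\n\/\/ registry serves the service discovery document wired up above:\nfunc TestMockRegistryDiscovery(t *testing.T) {\n\tserver := mockRegistry()\n\tdefer server.Close()\n\n\tresp, err := http.Get(server.URL + \"\/.well-known\/terraform.json\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif ct := resp.Header.Get(\"Content-Type\"); ct != \"application\/json\" {\n\t\tt.Fatalf(\"expected application\/json, got %q\", ct)\n\t}\n}\n\n\/\/ GitHub archives always contain the module source in a single subdirectory,\n\/\/ so the registry will return a path with a `\/\/*` suffix. 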
We need to make\n\/\/ sure this doesn't intefere with our internal handling of `\/\/` subdir.\nfunc TestRegistryGitHubArchive(t *testing.T) {\n\tserver := mockRegistry()\n\tdefer server.Close()\n\n\tdisco := testDisco(server)\n\tstorage := testStorage(t, disco)\n\n\ttree := NewTree(\"\", testConfig(t, \"registry-tar-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\t\/\/ stop the registry server, and make sure that we don't need to call out again\n\tserver.Close()\n\ttree = NewTree(\"\", testConfig(t, \"registry-tar-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tactual := strings.TrimSpace(tree.String())\n\texpected := strings.TrimSpace(treeLoadSubdirStr)\n\tif actual != expected {\n\t\tt.Fatalf(\"got: \\n\\n%s\\nexpected: \\n\\n%s\", actual, expected)\n\t}\n}\n\n\/\/ Test that the \/\/subdir notation can be used with registry modules\nfunc TestRegisryModuleSubdir(t *testing.T) {\n\tserver := mockRegistry()\n\tdefer server.Close()\n\n\tdisco := testDisco(server)\n\tstorage := testStorage(t, disco)\n\ttree := NewTree(\"\", testConfig(t, \"registry-subdir\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tactual := strings.TrimSpace(tree.String())\n\texpected := strings.TrimSpace(treeLoadRegistrySubdirStr)\n\tif actual != expected {\n\t\tt.Fatalf(\"got: \\n\\n%s\\nexpected: \\n\\n%s\", actual, expected)\n\t}\n}\n\nfunc TestAccRegistryDiscover(t *testing.T) {\n\tif os.Getenv(\"TF_ACC\") == \"\" {\n\t\tt.Skip(\"skipping ACC test\")\n\t}\n\n\t\/\/ simply check that we get a valid github URL for this from the registry\n\tmodule, err := regsrc.ParseModuleSource(\"hashicorp\/consul\/aws\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts := NewStorage(\"\/tmp\", nil, nil)\n\tloc, err := s.lookupModuleLocation(module, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err := url.Parse(loc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !strings.HasSuffix(u.Host, \"github.com\") {\n\t\tt.Fatalf(\"expected host 'github.com', got: %q\", u.Host)\n\t}\n\n\tif !strings.Contains(u.String(), \"consul\") {\n\t\tt.Fatalf(\"url doesn't contain 'consul': %s\", u.String())\n\t}\n}\n\nfunc TestAccRegistryLoad(t *testing.T) {\n\tif os.Getenv(\"TF_ACC\") == \"\" {\n\t\tt.Skip(\"skipping ACC test\")\n\t}\n\n\tstorage := testStorage(t, nil)\n\ttree := NewTree(\"\", testConfig(t, \"registry-load\"))\n\n\tstorage.Mode = GetModeGet\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tif !tree.Loaded() {\n\t\tt.Fatal(\"should be loaded\")\n\t}\n\n\tstorage.Mode = GetModeNone\n\tif err := tree.Load(storage); err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\t\/\/ TODO expand this further by fetching some metadata from the registry\n\tactual := strings.TrimSpace(tree.String())\n\tif !strings.Contains(actual, \"(path: vault)\") {\n\t\tt.Fatal(\"missing vault module, got:\\n\", actual)\n\t}\n}\n<|endoftext|>"} {"text":"package 
elastic\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/khezen\/bulklog\/collection\"\n)\n\n\/\/ Index - elasticsearch index definition\n\/\/ ref: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/indices-templates.html\ntype Index struct {\n\tTemplate string `json:\"template\"`\n\tSettings IndexSettings `json:\"settings\"`\n\tMappings Mappings `json:\"mappings\"`\n}\n\n\/\/ IndexSettings -\ntype IndexSettings struct {\n\tNumberOfShards int `json:\"number_of_shards\"`\n}\n\n\/\/ Mappings - document schema definitions\ntype Mappings map[collection.SchemaName]Mapping\n\n\/\/ Mapping - document schema definition\n\/\/ ref : \/\/ ref: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/mapping.html\ntype Mapping struct {\n\tProperties map[string]Field `json:\"properties\"`\n}\n\n\/\/ Field -\ntype Field struct {\n\tType string `json:\"type\"`\n}\n\n\/\/ RenderElasticIndex - render elasticsearch mapping\nfunc RenderElasticIndex(collect collection.Collection, settings IndexSettings) Index {\n\tindex := Index{\n\t\tTemplate: fmt.Sprintf(\"%s-*\", collect.Name),\n\t\tSettings: settings,\n\t\tMappings: make(map[collection.SchemaName]Mapping),\n\t}\n\tfor _, schema := range collect.Schemas {\n\t\tmapping := Mapping{\n\t\t\tProperties: make(map[string]Field),\n\t\t}\n\t\tfor key, field := range schema.Fields {\n\t\t\tmapping.Properties[key] = Field{\n\t\t\t\tType: translateType(field),\n\t\t\t}\n\t\t}\n\t\tindex.Mappings[schema.Name] = mapping\n\t}\n\treturn index\n}\n\nfunc translateType(field collection.Field) string {\n\tswitch field.Type {\n\tcase collection.Bool:\n\t\treturn \"bool\"\n\tcase collection.UInt8, collection.UInt16, collection.UInt32, collection.UInt64,\n\t\tcollection.Int8, collection.Int16, collection.Int32, collection.Int64:\n\t\treturn \"long\"\n\tcase collection.Float32, collection.Float64:\n\t\treturn \"double\"\n\tcase collection.DateTime:\n\t\treturn \"time\"\n\tcase collection.Object:\n\t\treturn \"object\"\n\tcase collection.String:\n\t\tif field.MaxLength > 0 || field.Length > 0 {\n\t\t\treturn \"keyword\"\n\t\t}\n\t\treturn \"text\"\n\tdefault:\n\t\treturn \"text\"\n\t}\n}\n\n\/\/ RenderIndexName - logs: logs-2017.05.26\nfunc RenderIndexName(d collection.Document) string {\n\tindexBuf := bytes.NewBufferString(string(d.CollectionName))\n\tindexBuf.WriteString(\"-\")\n\tindexBuf.WriteString(d.PostedAt.Format(\"2006.01.02\"))\n\treturn indexBuf.String()\n}\n\n\/\/ Digest returns the JSON request to be append to the bulk\nfunc Digest(d collection.Document) ([]byte, error) {\n\trequest := make(map[string]interface{})\n\t\/\/{ \"index\" : { \"_index\" : \"logs-2017.05.28\", \"_type\" : \"log\", \"_id\" : \"1\" } }\n\tdocDescription := make(map[string]interface{})\n\tdocDescription[\"_index\"] = RenderIndexName(d)\n\tdocDescription[\"_type\"] = d.SchemaName\n\tdocDescription[\"_id\"] = d.ID\n\tdocDescription[\"post_date\"] = d.PostedAt.Format(time.RFC3339)\n\trequest[\"index\"] = docDescription\n\tbody, err := json.Marshal(request)\n\tbody = append(body, '\\n')\n\tbody = append(body, d.Body...)\n\tbody = append(body, '\\n')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}\nerr check positionpackage elastic\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/khezen\/bulklog\/collection\"\n)\n\n\/\/ Index - elasticsearch index definition\n\/\/ ref: 
https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/indices-templates.html\ntype Index struct {\n\tTemplate string `json:\"template\"`\n\tSettings IndexSettings `json:\"settings\"`\n\tMappings Mappings `json:\"mappings\"`\n}\n\n\/\/ IndexSettings -\ntype IndexSettings struct {\n\tNumberOfShards int `json:\"number_of_shards\"`\n}\n\n\/\/ Mappings - document schema definitions\ntype Mappings map[collection.SchemaName]Mapping\n\n\/\/ Mapping - document schema definition\n\/\/ ref : \/\/ ref: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/mapping.html\ntype Mapping struct {\n\tProperties map[string]Field `json:\"properties\"`\n}\n\n\/\/ Field -\ntype Field struct {\n\tType string `json:\"type\"`\n}\n\n\/\/ RenderElasticIndex - render elasticsearch mapping\nfunc RenderElasticIndex(collect collection.Collection, settings IndexSettings) Index {\n\tindex := Index{\n\t\tTemplate: fmt.Sprintf(\"%s-*\", collect.Name),\n\t\tSettings: settings,\n\t\tMappings: make(map[collection.SchemaName]Mapping),\n\t}\n\tfor _, schema := range collect.Schemas {\n\t\tmapping := Mapping{\n\t\t\tProperties: make(map[string]Field),\n\t\t}\n\t\tfor key, field := range schema.Fields {\n\t\t\tmapping.Properties[key] = Field{\n\t\t\t\tType: translateType(field),\n\t\t\t}\n\t\t}\n\t\tindex.Mappings[schema.Name] = mapping\n\t}\n\treturn index\n}\n\nfunc translateType(field collection.Field) string {\n\tswitch field.Type {\n\tcase collection.Bool:\n\t\treturn \"bool\"\n\tcase collection.UInt8, collection.UInt16, collection.UInt32, collection.UInt64,\n\t\tcollection.Int8, collection.Int16, collection.Int32, collection.Int64:\n\t\treturn \"long\"\n\tcase collection.Float32, collection.Float64:\n\t\treturn \"double\"\n\tcase collection.DateTime:\n\t\treturn \"time\"\n\tcase collection.Object:\n\t\treturn \"object\"\n\tcase collection.String:\n\t\tif field.MaxLength > 0 || field.Length > 0 {\n\t\t\treturn \"keyword\"\n\t\t}\n\t\treturn \"text\"\n\tdefault:\n\t\treturn \"text\"\n\t}\n}\n\n\/\/ RenderIndexName - logs: logs-2017.05.26\nfunc RenderIndexName(d collection.Document) string {\n\tindexBuf := bytes.NewBufferString(string(d.CollectionName))\n\tindexBuf.WriteString(\"-\")\n\tindexBuf.WriteString(d.PostedAt.Format(\"2006.01.02\"))\n\treturn indexBuf.String()\n}\n\n\/\/ Digest returns the JSON request to be append to the bulk\nfunc Digest(d collection.Document) ([]byte, error) {\n\trequest := make(map[string]interface{})\n\t\/\/{ \"index\" : { \"_index\" : \"logs-2017.05.28\", \"_type\" : \"log\", \"_id\" : \"1\" } }\n\tdocDescription := make(map[string]interface{})\n\tdocDescription[\"_index\"] = RenderIndexName(d)\n\tdocDescription[\"_type\"] = d.SchemaName\n\tdocDescription[\"_id\"] = d.ID\n\tdocDescription[\"post_date\"] = d.PostedAt.Format(time.RFC3339)\n\trequest[\"index\"] = docDescription\n\tbody, err := json.Marshal(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody = append(body, '\\n')\n\tbody = append(body, d.Body...)\n\tbody = append(body, '\\n')\n\treturn body, nil\n}\n<|endoftext|>"} {"text":"package handlers\n\nimport (\n\t\"git.ianfross.com\/ifross\/expensetracker\/env\"\n\t\"git.ianfross.com\/ifross\/expensetracker\/auth\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/juju\/errors\"\n\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\ntype jsonResponse struct {\n\tStatus string `json:\"status\"`\n\tData interface{} `json:\"data,omitempty\"`\n\tMessage string 
`json:\"message,omitempty\"`\n\tCode int `json:\"code,omitempty\"`\n}\n\nfunc jsonSuccess(w http.ResponseWriter, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(w).Encode(jsonResponse{\"success\", data, \"\", http.StatusOK})\n\tif err != nil {\n\t\tglog.Errorf(\"Error encoding json in successful respone:%v\", err)\n\t}\n}\n\nfunc jsonError(w http.ResponseWriter, code int, message string, err error) error {\n\tif err != nil {\n\t\tglog.Errorf(\"Error in handler: error=%v\\nmessage=%s\", errors.ErrorStack(err), message)\n\t} else {\n\t\tglog.Errorf(\"Error in handler: message=%s\", message)\n\t}\n\n\tw.WriteHeader(code)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(jsonResponse{\"error\", nil, message, code})\n}\n\nfunc jsonErrorWithCodeText(w http.ResponseWriter, code int, err error) error {\n\treturn jsonError(w, code, http.StatusText(code), err)\n}\n\ntype HandlerVars struct {\n\tenv *env.Env\n\tps httprouter.Params\n}\n\nfunc createHandlerVars(e *env.Env, ps httprouter.Params) *HandlerVars {\n\treturn &HandlerVars{e, ps}\n}\n\ntype adminUsersPOSTHandler struct {\n\t*HandlerVars\n}\n\nfunc (a adminUsersPOSTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\tu := struct {\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t\tAdmin bool `json:\"admin\"`\n\t\tActive bool `json:\"active\"`\n\t\tPassword string `json:\"password\"`\n\t}{}\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\n\tif err != nil && err != io.EOF {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tuser, err := a.env.New(u.Name, u.Email, u.Password, u.Password, u.Active, u.Admin)\n\tif err != nil {\n\t\terr = errors.Trace(err)\n\t\tjsonError(w, http.StatusBadRequest, err.Error(), nil)\n\t\treturn\n\t}\n\n\terr = a.env.Insert(user)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, user)\n\tif err != nil {\n\t\tglog.Errorf(\"Error encoding json: %v\\n\", err)\n\t}\n}\n\nfunc CreateAdminUsersPOSTHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUsersPOSTHandler{createHandlerVars(e, ps)}, 200, nil\n}\n\ntype adminUsersGETHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminUsersGETHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUsersGETHandler{createHandlerVars(e, ps)}, 200, nil\n}\n\nfunc (a adminUsersGETHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tusers, err := a.env.Users()\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tfmt.Println(\"Users:\", users)\n\n\tjsonSuccess(w, users)\n\tif err != nil {\n\t\tglog.Errorf(\"Error encoding json: %v\\n\", err)\n\t}\n}\n\ntype adminUserDELETEHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminUserDELETEHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUserDELETEHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminUserDELETEHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tuidStr := h.ps.ByName(\"user_id\")\n\tuid, err := strconv.Atoi(uidStr)\n\tif err != nil {\n\t\tjsonError(w, http.StatusInternalServerError, 
err.Error(), errors.Trace(err))\n\t\treturn\n\t}\n\n\terr = h.env.DeleteUserById(int64(uid))\n\tif err != nil {\n\t\tjsonError(w, http.StatusInternalServerError, err.Error(), errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, nil)\n\tif err != nil {\n\t\tfmt.Printf(\"Error sending success: %v\", err)\n\t}\n}\n\n\ntype adminGroupsGETHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupsGETHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminGroupsGETHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminGroupsGETHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Get user\n\t_, err := h.env.AdminFromSession(w, r)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\t\/\/ User is authenticated\n\tgroups, err := h.env.AllGroups()\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, groups)\n}\n\ntype adminGroupPOSTHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupPOSTHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminGroupPOSTHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminGroupPOSTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t_, err := h.env.AdminFromSession(w, r)\n\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\tnewGroup := struct {\n\t\tName string `json:\"name\"`\n\t\tEmails []string `json:\"emails\"`\n\t\t}{}\n\n\terr = json.NewDecoder(r.Body).Decode(&newGroup)\n\n\tif err != nil && err != io.EOF {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\n\tvar users []*auth.User\n\t\/\/ First make sure all of them are users\n\tfor _, email := range newGroup.Emails {\n\t\tu, err := h.env.UserManager.ByEmail(email)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusBadRequest, fmt.Sprintf(\"User with email %s does not exist\", email), errors.Trace(err))\n\t\t\treturn\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\n\tg, err := h.env.NewGroup(newGroup.Name)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tfor _, user := range users {\n\t\terr := h.env.AddUserToGroup(g, user, false)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusInternalServerError, \"Error creating group\", errors.Trace(err))\n\t\t\treturn\n\t\t}\n\t}\n\n\tjsonSuccess(w, g)\n}\n\nAdded DELETE admin group handler, started PUT.package handlers\n\nimport (\n\t\"git.ianfross.com\/ifross\/expensetracker\/env\"\n\t\"git.ianfross.com\/ifross\/expensetracker\/auth\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/juju\/errors\"\n\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"git.ianfross.com\/ifross\/expensetracker\/models\"\n)\n\ntype jsonResponse struct {\n\tStatus string `json:\"status\"`\n\tData interface{} `json:\"data,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n\tCode int `json:\"code,omitempty\"`\n}\n\nfunc jsonSuccess(w http.ResponseWriter, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(w).Encode(jsonResponse{\"success\", data, \"\", http.StatusOK})\n\tif err != nil 
{\n\t\tglog.Errorf(\"Error encoding json in successful response: %v\", err)\n\t}\n}\n\nfunc jsonError(w http.ResponseWriter, code int, message string, err error) error {\n\tif err != nil {\n\t\tglog.Errorf(\"Error in handler: error=%v\\nmessage=%s\", errors.ErrorStack(err), message)\n\t} else {\n\t\tglog.Errorf(\"Error in handler: message=%s\", message)\n\t}\n\n\tw.WriteHeader(code)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\treturn json.NewEncoder(w).Encode(jsonResponse{\"error\", nil, message, code})\n}\n\nfunc jsonErrorWithCodeText(w http.ResponseWriter, code int, err error) error {\n\treturn jsonError(w, code, http.StatusText(code), err)\n}\n\ntype HandlerVars struct {\n\tenv *env.Env\n\tps httprouter.Params\n}\n\nfunc createHandlerVars(e *env.Env, ps httprouter.Params) *HandlerVars {\n\treturn &HandlerVars{e, ps}\n}\n\ntype adminUsersPOSTHandler struct {\n\t*HandlerVars\n}\n\nfunc (a adminUsersPOSTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\tu := struct {\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t\tAdmin bool `json:\"admin\"`\n\t\tActive bool `json:\"active\"`\n\t\tPassword string `json:\"password\"`\n\t}{}\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\n\tif err != nil && err != io.EOF {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tuser, err := a.env.New(u.Name, u.Email, u.Password, u.Password, u.Active, u.Admin)\n\tif err != nil {\n\t\terr = errors.Trace(err)\n\t\tjsonError(w, http.StatusBadRequest, err.Error(), nil)\n\t\treturn\n\t}\n\n\terr = a.env.Insert(user)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, user)\n\tif err != nil {\n\t\tglog.Errorf(\"Error encoding json: %v\\n\", err)\n\t}\n}\n\nfunc CreateAdminUsersPOSTHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUsersPOSTHandler{createHandlerVars(e, ps)}, 200, nil\n}\n\ntype adminUsersGETHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminUsersGETHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUsersGETHandler{createHandlerVars(e, ps)}, 200, nil\n}\n\nfunc (a adminUsersGETHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tusers, err := a.env.Users()\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tfmt.Println(\"Users:\", users)\n\n\tjsonSuccess(w, users)\n\tif err != nil {\n\t\tglog.Errorf(\"Error encoding json: %v\\n\", err)\n\t}\n}\n\ntype adminUserDELETEHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminUserDELETEHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminUserDELETEHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminUserDELETEHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tuidStr := h.ps.ByName(\"user_id\")\n\tuid, err := strconv.Atoi(uidStr)\n\tif err != nil {\n\t\tjsonError(w, http.StatusInternalServerError, err.Error(), errors.Trace(err))\n\t\treturn\n\t}\n\n\terr = h.env.DeleteUserById(int64(uid))\n\tif err != nil {\n\t\tjsonError(w, http.StatusInternalServerError, err.Error(), errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, nil)\n\tif err != nil {\n\t\tfmt.Printf(\"Error sending success: %v\", 
err)\n\t}\n}\n\n\ntype adminGroupsGETHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupsGETHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminGroupsGETHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminGroupsGETHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Get user\n\t_, err := h.env.AdminFromSession(w, r)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\t\/\/ User is authenticated\n\tgroups, err := h.env.AllGroups()\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, groups)\n}\n\ntype adminGroupPOSTHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupPOSTHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminGroupPOSTHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminGroupPOSTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t_, err := h.env.AdminFromSession(w, r)\n\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\tnewGroup := struct {\n\t\tName string `json:\"name\"`\n\t\tEmails []string `json:\"emails\"`\n\t\t}{}\n\n\terr = json.NewDecoder(r.Body).Decode(&newGroup)\n\n\tif err != nil && err != io.EOF {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\n\tvar users []*auth.User\n\t\/\/ First make sure all of them are users\n\tfor _, email := range newGroup.Emails {\n\t\tu, err := h.env.UserManager.ByEmail(email)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusBadRequest, fmt.Sprintf(\"User with email %s does not exist\", email), errors.Trace(err))\n\t\t\treturn\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\n\tg, err := h.env.NewGroup(newGroup.Name)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tfor _, user := range users {\n\t\terr := h.env.AddUserToGroup(g, user, false)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusInternalServerError, \"Error creating group\", errors.Trace(err))\n\t\t\treturn\n\t\t}\n\t}\n\n\tjsonSuccess(w, g)\n}\n\ntype adminGroupDELETEHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupDELETEHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, int, error) {\n\treturn adminGroupDELETEHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\nfunc (h adminGroupDELETEHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t_, err := h.env.AdminFromSession(w, r)\n\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\t\/\/ User is admin\n\n\tgroupId := struct {\n\t\tId int64 `json:\"id\"`\n\t}{}\n\n\terr = json.NewDecoder(r.Body).Decode(&groupId)\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\terr = h.env.DeleteGroup(&models.Group{Id:groupId.Id})\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\tjsonSuccess(w, nil)\n}\n\n
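\/\/ For reference, the DELETE endpoint above expects a JSON body carrying the\n\/\/ group id. A minimal client-side sketch (hypothetical, including the route;\n\/\/ not part of the original code):\n\/\/\n\/\/\tbody, _ := json.Marshal(struct {\n\/\/\t\tId int64 `json:\"id\"`\n\/\/\t}{Id: 42})\n\/\/\treq, _ := http.NewRequest(\"DELETE\", \"\/admin\/groups\", bytes.NewReader(body))\n\/\/\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\ntype adminGroupPUTHandler struct {\n\t*HandlerVars\n}\n\nfunc CreateAdminGroupPUTHandler(\n\te *env.Env,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) (http.Handler, 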
int, error) {\n\treturn adminGroupPUTHandler{createHandlerVars(e, ps)}, http.StatusOK, nil\n}\n\n\/\/ Need to figure out what happens to the expenses\nfunc (h adminGroupPUTHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t_, err := h.env.AdminFromSession(w, r)\n\n\tif err != nil {\n\t\tjsonErrorWithCodeText(w, http.StatusUnauthorized, errors.Trace(err))\n\t\treturn\n\t}\n\n\tgroup := struct {\n\t\tId int64 `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tEmails []string `json:\"emails\"`\n\t\t}{}\n\n\terr = json.NewDecoder(r.Body).Decode(&group)\n\n\tif err != nil && err != io.EOF {\n\t\tjsonErrorWithCodeText(w, http.StatusInternalServerError, errors.Trace(err))\n\t\treturn\n\t}\n\n\t\/\/ TODO: finish this function\n\n\tjsonError(w, http.StatusServiceUnavailable, \"Unimplemented\", nil)\n}\n<|endoftext|>"} {"text":"package handlers\n\nimport (\n\t\"container\/heap\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gojp\/goreportcard\/download\"\n)\n\nconst (\n\t\/\/ DBPath is the relative (or absolute) path to the bolt database file\n\tDBPath string = \"goreportcard.db\"\n\n\t\/\/ RepoBucket is the bucket in which repos will be cached in the bolt DB\n\tRepoBucket string = \"repos\"\n\n\t\/\/ MetaBucket is the bucket containing the names of the projects with the\n\t\/\/ top 100 high scores, and other meta information\n\tMetaBucket string = \"meta\"\n)\n\n\/\/ CheckHandler handles the request for checking a repo\nfunc CheckHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\trepo, err := download.Clean(r.FormValue(\"repo\"))\n\tif err != nil {\n\t\tlog.Println(\"ERROR: from download.Clean:\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`Could not download the repository: ` + err.Error()))\n\t\treturn\n\t}\n\n\tlog.Printf(\"Checking repo %q...\", repo)\n\n\tforceRefresh := r.Method != \"GET\" \/\/ if this is a GET request, try to fetch from cached version in boltdb first\n\tresp, err := newChecksResp(repo, forceRefresh)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: from newChecksResp:\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`Could not download the repository.`))\n\t\treturn\n\t}\n\n\trespBytes, err := json.Marshal(resp)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: could not marshal json:\", err)\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ write to boltdb\n\tdb, err := bolt.Open(DBPath, 0755, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\tlog.Println(\"Failed to open bolt database: \", err)\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\t\/\/ is this a new repo? 
if so, increase the count in the high scores bucket later\n\tisNewRepo := false\n\tvar oldRepoBytes []byte\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(RepoBucket))\n\t\tif b == nil {\n\t\t\treturn fmt.Errorf(\"repo bucket not found\")\n\t\t}\n\t\toldRepoBytes = b.Get([]byte(repo))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ get the old score and store it for stats updating\n\tvar oldScore *float64\n\tif isNewRepo = oldRepoBytes == nil; !isNewRepo {\n\t\toldRepo := checksResp{}\n\t\terr = json.Unmarshal(oldRepoBytes, &oldRepo)\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: could not unmarshal json:\", err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\toldScore = &oldRepo.Average\n\t}\n\n\t\/\/ if this is a new repo, or the user force-refreshed, update the cache\n\tif isNewRepo || forceRefresh {\n\t\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t\tlog.Printf(\"Saving repo %q to cache...\", repo)\n\n\t\t\tb := tx.Bucket([]byte(RepoBucket))\n\t\t\tif b == nil {\n\t\t\t\treturn fmt.Errorf(\"repo bucket not found\")\n\t\t\t}\n\n\t\t\t\/\/ save repo to cache\n\t\t\terr = b.Put([]byte(repo), respBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn updateMetadata(tx, resp, repo, isNewRepo, oldScore)\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Bolt writing error:\", err)\n\t\t}\n\n\t}\n\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\t\/\/ fetch meta-bucket\n\t\tmb := tx.Bucket([]byte(MetaBucket))\n\t\treturn updateRecentlyViewed(mb, repo)\n\t})\n\n\tb, err := json.Marshal(map[string]string{\"redirect\": \"\/report\/\" + repo})\n\tif err != nil {\n\t\tlog.Println(\"JSON marshal error:\", err)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n\treturn\n}\n\nfunc updateHighScores(mb *bolt.Bucket, resp checksResp, repo string) error {\n\t\/\/ check if we need to update the high score list\n\tif resp.Files < 100 {\n\t\t\/\/ only repos with >= 100 files are considered for the high score list\n\t\treturn nil\n\t}\n\n\t\/\/ start updating high score list\n\tscoreBytes := mb.Get([]byte(\"scores\"))\n\tif scoreBytes == nil {\n\t\tscoreBytes, _ = json.Marshal([]ScoreHeap{})\n\t}\n\tscores := &ScoreHeap{}\n\tjson.Unmarshal(scoreBytes, scores)\n\n\theap.Init(scores)\n\tif len(*scores) > 0 && (*scores)[0].Score > resp.Average*100.0 && len(*scores) == 50 {\n\t\t\/\/ lowest score on list is higher than this repo's score, so no need to add, unless\n\t\t\/\/ we do not have 50 high scores yet\n\t\treturn nil\n\t}\n\t\/\/ if this repo is already in the list, remove the original entry:\n\tfor i := range *scores {\n\t\tif strings.ToLower((*scores)[i].Repo) == strings.ToLower(repo) {\n\t\t\theap.Remove(scores, i)\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ now we can safely push it onto the heap\n\theap.Push(scores, scoreItem{\n\t\tRepo: repo,\n\t\tScore: resp.Average * 100.0,\n\t\tFiles: resp.Files,\n\t})\n\tif len(*scores) > 50 {\n\t\t\/\/ trim heap if it's grown to over 50\n\t\t*scores = (*scores)[1:51]\n\t}\n\tscoreBytes, err := json.Marshal(&scores)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"scores\"), scoreBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc updateStats(mb *bolt.Bucket, resp checksResp, oldScore *float64) error {\n\tscores := make([]int, 101, 101)\n\tstatsBytes := mb.Get([]byte(\"stats\"))\n\tif statsBytes == nil {\n\t\tstatsBytes, _ = json.Marshal(scores)\n\t}\n\terr := json.Unmarshal(statsBytes, &scores)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tscores[int(resp.Average*100)]++\n\tif oldScore != nil {\n\t\tscores[int(*oldScore*100)]--\n\t}\n\tnewStats, err := json.Marshal(scores)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"stats\"), newStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc updateReposCount(mb *bolt.Bucket, resp checksResp, repo string) (err error) {\n\tlog.Printf(\"New repo %q, adding to repo count...\", repo)\n\ttotalInt := 0\n\ttotal := mb.Get([]byte(\"total_repos\"))\n\tif total != nil {\n\t\terr = json.Unmarshal(total, &totalInt)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not unmarshal total repos count: %v\", err)\n\t\t}\n\t}\n\ttotalInt++ \/\/ increase repo count\n\ttotal, err = json.Marshal(totalInt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not marshal total repos count: %v\", err)\n\t}\n\tmb.Put([]byte(\"total_repos\"), total)\n\tlog.Println(\"Repo count is now\", totalInt)\n\treturn nil\n}\n\ntype recentItem struct {\n\tRepo string\n}\n\nfunc updateRecentlyViewed(mb *bolt.Bucket, repo string) error {\n\tif mb == nil {\n\t\treturn fmt.Errorf(\"meta bucket not found\")\n\t}\n\tb := mb.Get([]byte(\"recent\"))\n\tif b == nil {\n\t\tb, _ = json.Marshal([]recentItem{})\n\t}\n\trecent := []recentItem{}\n\tjson.Unmarshal(b, &recent)\n\n\t\/\/ add it to the slice, if it is not in there already\n\tfor i := range recent {\n\t\tif recent[i].Repo == repo {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\trecent = append(recent, recentItem{Repo: repo})\n\tif len(recent) > 5 {\n\t\t\/\/ trim recent if it's grown to over 5\n\t\trecent = (recent)[1:6]\n\t}\n\tb, err := json.Marshal(&recent)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"recent\"), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc updateMetadata(tx *bolt.Tx, resp checksResp, repo string, isNewRepo bool, oldScore *float64) error {\n\t\/\/ fetch meta-bucket\n\tmb := tx.Bucket([]byte(MetaBucket))\n\tif mb == nil {\n\t\treturn fmt.Errorf(\"high score bucket not found\")\n\t}\n\t\/\/ update total repos count\n\tif isNewRepo {\n\t\terr := updateReposCount(mb, resp, repo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr := updateHighScores(mb, resp, repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn updateStats(mb, resp, oldScore)\n}\nMaking the Check Repo analysis logic re-usablepackage handlers\n\nimport (\n\t\"container\/heap\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gojp\/goreportcard\/download\"\n)\n\nconst (\n\t\/\/ DBPath is the relative (or absolute) path to the bolt database file\n\tDBPath string = \"goreportcard.db\"\n\n\t\/\/ RepoBucket is the bucket in which repos will be cached in the bolt DB\n\tRepoBucket string = \"repos\"\n\n\t\/\/ MetaBucket is the bucket containing the names of the projects with the\n\t\/\/ top 100 high scores, and other meta information\n\tMetaBucket string = \"meta\"\n)\n\n\/\/ CheckRepo performs the code analysis of a repo and updates the cache with\n\/\/ the analysis report and various metadata\nfunc CheckRepo(db *bolt.DB, repo string, forceRefresh bool) error {\n\tresp, err := newChecksResp(repo, forceRefresh)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: from newChecksResp:\", err)\n\t\treturn err\n\t}\n\n\trespBytes, err := json.Marshal(resp)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: could not marshal json:\", err)\n\t\treturn err\n\t}\n\n\t\/\/ is this a new repo? 
if so, increase the count in the high scores bucket later\n\tisNewRepo := false\n\tvar oldRepoBytes []byte\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(RepoBucket))\n\t\tif b == nil {\n\t\t\treturn fmt.Errorf(\"repo bucket not found\")\n\t\t}\n\t\toldRepoBytes = b.Get([]byte(repo))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ get the old score and store it for stats updating\n\tvar oldScore *float64\n\tif isNewRepo = oldRepoBytes == nil; !isNewRepo {\n\t\toldRepo := checksResp{}\n\t\terr = json.Unmarshal(oldRepoBytes, &oldRepo)\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: could not unmarshal json:\", err)\n\t\t\treturn err\n\t\t}\n\t\toldScore = &oldRepo.Average\n\t}\n\n\t\/\/ if this is a new repo, or the user force-refreshed, update the cache\n\tif isNewRepo || forceRefresh {\n\t\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t\tlog.Printf(\"Saving repo %q to cache...\", repo)\n\n\t\t\tb := tx.Bucket([]byte(RepoBucket))\n\t\t\tif b == nil {\n\t\t\t\treturn fmt.Errorf(\"repo bucket not found\")\n\t\t\t}\n\n\t\t\t\/\/ save repo to cache\n\t\t\terr = b.Put([]byte(repo), respBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn updateMetadata(tx, resp, repo, isNewRepo, oldScore)\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Bolt writing error:\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CheckHandler handles the request for checking a repo\nfunc CheckHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tforceRefresh := r.Method != \"GET\" \/\/ if this is a GET request, try to fetch from cached version in boltdb first\n\trepoParam := r.FormValue(\"repo\")\n\trepo, err := download.Clean(repoParam)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: from download.Clean:\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`Could not download the repository: ` + err.Error()))\n\t\treturn\n\t}\n\tlog.Printf(\"Checking repo %q...\", repo)\n\t\/\/ write to boltdb\n\tdb, err := bolt.Open(DBPath, 0755, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\tlog.Println(\"Failed to open bolt database: \", err)\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdefer db.Close()\n\tif err = CheckRepo(db, repo, forceRefresh); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`Error while analyzing the repository: ` + err.Error()))\n\t\treturn\n\t}\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\t\/\/ fetch meta-bucket\n\t\tmb := tx.Bucket([]byte(MetaBucket))\n\t\treturn updateRecentlyViewed(mb, repo)\n\t})\n\tb, err := json.Marshal(map[string]string{\"redirect\": \"\/report\/\" + repo})\n\tif err != nil {\n\t\tlog.Println(\"JSON marshal error:\", err)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n\treturn\n}\n\nfunc updateHighScores(mb *bolt.Bucket, resp checksResp, repo string) error {\n\t\/\/ check if we need to update the high score list\n\tif resp.Files < 100 {\n\t\t\/\/ only repos with >= 100 files are considered for the high score list\n\t\treturn nil\n\t}\n\n\t\/\/ start updating high score list\n\tscoreBytes := mb.Get([]byte(\"scores\"))\n\tif scoreBytes == nil {\n\t\tscoreBytes, _ = json.Marshal([]ScoreHeap{})\n\t}\n\tscores := &ScoreHeap{}\n\tjson.Unmarshal(scoreBytes, scores)\n\n\theap.Init(scores)\n\tif len(*scores) > 0 && (*scores)[0].Score > resp.Average*100.0 && len(*scores) == 50 {\n\t\t\/\/ lowest score on list is higher than this repo's score, so no need to add, 
unless\n\t\t\/\/ we do not have 50 high scores yet\n\t\treturn nil\n\t}\n\t\/\/ if this repo is already in the list, remove the original entry:\n\tfor i := range *scores {\n\t\tif strings.ToLower((*scores)[i].Repo) == strings.ToLower(repo) {\n\t\t\theap.Remove(scores, i)\n\t\t\tbreak\n\t\t}\n\t}\n\t\/\/ now we can safely push it onto the heap\n\theap.Push(scores, scoreItem{\n\t\tRepo: repo,\n\t\tScore: resp.Average * 100.0,\n\t\tFiles: resp.Files,\n\t})\n\tif len(*scores) > 50 {\n\t\t\/\/ trim heap if it's grown to over 50\n\t\t*scores = (*scores)[1:51]\n\t}\n\tscoreBytes, err := json.Marshal(&scores)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"scores\"), scoreBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc updateStats(mb *bolt.Bucket, resp checksResp, oldScore *float64) error {\n\tscores := make([]int, 101)\n\tstatsBytes := mb.Get([]byte(\"stats\"))\n\tif statsBytes == nil {\n\t\tstatsBytes, _ = json.Marshal(scores)\n\t}\n\terr := json.Unmarshal(statsBytes, &scores)\n\tif err != nil {\n\t\treturn err\n\t}\n\tscores[int(resp.Average*100)]++\n\tif oldScore != nil {\n\t\tscores[int(*oldScore*100)]--\n\t}\n\tnewStats, err := json.Marshal(scores)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"stats\"), newStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc updateReposCount(mb *bolt.Bucket, repo string) (err error) {\n\tlog.Printf(\"New repo %q, adding to repo count...\", repo)\n\ttotalInt := 0\n\ttotal := mb.Get([]byte(\"total_repos\"))\n\tif total != nil {\n\t\terr = json.Unmarshal(total, &totalInt)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not unmarshal total repos count: %v\", err)\n\t\t}\n\t}\n\ttotalInt++ \/\/ increase repo count\n\ttotal, err = json.Marshal(totalInt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not marshal total repos count: %v\", err)\n\t}\n\tmb.Put([]byte(\"total_repos\"), total)\n\tlog.Println(\"Repo count is now\", totalInt)\n\treturn nil\n}\n\ntype recentItem struct {\n\tRepo string\n}\n\nfunc updateRecentlyViewed(mb *bolt.Bucket, repo string) error {\n\tif mb == nil {\n\t\treturn fmt.Errorf(\"meta bucket not found\")\n\t}\n\tb := mb.Get([]byte(\"recent\"))\n\tif b == nil {\n\t\tb, _ = json.Marshal([]recentItem{})\n\t}\n\trecent := []recentItem{}\n\tjson.Unmarshal(b, &recent)\n\n\t\/\/ add it to the slice, if it is not in there already\n\tfor i := range recent {\n\t\tif recent[i].Repo == repo {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\trecent = append(recent, recentItem{Repo: repo})\n\tif len(recent) > 5 {\n\t\t\/\/ trim recent if it's grown to over 5\n\t\trecent = (recent)[1:6]\n\t}\n\tb, err := json.Marshal(&recent)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mb.Put([]byte(\"recent\"), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc updateMetadata(tx *bolt.Tx, resp checksResp, repo string, isNewRepo bool, oldScore *float64) error {\n\t\/\/ fetch meta-bucket\n\tmb := tx.Bucket([]byte(MetaBucket))\n\tif mb == nil {\n\t\treturn fmt.Errorf(\"high score bucket not found\")\n\t}\n\t\/\/ update total repos count\n\tif isNewRepo {\n\t\terr := updateReposCount(mb, repo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr := updateHighScores(mb, resp, repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn updateStats(mb, resp, oldScore)\n}\n
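// RefreshAll is an illustrative sketch (not part of the original file) of
// reusing the extracted CheckRepo outside an HTTP request, e.g. from a batch
// job; the repos argument is a hypothetical input list, and bolt.Open mirrors
// the options CheckHandler uses.
func RefreshAll(repos []string) error {
	db, err := bolt.Open(DBPath, 0755, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		return err
	}
	defer db.Close()

	for _, repo := range repos {
		// forceRefresh=true skips the boltdb cache and recomputes the report.
		if err := CheckRepo(db, repo, true); err != nil {
			log.Printf("check of %q failed: %v", repo, err)
		}
	}
	return nil
}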
\"github.com\/cayleygraph\/cayley\/graph\/memstore\"\n\t\"github.com\/cayleygraph\/cayley\/graph\/path\"\n\t\"github.com\/cayleygraph\/cayley\/quad\"\n\t_ \"github.com\/cayleygraph\/cayley\/writer\"\n)\n\nvar (\n\tStartMorphism = path.StartMorphism\n\tStartPath = path.StartPath\n\n\tNewTransaction = graph.NewTransaction\n)\n\ntype Iterator graph.Iterator\ntype QuadStore graph.QuadStore\ntype QuadWriter graph.QuadWriter\n\ntype Path path.Path\n\ntype Handle struct {\n\tgraph.QuadStore\n\tgraph.QuadWriter\n}\n\nfunc (h *Handle) Close() error {\n\terr := h.QuadWriter.Close()\n\th.QuadStore.Close()\n\treturn err\n}\n\nfunc Triple(subject, predicate, object interface{}) quad.Quad {\n\treturn Quad(subject, predicate, object, nil)\n}\n\nfunc Quad(subject, predicate, object, label interface{}) quad.Quad {\n\treturn quad.Make(subject, predicate, object, label)\n}\n\nfunc NewGraph(name, dbpath string, opts graph.Options) (*Handle, error) {\n\tqs, err := graph.NewQuadStore(name, dbpath, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqw, err := graph.NewQuadWriter(\"single\", qs, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Handle{qs, qw}, nil\n}\n\nfunc NewMemoryGraph() (*Handle, error) {\n\treturn NewGraph(\"memstore\", \"\", nil)\n}\ncayley: use alias to make import helpers perfect matcherspackage cayley\n\nimport (\n\t\"github.com\/cayleygraph\/cayley\/graph\"\n\t_ \"github.com\/cayleygraph\/cayley\/graph\/memstore\"\n\t\"github.com\/cayleygraph\/cayley\/graph\/path\"\n\t\"github.com\/cayleygraph\/cayley\/quad\"\n\t_ \"github.com\/cayleygraph\/cayley\/writer\"\n)\n\nvar (\n\tStartMorphism = path.StartMorphism\n\tStartPath = path.StartPath\n\n\tNewTransaction = graph.NewTransaction\n)\n\ntype Iterator = graph.Iterator\ntype QuadStore = graph.QuadStore\ntype QuadWriter = graph.QuadWriter\n\ntype Path = path.Path\n\ntype Handle struct {\n\tgraph.QuadStore\n\tgraph.QuadWriter\n}\n\nfunc (h *Handle) Close() error {\n\terr := h.QuadWriter.Close()\n\th.QuadStore.Close()\n\treturn err\n}\n\nfunc Triple(subject, predicate, object interface{}) quad.Quad {\n\treturn Quad(subject, predicate, object, nil)\n}\n\nfunc Quad(subject, predicate, object, label interface{}) quad.Quad {\n\treturn quad.Make(subject, predicate, object, label)\n}\n\nfunc NewGraph(name, dbpath string, opts graph.Options) (*Handle, error) {\n\tqs, err := graph.NewQuadStore(name, dbpath, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqw, err := graph.NewQuadWriter(\"single\", qs, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Handle{qs, qw}, nil\n}\n\nfunc NewMemoryGraph() (*Handle, error) {\n\treturn NewGraph(\"memstore\", \"\", nil)\n}\n<|endoftext|>"} {"text":"package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/models\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/params\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/pool\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/tools\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nfunc CreateTask(w http.ResponseWriter, req *http.Request) {\n\tbody := params.ExtractParams(req).Body\n\n\ttaskInfo := new(models.Task)\n\n\tfmt.Println(body)\n\tfmt.Println(taskInfo)\n\terr := json.Unmarshal(body, &taskInfo)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\n\t\tfmt.Fprint(w, \"Error in request!\")\n\t\tlog.Printf(\"%v\", err)\n\n\t\treturn\n\t}\n\n\texists, err := pool.DispatchAction(pool.CheckTaskExists, taskInfo)\n\tif exists.(bool) 
<|endoftext|>"} {"text":"package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/models\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/params\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/pool\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/tools\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nfunc CreateTask(w http.ResponseWriter, req *http.Request) {\n\tbody := params.ExtractParams(req).Body\n\n\ttaskInfo := new(models.Task)\n\n\tfmt.Println(body)\n\tfmt.Println(taskInfo)\n\terr := json.Unmarshal(body, &taskInfo)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\n\t\tfmt.Fprint(w, \"Error in request!\")\n\t\tlog.Printf(\"%v\", err)\n\n\t\treturn\n\t}\n\n\texists, err := pool.DispatchAction(pool.CheckTaskExists, taskInfo)\n\tif exists.(bool) {\n\t\tw.WriteHeader(http.StatusConflict)\n\t\tfmt.Fprintf(w, \"Task with title: %s already exists!\", taskInfo.Title)\n\n\t\tlog.Printf(\"Task with title: %s already exists!\", taskInfo.Title)\n\n\t\treturn\n\t}\n\n\tproject, err := pool.DispatchAction(pool.CreateTask, taskInfo)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadGateway)\n\t\tfmt.Fprint(w, \"Can not create task. Please, try later\")\n\t\tlog.Printf(\"can not create task: %v\", err)\n\n\t\treturn\n\t}\n\n\ttools.JsonResponse(project, w)\n}\n\nfunc AllTasks(w http.ResponseWriter, _ *http.Request) {\n\tprojects, err := pool.DispatchAction(pool.AllTasks, nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\tfmt.Fprint(w, \"Can not return all tasks!\")\n\t\tlog.Printf(\"Can not return all tasks: %v\", err)\n\n\t\treturn\n\t}\n\n\ttools.JsonResponse(projects.(models.TasksList), w)\n}\n\nfunc GetTaskById(w http.ResponseWriter, req *http.Request) {\n\tparameters := params.ExtractParams(req).PathParams\n\n\tif id, ok := parameters[\"id\"]; ok {\n\t\ttask, err := pool.DispatchAction(pool.FindTaskById, bson.ObjectIdHex(id))\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\t\tfmt.Fprintln(w, \"Can't find task!\")\n\t\t\tlog.Printf(\"Can not find task by id: %v because of: %v\", id, err)\n\t\t\treturn\n\t\t}\n\n\t\ttools.JsonResponse(task.(*models.Task), w)\n\t\treturn\n\t}\n\n\thttp.NotFound(w, req)\n}\nConvert tasks handlers to new format.package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/models\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/params\"\n\t\"github.com\/DVI-GI-2017\/Jira__backend\/pool\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\n\/\/ Create task\n\/\/ Post body - task\n\/\/ Returns created task if OK\nfunc CreateTask(w http.ResponseWriter, req *http.Request) {\n\tbody := params.ExtractParams(req).Body\n\n\tvar task models.Task\n\n\terr := json.Unmarshal(body, &task)\n\tif err != nil {\n\t\tJsonErrorResponse(w, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\texists, err := pool.DispatchAction(pool.CheckTaskExists, task)\n\tif err != nil {\n\t\t\/\/ check the error before the type assertion: a failed dispatch\n\t\t\/\/ would otherwise panic on a nil interface value\n\t\tJsonErrorResponse(w, err, http.StatusBadGateway)\n\t\treturn\n\t}\n\tif exists.(bool) {\n\t\tJsonErrorResponse(w, fmt.Errorf(\"Task with title: %s already exists!\", task.Title), http.StatusConflict)\n\t\treturn\n\t}\n\n\tnewTask, err := pool.DispatchAction(pool.CreateTask, task)\n\tif err != nil {\n\t\tJsonErrorResponse(w, err, http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tJsonResponse(w, newTask)\n}\n\n\/\/ Returns all tasks\nfunc AllTasks(w http.ResponseWriter, _ *http.Request) {\n\ttasks, err := pool.DispatchAction(pool.AllTasks, nil)\n\tif err != nil {\n\t\tJsonErrorResponse(w, err, http.StatusNotFound)\n\t\treturn\n\t}\n\n\tJsonResponse(w, tasks.(models.TasksList))\n}\n\n\/\/ Returns task with given id\n\/\/ Path params: \"id\" - task id.\nfunc GetTaskById(w http.ResponseWriter, req *http.Request) {\n\n\tid := params.ExtractParams(req).PathParams[\"id\"]\n\n\ttask, err := pool.DispatchAction(pool.FindTaskById, bson.ObjectIdHex(id))\n\tif err != nil {\n\t\tJsonErrorResponse(w, err, http.StatusNotFound)\n\t\treturn\n\t}\n\n\tJsonResponse(w, task.(models.Task))\n\treturn\n}\n
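// JsonResponse and JsonErrorResponse are used above but defined elsewhere in
// this package; a plausible, hypothetical sketch of their shape:
func JsonResponse(w http.ResponseWriter, v interface{}) {
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(v)
}

func JsonErrorResponse(w http.ResponseWriter, err error, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	json.NewEncoder(w).Encode(map[string]string{"error": err.Error()})
}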
<|endoftext|>"} {"text":"\/\/ Package inboxer is a Go library for checking email using the google Gmail\n\/\/ API.\npackage inboxer\n\n\/\/ SCOPE:\n\/\/ TODO:\n\/\/ Check for unread messages\n\/\/ Mark as read\/unread\/important\/spam\n\/\/ Get x number of messages\n\/\/ Get Previews\n\/\/ Get labels\n\/\/ Get emails by label\n\/\/ Get emails by date\n\/\/ Get emails by sender\n\/\/ Get emails by recipient\n\/\/ Get emails by subject\n\/\/ Get emails by mailing-list\n\/\/ Get emails by thread-topic\n\/\/ Watch inbox\n\/\/ LICENSE\n\/\/ README.md\n\/\/ how-to: add client credentials (for readme)\n\/\/ tests\n\/\/\n\/\/ DONE:\n\/\/ Get Body\n\/\/\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgmail \"google.golang.org\/api\/gmail\/v1\"\n)\n\n\/\/ GetBody gets, decodes, and returns the body of the email. It returns an\n\/\/ error if decoding goes wrong. mimeType is used to indicate whether you want\n\/\/ the plain text or html encoding (\"text\/html\", \"text\/plain\").\nfunc GetBody(msg *gmail.Message, mimeType string) (string, error) {\n\tfor _, v := range msg.Payload.Parts {\n\t\tif v.MimeType == \"multipart\/alternative\" {\n\t\t\tfor _, l := range v.Parts {\n\t\t\t\tif l.MimeType == mimeType && l.Body.Size >= 1 {\n\t\t\t\t\tdec, err := decodeEmailBody(l.Body.Data)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"\", err\n\t\t\t\t\t}\n\t\t\t\t\treturn dec, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif v.MimeType == mimeType && v.Body.Size >= 1 {\n\t\t\tdec, err := decodeEmailBody(v.Body.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn dec, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Couldn't Read Body\")\n}\n\n\/\/ HasLabel takes a label and an email and checks if that email has that label\nfunc HasLabel(label string, msg *gmail.Message) bool {\n\tfor _, v := range msg.LabelIds {\n\t\tif v == strings.ToUpper(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ PartialMetadata stores email metadata\ntype PartialMetadata struct {\n\tSender, From, To, CC, Subject, MailingList, DeliveredTo, ThreadTopic []string\n}\n\n\/\/ GetPartialMetadata gets some of the useful metadata from the headers.\nfunc GetPartialMetadata(msg *gmail.Message) *PartialMetadata {\n\tinfo := &PartialMetadata{}\n\tfmt.Println(\"========================================================\")\n\tfor _, v := range msg.Payload.Headers {\n\t\tswitch v.Name {\n\t\tcase \"Sender\":\n\t\t\tinfo.Sender = append(info.Sender, v.Value)\n\t\tcase \"From\":\n\t\t\tinfo.From = append(info.From, v.Value)\n\t\tcase \"To\":\n\t\t\tinfo.To = append(info.To, v.Value)\n\t\tcase \"CC\":\n\t\t\tinfo.CC = append(info.CC, v.Value)\n\t\tcase \"Subject\":\n\t\t\tinfo.Subject = append(info.Subject, v.Value)\n\t\tcase \"Mailing-list\":\n\t\t\tinfo.MailingList = append(info.MailingList, v.Value)\n\t\tcase \"Delivered-To\":\n\t\t\tinfo.DeliveredTo = append(info.DeliveredTo, v.Value)\n\t\tcase \"Thread-Topic\":\n\t\t\tinfo.ThreadTopic = append(info.ThreadTopic, v.Value)\n\t\t}\n\t}\n\treturn info\n}\n\n\/\/ decodeEmailBody is used to decode the email body by converting from\n\/\/ URLEncoded base64 to a string.\nfunc decodeEmailBody(data string) (string, error) {\n\tdecoded, err := base64.URLEncoding.DecodeString(data)\n\tif err != nil {\n\t\tfmt.Println(\"decode error:\", err)\n\t\treturn \"\", err\n\t}\n\treturn string(decoded), nil\n}\n\n\/\/ ReceivedTime parses a Unix timestamp given in milliseconds and converts it\n\/\/ into a time.Time.\nfunc ReceivedTime(datetime int64) time.Time {\n\tconv := strconv.FormatInt(datetime, 10)\n\t\/\/ Drop the last three digits (milliseconds) to keep whole seconds.\n\tconv = conv[:len(conv)-3]\n\ttc, err := strconv.ParseInt(conv, 10, 64)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn time.Unix(tc, 0)\n}\n\n\/\/ func watchInbox() {\n\/\/ \treq := &gmail.WatchRequest{\n\/\/ \t\tLabelFilterAction: \"include\",\n\/\/ \t\tLabelIds: 
[]string{\"UNREAD\"},\n\/\/ \t\tTopicName: \"gmailmsg\",\n\/\/ \t}\n\n\/\/ \twr, _ := srv.Users.Watch(\"me\", req).Do()\n\/\/ \tfmt.Println(wr.ForceSendFields)\n\/\/ }\n\n\/\/ func getMessages() (*gmail.ListMessagesResponse, error) {\n\/\/ \t\/\/ Connect to the gmail API service.\n\/\/ \tctx := context.Background()\n\/\/ \tsrv := gmailAPI.ConnectToService(ctx, gmail.MailGoogleComScope)\n\n\/\/ \t\/\/ Get the messages\n\/\/ \tmsgs, err := srv.Users.Messages.List(\"me\").Do()\n\/\/ \tif err != nil {\n\/\/ \t\treturn &gmail.ListMessagesResponse{}, err\n\/\/ \t}\n\n\/\/ \treturn msgs, nil\n\/\/ }\nadding features\/\/ Package inboxer is a Go library for checking email using the google Gmail\n\/\/ API.\npackage inboxer\n\n\/\/ SCOPE:\n\/\/ TODO:\n\/\/ Check for unread messages\n\/\/ Mark as read\/unread\/important\/spam\n\/\/ Get Previews\/snippet\n\/\/ Get labels\n\/\/ Get emails by label\n\/\/ Get emails by date\n\/\/\/\n\/\/ Watch inbox\n\/\/ LICENSE\n\/\/ README.md\n\/\/ how-to: add client credentials (for readme)\n\/\/ tests\n\/\/\n\/\/ WORKS:\n\/\/ Get emails by sender\n\/\/ Get emails by recipient\n\/\/ Get emails by subject\n\/\/ Get emails by mailing-list\n\/\/ Get emails by thread-topic\n\/\/\n\/\/ DONE:\n\/\/ Get Body\n\/\/\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgmail \"google.golang.org\/api\/gmail\/v1\"\n)\n\n\/\/ GetBody gets, decodes, and returns the body of the email. It returns an\n\/\/ error if decoding goes wrong. mimeType is used to indicate whether you want\n\/\/ the plain text or html encoding (\"text\/html\", \"text\/plain\").\nfunc GetBody(msg *gmail.Message, mimeType string) (string, error) {\n\tfor _, v := range msg.Payload.Parts {\n\t\tif v.MimeType == \"multipart\/alternative\" {\n\t\t\tfor _, l := range v.Parts {\n\t\t\t\tif l.MimeType == mimeType && l.Body.Size >= 1 {\n\t\t\t\t\tdec, err := decodeEmailBody(l.Body.Data)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"\", err\n\t\t\t\t\t}\n\t\t\t\t\treturn dec, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif v.MimeType == mimeType && v.Body.Size >= 1 {\n\t\t\tdec, err := decodeEmailBody(v.Body.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn dec, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Couldn't Read Body\")\n}\n\nfunc CheckForUnread(srv *gmail.Service) (bool, error) {\n\t\/\/ Get the messages\n\t\/\/ msgs, err := srv.Users.Messages.List(\"me\").Do()\n\t\/\/ if err != nil {\n\t\/\/ \treturn false, err\n\t\/\/ }\n\tlabel, err := srv.Users.Labels.Get(\"me\", \"INBOX\").Do()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(label.MessagesTotal)\n\tfmt.Println(label.MessagesUnread)\n\tfmt.Println(label.ThreadsTotal)\n\tfmt.Println(label.ThreadsUnread)\n\n\t\/\/ \tfmt.Println(len(msgs.Messages))\n\t\/\/ \tfor _, v := range msgs.Messages {\n\t\/\/ \t\tmsg, _ := srv.Users.Messages.Get(\"me\", v.Id).Do()\n\t\/\/ \t\tif HasLabel(\"unread\", msg) {\n\t\/\/ \t\t\treturn true, nil\n\t\/\/ \t\t}\n\t\/\/ \t}\n\treturn false, nil\n}\n\n\/\/ HasLabel takes a label and an email and checks if that email has that label\nfunc HasLabel(label string, msg *gmail.Message) bool {\n\tfor _, v := range msg.LabelIds {\n\t\tif v == strings.ToUpper(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ PartialMetadata stores email metadata\ntype PartialMetadata struct {\n\tSender, From, To, CC, Subject, MailingList, DeliveredTo, ThreadTopic []string\n}\n\n\/\/ GetPartialMetadata gets some of the useful metadata from the 
headers.\nfunc GetPartialMetadata(msg *gmail.Message) *PartialMetadata {\n\tinfo := &PartialMetadata{}\n\tfmt.Println(\"========================================================\")\n\tfor _, v := range msg.Payload.Headers {\n\t\tswitch v.Name {\n\t\tcase \"Sender\":\n\t\t\tinfo.Sender = append(info.Sender, v.Value)\n\t\tcase \"From\":\n\t\t\tinfo.From = append(info.From, v.Value)\n\t\tcase \"To\":\n\t\t\tinfo.To = append(info.To, v.Value)\n\t\tcase \"CC\":\n\t\t\tinfo.CC = append(info.CC, v.Value)\n\t\tcase \"Subject\":\n\t\t\tinfo.Subject = append(info.Subject, v.Value)\n\t\tcase \"Mailing-list\":\n\t\t\tinfo.MailingList = append(info.MailingList, v.Value)\n\t\tcase \"Delivered-To\":\n\t\t\tinfo.DeliveredTo = append(info.DeliveredTo, v.Value)\n\t\tcase \"Thread-Topic\":\n\t\t\tinfo.ThreadTopic = append(info.ThreadTopic, v.Value)\n\t\t}\n\t}\n\treturn info\n}\n\n\/\/ decodeEmailBody is used to decode the email body by converting from\n\/\/ URLEncoded base64 to a string.\nfunc decodeEmailBody(data string) (string, error) {\n\tdecoded, err := base64.URLEncoding.DecodeString(data)\n\tif err != nil {\n\t\tfmt.Println(\"decode error:\", err)\n\t\treturn \"\", err\n\t}\n\treturn string(decoded), nil\n}\n\n\/\/ ReceivedTime parses a Unix timestamp given in milliseconds and converts it\n\/\/ into a time.Time.\nfunc ReceivedTime(datetime int64) time.Time {\n\tconv := strconv.FormatInt(datetime, 10)\n\t\/\/ Drop the last three digits (milliseconds) to keep whole seconds.\n\tconv = conv[:len(conv)-3]\n\ttc, err := strconv.ParseInt(conv, 10, 64)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn time.Unix(tc, 0)\n}\n\n\/\/ GetMessages gets and returns gmail messages\nfunc GetMessages(srv *gmail.Service, howMany uint) ([]*gmail.Message, error) {\n\tvar msgSlice []*gmail.Message\n\n\t\/\/ Get the messages\n\tmsgs, err := srv.Users.Messages.List(\"me\").Do()\n\tif err != nil {\n\t\treturn msgSlice, err\n\t}\n\n\tif int(howMany) > len(msgs.Messages) {\n\t\t\/\/ don't slice past the end of the returned page\n\t\thowMany = uint(len(msgs.Messages))\n\t}\n\tfor _, v := range msgs.Messages[:howMany] {\n\t\tmsg, err := srv.Users.Messages.Get(\"me\", v.Id).Do()\n\t\tif err != nil {\n\t\t\treturn msgSlice, err\n\t\t}\n\t\tmsgSlice = append(msgSlice, msg)\n\t}\n\treturn msgSlice, nil\n}\n\n\/\/ func watchInbox() {\n\/\/ \treq := &gmail.WatchRequest{\n\/\/ \t\tLabelFilterAction: \"include\",\n\/\/ \t\tLabelIds: []string{\"UNREAD\"},\n\/\/ \t\tTopicName: \"gmailmsg\",\n\/\/ \t}\n\n\/\/ \twr, _ := srv.Users.Watch(\"me\", req).Do()\n\/\/ \tfmt.Println(wr.ForceSendFields)\n\/\/ }\n
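// A sketch (not part of this file's API) of asking the Gmail API for at most
// n message IDs up front via MaxResults, instead of slicing the default page
// after the fact as GetMessages does above; it reuses the gmail import that
// is already present in this file.
func listMessageIDs(srv *gmail.Service, n int64) ([]string, error) {
	resp, err := srv.Users.Messages.List("me").MaxResults(n).Do()
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(resp.Messages))
	for _, m := range resp.Messages {
		ids = append(ids, m.Id)
	}
	return ids, nil
}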
<|endoftext|>"} {"text":"package repository_fetcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/archive\"\n\t\"github.com\/docker\/docker\/image\"\n\t\"github.com\/docker\/docker\/registry\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\ntype RepositoryFetcher interface {\n\tFetch(logger lager.Logger, repoName string, tag string) (imageID string, envvars []string, err error)\n}\n\n\/\/ apes docker's *registry.Registry\ntype Registry interface {\n\tGetRepositoryData(repoName string) (*registry.RepositoryData, error)\n\tGetRemoteTags(registries []string, repository string, token []string) (map[string]string, error)\n\tGetRemoteHistory(imageID string, registry string, token []string) ([]string, error)\n\n\tGetRemoteImageJSON(imageID string, registry string, token []string) ([]byte, int, error)\n\tGetRemoteImageLayer(imageID string, registry string, token []string, size int64) (io.ReadCloser, error)\n}\n\n\/\/ apes docker's *graph.Graph\ntype Graph interface {\n\tGet(name string) (*image.Image, error)\n\tExists(imageID string) bool\n\tRegister(image *image.Image, imageJSON []byte, layer archive.ArchiveReader) error\n}\n\ntype DockerRepositoryFetcher struct {\n\tregistry Registry\n\tgraph Graph\n\n\tfetchingLayers map[string]chan struct{}\n\tfetchingMutex *sync.Mutex\n}\n\nfunc New(registry Registry, graph Graph) RepositoryFetcher {\n\treturn &DockerRepositoryFetcher{\n\t\tregistry: registry,\n\t\tgraph: graph,\n\t\tfetchingLayers: map[string]chan struct{}{},\n\t\tfetchingMutex: new(sync.Mutex),\n\t}\n}\n\nfunc (fetcher *DockerRepositoryFetcher) Fetch(logger lager.Logger, repoName string, tag string) (string, []string, error) {\n\tfLog := logger.Session(\"fetch\", lager.Data{\n\t\t\"repo\": repoName,\n\t\t\"tag\": tag,\n\t})\n\n\tfLog.Debug(\"fetching\")\n\n\trepoData, err := fetcher.registry.GetRepositoryData(repoName)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\ttagsList, err := fetcher.registry.GetRemoteTags(repoData.Endpoints, repoName, repoData.Tokens)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\timgID, ok := tagsList[tag]\n\tif !ok {\n\t\treturn \"\", nil, fmt.Errorf(\"unknown tag: %s:%s\", repoName, tag)\n\t}\n\n\ttoken := repoData.Tokens\n\n\tfor _, endpoint := range repoData.Endpoints {\n\t\tfLog.Debug(\"trying\", lager.Data{\n\t\t\t\"endpoint\": endpoint,\n\t\t\t\"image\": imgID,\n\t\t})\n\n\t\tenv, err := fetcher.fetchFromEndpoint(fLog, endpoint, imgID, token)\n\t\tif err == nil {\n\t\t\treturn imgID, filterEnv(env, logger), nil\n\t\t}\n\t}\n\n\treturn \"\", nil, fmt.Errorf(\"all endpoints failed: %s\", err)\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetchFromEndpoint(logger lager.Logger, endpoint string, imgID string, token []string) ([]string, error) {\n\thistory, err := fetcher.registry.GetRemoteHistory(imgID, endpoint, token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar allEnv []string\n\tfor i := len(history) - 1; i >= 0; i-- {\n\t\tenv, err := fetcher.fetchLayer(logger, endpoint, history[i], token)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallEnv = append(allEnv, env...)\n\t}\n\n\treturn allEnv, nil\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetchLayer(logger lager.Logger, endpoint string, layerID string, token []string) ([]string, error) {\n\t\/\/ loop until fetching() reports that this goroutine registered itself as\n\t\/\/ the unique fetcher for the layer; losing goroutines block inside\n\t\/\/ fetching() on the winner's channel and then retry\n\tfor acquired := false; !acquired; acquired = fetcher.fetching(layerID) {\n\t}\n\n\tdefer fetcher.doneFetching(layerID)\n\n\timg, err := fetcher.graph.Get(layerID)\n\tif err == nil {\n\t\tlogger.Info(\"using-cached\", lager.Data{\n\t\t\t\"layer\": layerID,\n\t\t})\n\n\t\treturn imgEnv(img), nil\n\t}\n\n\timgJSON, imgSize, err := fetcher.registry.GetRemoteImageJSON(layerID, endpoint, token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timg, err = image.NewImgJSON(imgJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlayer, err := fetcher.registry.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer layer.Close()\n\n\tstarted := time.Now()\n\n\tlogger.Info(\"downloading\", lager.Data{\n\t\t\"layer\": layerID,\n\t})\n\n\terr = fetcher.graph.Register(img, imgJSON, layer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Info(\"downloaded\", lager.Data{\n\t\t\"layer\": layerID,\n\t\t\"took\": time.Since(started),\n\t})\n\n\treturn imgEnv(img), nil\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetching(layerID string) bool {\n\tfetcher.fetchingMutex.Lock()\n\n\tfetching, found := fetcher.fetchingLayers[layerID]\n\tif !found {\n\t\tfetcher.fetchingLayers[layerID] = make(chan struct{})\n\t\tfetcher.fetchingMutex.Unlock()\n\t\treturn true\n\t} else {\n\t\tfetcher.fetchingMutex.Unlock()\n\t\t<-fetching\n\t\treturn 
false\n\t}\n}\n\nfunc (fetcher *DockerRepositoryFetcher) doneFetching(layerID string) {\n\tfetcher.fetchingMutex.Lock()\n\tclose(fetcher.fetchingLayers[layerID])\n\tdelete(fetcher.fetchingLayers, layerID)\n\tfetcher.fetchingMutex.Unlock()\n}\n\nfunc imgEnv(img *image.Image) []string {\n\tvar env []string\n\n\tif img.Config != nil {\n\t\tenv = img.Config.Env\n\t}\n\n\treturn env\n}\n\n\/\/ multiple layers may specify environment variables; they are collected with\n\/\/ the deepest layer first, so the first occurrence of the variable should win\nfunc filterEnv(env []string, logger lager.Logger) []string {\n\tseen := map[string]bool{}\n\n\tvar filtered []string\n\tfor _, e := range env {\n\t\tsegs := strings.SplitN(e, \"=\", 2)\n\t\tif len(segs) != 2 {\n\t\t\t\/\/ malformed docker image metadata?\n\t\t\tlogger.Info(\"Unrecognised environment variable\", lager.Data{\"e\": e})\n\t\t\tcontinue\n\t\t}\n\n\t\tif seen[segs[0]] {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiltered = append(filtered, e)\n\t\tseen[segs[0]] = true\n\t}\n\n\treturn filtered\n}\ngo fmt [#79566468]package repository_fetcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/archive\"\n\t\"github.com\/docker\/docker\/image\"\n\t\"github.com\/docker\/docker\/registry\"\n\t\"github.com\/pivotal-golang\/lager\"\n)\n\ntype RepositoryFetcher interface {\n\tFetch(logger lager.Logger, repoName string, tag string) (imageID string, envvars []string, err error)\n}\n\n\/\/ apes docker's *registry.Registry\ntype Registry interface {\n\tGetRepositoryData(repoName string) (*registry.RepositoryData, error)\n\tGetRemoteTags(registries []string, repository string, token []string) (map[string]string, error)\n\tGetRemoteHistory(imageID string, registry string, token []string) ([]string, error)\n\n\tGetRemoteImageJSON(imageID string, registry string, token []string) ([]byte, int, error)\n\tGetRemoteImageLayer(imageID string, registry string, token []string, size int64) (io.ReadCloser, error)\n}\n\n\/\/ apes docker's *graph.Graph\ntype Graph interface {\n\tGet(name string) (*image.Image, error)\n\tExists(imageID string) bool\n\tRegister(image *image.Image, imageJSON []byte, layer archive.ArchiveReader) error\n}\n\ntype DockerRepositoryFetcher struct {\n\tregistry Registry\n\tgraph Graph\n\n\tfetchingLayers map[string]chan struct{}\n\tfetchingMutex *sync.Mutex\n}\n\nfunc New(registry Registry, graph Graph) RepositoryFetcher {\n\treturn &DockerRepositoryFetcher{\n\t\tregistry: registry,\n\t\tgraph: graph,\n\t\tfetchingLayers: map[string]chan struct{}{},\n\t\tfetchingMutex: new(sync.Mutex),\n\t}\n}\n\nfunc (fetcher *DockerRepositoryFetcher) Fetch(logger lager.Logger, repoName string, tag string) (string, []string, error) {\n\tfLog := logger.Session(\"fetch\", lager.Data{\n\t\t\"repo\": repoName,\n\t\t\"tag\": tag,\n\t})\n\n\tfLog.Debug(\"fetching\")\n\n\trepoData, err := fetcher.registry.GetRepositoryData(repoName)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\ttagsList, err := fetcher.registry.GetRemoteTags(repoData.Endpoints, repoName, repoData.Tokens)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\timgID, ok := tagsList[tag]\n\tif !ok {\n\t\treturn \"\", nil, fmt.Errorf(\"unknown tag: %s:%s\", repoName, tag)\n\t}\n\n\ttoken := repoData.Tokens\n\n\tfor _, endpoint := range repoData.Endpoints {\n\t\tfLog.Debug(\"trying\", lager.Data{\n\t\t\t\"endpoint\": endpoint,\n\t\t\t\"image\": imgID,\n\t\t})\n\n\t\tenv, err := fetcher.fetchFromEndpoint(fLog, endpoint, imgID, token)\n\t\tif err == 
nil {\n\t\t\treturn imgID, filterEnv(env, logger), nil\n\t\t}\n\t}\n\n\treturn \"\", nil, fmt.Errorf(\"all endpoints failed: %s\", err)\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetchFromEndpoint(logger lager.Logger, endpoint string, imgID string, token []string) ([]string, error) {\n\thistory, err := fetcher.registry.GetRemoteHistory(imgID, endpoint, token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar allEnv []string\n\tfor i := len(history) - 1; i >= 0; i-- {\n\t\tenv, err := fetcher.fetchLayer(logger, endpoint, history[i], token)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallEnv = append(allEnv, env...)\n\t}\n\n\treturn allEnv, nil\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetchLayer(logger lager.Logger, endpoint string, layerID string, token []string) ([]string, error) {\n\tfor acquired := false; !acquired; acquired = fetcher.fetching(layerID) {\n\t}\n\n\tdefer fetcher.doneFetching(layerID)\n\n\timg, err := fetcher.graph.Get(layerID)\n\tif err == nil {\n\t\tlogger.Info(\"using-cached\", lager.Data{\n\t\t\t\"layer\": layerID,\n\t\t})\n\n\t\treturn imgEnv(img), nil\n\t}\n\n\timgJSON, imgSize, err := fetcher.registry.GetRemoteImageJSON(layerID, endpoint, token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timg, err = image.NewImgJSON(imgJSON)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlayer, err := fetcher.registry.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer layer.Close()\n\n\tstarted := time.Now()\n\n\tlogger.Info(\"downloading\", lager.Data{\n\t\t\"layer\": layerID,\n\t})\n\n\terr = fetcher.graph.Register(img, imgJSON, layer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Info(\"downloaded\", lager.Data{\n\t\t\"layer\": layerID,\n\t\t\"took\": time.Since(started),\n\t})\n\n\treturn imgEnv(img), nil\n}\n\nfunc (fetcher *DockerRepositoryFetcher) fetching(layerID string) bool {\n\tfetcher.fetchingMutex.Lock()\n\n\tfetching, found := fetcher.fetchingLayers[layerID]\n\tif !found {\n\t\tfetcher.fetchingLayers[layerID] = make(chan struct{})\n\t\tfetcher.fetchingMutex.Unlock()\n\t\treturn true\n\t} else {\n\t\tfetcher.fetchingMutex.Unlock()\n\t\t<-fetching\n\t\treturn false\n\t}\n}\n\nfunc (fetcher *DockerRepositoryFetcher) doneFetching(layerID string) {\n\tfetcher.fetchingMutex.Lock()\n\tclose(fetcher.fetchingLayers[layerID])\n\tdelete(fetcher.fetchingLayers, layerID)\n\tfetcher.fetchingMutex.Unlock()\n}\n\nfunc imgEnv(img *image.Image) []string {\n\tvar env []string\n\n\tif img.Config != nil {\n\t\tenv = img.Config.Env\n\t}\n\n\treturn env\n}\n\n\/\/ multiple layers may specify environment variables; they are collected with\n\/\/ the deepest layer first, so the first occurrence of the variable should win\nfunc filterEnv(env []string, logger lager.Logger) []string {\n\tseen := map[string]bool{}\n\n\tvar filtered []string\n\tfor _, e := range env {\n\t\tsegs := strings.SplitN(e, \"=\", 2)\n\t\tif len(segs) != 2 {\n\t\t\t\/\/ malformed docker image metadata?\n\t\t\tlogger.Info(\"Unrecognised environment variable\", lager.Data{\"e\": e})\n\t\t\tcontinue\n\t\t}\n\n\t\tif seen[segs[0]] {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiltered = append(filtered, e)\n\t\tseen[segs[0]] = true\n\t}\n\n\treturn filtered\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"time\"\n)\n\ntype HotstarResponse struct {\n\tErrorDescription string `json:\"errorDescription\"`\n\tMessage string `json:\"message\"`\n\tResultCode string `json:\"resultCode\"`\n\tResultObj struct {\n\t\tResponse struct 
{\n\t\t\tDocs []struct {\n\t\t\t\tActors string `json:\"actors\"`\n\t\t\t\tAnchors string `json:\"anchors\"`\n\t\t\t\tAuthors string `json:\"authors\"`\n\t\t\t\tBroadcastDate int `json:\"broadcastDate\"`\n\t\t\t\tCategoryName string `json:\"categoryName\"`\n\t\t\t\tChannelName string `json:\"channelName\"`\n\t\t\t\tContentID int `json:\"contentId\"`\n\t\t\t\tContentSubtitle string `json:\"contentSubtitle\"`\n\t\t\t\tContentTitle string `json:\"contentTitle\"`\n\t\t\t\tContentType string `json:\"contentType\"`\n\t\t\t\tContractEnd time.Time `json:\"contractEnd\"`\n\t\t\t\tContractStart time.Time `json:\"contractStart\"`\n\t\t\t\tCounter string `json:\"counter\"`\n\t\t\t\tCounterDay string `json:\"counter_day\"`\n\t\t\t\tCounterWeek string `json:\"counter_week\"`\n\t\t\t\tCountry string `json:\"country\"`\n\t\t\t\tDirectors string `json:\"directors\"`\n\t\t\t\tDuration int `json:\"duration\"`\n\t\t\t\tEpisodeNumber int `json:\"episodeNumber\"`\n\t\t\t\tEpisodeTitle string `json:\"episodeTitle\"`\n\t\t\t\tGenre string `json:\"genre\"`\n\t\t\t\tIsAdult string `json:\"isAdult\"`\n\t\t\t\tIsLastDays string `json:\"isLastDays\"`\n\t\t\t\tIsNew string `json:\"isNew\"`\n\t\t\t\tLanguage string `json:\"language\"`\n\t\t\t\tLastupdatedate int `json:\"lastupdatedate\"`\n\t\t\t\tLatest string `json:\"latest\"`\n\t\t\t\tLongDescription string `json:\"longDescription\"`\n\t\t\t\tObjectSubtype string `json:\"objectSubtype\"`\n\t\t\t\tObjectType string `json:\"objectType\"`\n\t\t\t\tOnAir string `json:\"onAir\"`\n\t\t\t\tPackageID string `json:\"packageId\"`\n\t\t\t\tPackageList []interface{} `json:\"packageList\"`\n\t\t\t\tPcExtendedRatings string `json:\"pcExtendedRatings\"`\n\t\t\t\tPcLevelVod string `json:\"pcLevelVod\"`\n\t\t\t\tPopularEpisode string `json:\"popularEpisode\"`\n\t\t\t\tSearchKeywords string `json:\"searchKeywords\"`\n\t\t\t\tSeason string `json:\"season\"`\n\t\t\t\tSeries string `json:\"series\"`\n\t\t\t\tShortDescription string `json:\"shortDescription\"`\n\t\t\t\tTitleBrief string `json:\"titleBrief\"`\n\t\t\t\tURLPictures string `json:\"urlPictures\"`\n\t\t\t\tYear string `json:\"year\"`\n\t\t\t} `json:\"docs\"`\n\t\t\tFacets []interface{} `json:\"facets\"`\n\t\t\tNumFound int `json:\"numFound\"`\n\t\t\tStart int `json:\"start\"`\n\t\t\tType string `json:\"type\"`\n\t\t} `json:\"response\"`\n\t\tResponseHeader struct {\n\t\t\tQTime int `json:\"QTime\"`\n\t\t\tStatus int `json:\"status\"`\n\t\t} `json:\"responseHeader\"`\n\t} `json:\"resultObj\"`\n\tSystemTime int `json:\"systemTime\"`\n}\n\ntype VootResponse struct {\n\tAssets []struct {\n\t\tID string `json:\"id\"`\n\t\tType int `json:\"type\"`\n\t\tName string `json:\"name\"`\n\t\tDescription string `json:\"description\"`\n\t\tImages []struct {\n\t\t\tRatio string `json:\"ratio\"`\n\t\t\tWidth int `json:\"width\"`\n\t\t\tHeight int `json:\"height\"`\n\t\t\tURL string `json:\"url\"`\n\t\t} `json:\"images\"`\n\t\tMetas struct {\n\t\t\tContentSynopsis string `json:\"ContentSynopsis\"`\n\t\t\tContentType string `json:\"ContentType\"`\n\t\t\tContentFileName string `json:\"ContentFileName\"`\n\t\t\tMovieMainTitle string `json:\"MovieMainTitle\"`\n\t\t\tSBU string `json:\"SBU\"`\n\t\t\tIsDownable string `json:\"IsDownable\"`\n\t\t\tContentDuration string `json:\"ContentDuration\"`\n\t\t\tReleaseYear string `json:\"ReleaseYear\"`\n\t\t} `json:\"metas\"`\n\t\tTags struct {\n\t\t\tKeywords []string `json:\"Keywords\"`\n\t\t\tCharacterList []string `json:\"CharacterList\"`\n\t\t\tContributorList []string 
`json:\"ContributorList\"`\n\t\t\tScene1 []string `json:\"Scene1\"`\n\t\t\tScene2 []string `json:\"Scene2\"`\n\t\t\tScene3 []string `json:\"Scene3\"`\n\t\t\tScene4 []string `json:\"Scene4\"`\n\t\t\tScene5 []string `json:\"Scene5\"`\n\t\t\tScene6 []string `json:\"Scene6\"`\n\t\t\tGenre []string `json:\"Genre\"`\n\t\t\tLanguage []string `json:\"Language\"`\n\t\t\tAdCueTime1 []string `json:\"AdCueTime1\"`\n\t\t\tAdCueTime2 []string `json:\"AdCueTime2\"`\n\t\t\tAdCueTime3 []string `json:\"AdCueTime3\"`\n\t\t\tAdCueTime4 []string `json:\"AdCueTime4\"`\n\t\t\tAdCueTime5 []string `json:\"AdCueTime5\"`\n\t\t\tAdCueTime6 []string `json:\"AdCueTime6\"`\n\t\t\tAdCueTime7 []string `json:\"AdCueTime7\"`\n\t\t\tAdCueTime8 []string `json:\"AdCueTime8\"`\n\t\t\tMediaExternalID []string `json:\"MediaExternalId\"`\n\t\t\tWatermarkURL []string `json:\"WatermarkURL\"`\n\t\t\tMovieDirector []string `json:\"MovieDirector\"`\n\t\t} `json:\"tags\"`\n\t\tStartDate int `json:\"start_date\"`\n\t\tEndDate int64 `json:\"end_date\"`\n\t\tExtraParams struct {\n\t\t\tSysStartDate string `json:\"sys_start_date\"`\n\t\t\tSysFinalDate string `json:\"sys_final_date\"`\n\t\t\tExternalIds interface{} `json:\"external_ids\"`\n\t\t\tEntryID string `json:\"entry_id\"`\n\t\t} `json:\"extra_params\"`\n\t\tRURL interface{} `json:\"rURL\"`\n\t} `json:\"assets\"`\n\tTotalItems int `json:\"total_items\"`\n\tStatus struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t} `json:\"status\"`\n}\n\ntype ErosNowResponse struct {\n\tCount string `json:\"count\"`\n\tTotal string `json:\"total\"`\n\tRows []struct {\n\t\tAssetID string `json:\"asset_id\"`\n\t\tTitle string `json:\"title\"`\n\t\tLanguage string `json:\"language\"`\n\t\tRating string `json:\"rating\"`\n\t\tDescription string `json:\"description\"`\n\t\tSubtitles []string `json:\"subtitles\"`\n\t\tAccessLevel string `json:\"access_level\"`\n\t\tDuration string `json:\"duration\"`\n\t\tPeople struct {\n\t\t\tProducer []string `json:\"Producer\"`\n\t\t\tMusicDirector []string `json:\"Music director\"`\n\t\t\tActor []string `json:\"Actor\"`\n\t\t\tDirector []string `json:\"Director\"`\n\t\t} `json:\"people\"`\n\t\tShortDescription string `json:\"short_description\"`\n\t\tFree string `json:\"free\"`\n\t\tAssetType string `json:\"asset_type\"`\n\t\tReleaseYear string `json:\"release_year\"`\n\t\tImages struct {\n\t\t\tNum8 string `json:\"8\"`\n\t\t\tNum9 string `json:\"9\"`\n\t\t\tNum12 string `json:\"12\"`\n\t\t\tNum13 string `json:\"13\"`\n\t\t\tNum17 string `json:\"17\"`\n\t\t\tNum22 string `json:\"22\"`\n\t\t} `json:\"images\"`\n\t\tErosRating string `json:\"eros_rating,omitempty\"`\n\t} `json:\"rows\"`\n}\nchang structspackage main\n\nimport (\n\t\"time\"\n)\n\ntype HotstarResponse struct {\n\tErrorDescription string `json:\"errorDescription\"`\n\tMessage string `json:\"message\"`\n\tResultCode string `json:\"resultCode\"`\n\tResultObj struct {\n\t\tResponse struct {\n\t\t\tDocs []struct {\n\t\t\t\tWebsite string\n\t\t\t\tActors string `json:\"actors\"`\n\t\t\t\tAnchors string `json:\"anchors\"`\n\t\t\t\tAuthors string `json:\"authors\"`\n\t\t\t\tBroadcastDate int `json:\"broadcastDate\"`\n\t\t\t\tCategoryName string `json:\"categoryName\"`\n\t\t\t\tChannelName string `json:\"channelName\"`\n\t\t\t\tContentID int `json:\"contentId\"`\n\t\t\t\tContentSubtitle string `json:\"contentSubtitle\"`\n\t\t\t\tContentTitle string `json:\"contentTitle\"`\n\t\t\t\tContentType string `json:\"contentType\"`\n\t\t\t\tContractEnd time.Time 
`json:\"contractEnd\"`\n\t\t\t\tContractStart time.Time `json:\"contractStart\"`\n\t\t\t\tCounter string `json:\"counter\"`\n\t\t\t\tCounterDay string `json:\"counter_day\"`\n\t\t\t\tCounterWeek string `json:\"counter_week\"`\n\t\t\t\tCountry string `json:\"country\"`\n\t\t\t\tDirectors string `json:\"directors\"`\n\t\t\t\tDuration int `json:\"duration\"`\n\t\t\t\tEpisodeNumber int `json:\"episodeNumber\"`\n\t\t\t\tEpisodeTitle string `json:\"episodeTitle\"`\n\t\t\t\tGenre string `json:\"genre\"`\n\t\t\t\tIsAdult string `json:\"isAdult\"`\n\t\t\t\tIsLastDays string `json:\"isLastDays\"`\n\t\t\t\tIsNew string `json:\"isNew\"`\n\t\t\t\tLanguage string `json:\"language\"`\n\t\t\t\tLastupdatedate int `json:\"lastupdatedate\"`\n\t\t\t\tLatest string `json:\"latest\"`\n\t\t\t\tLongDescription string `json:\"longDescription\"`\n\t\t\t\tObjectSubtype string `json:\"objectSubtype\"`\n\t\t\t\tObjectType string `json:\"objectType\"`\n\t\t\t\tOnAir string `json:\"onAir\"`\n\t\t\t\tPackageID string `json:\"packageId\"`\n\t\t\t\tPackageList []interface{} `json:\"packageList\"`\n\t\t\t\tPcExtendedRatings string `json:\"pcExtendedRatings\"`\n\t\t\t\tPcLevelVod string `json:\"pcLevelVod\"`\n\t\t\t\tPopularEpisode string `json:\"popularEpisode\"`\n\t\t\t\tSearchKeywords string `json:\"searchKeywords\"`\n\t\t\t\tSeason string `json:\"season\"`\n\t\t\t\tSeries string `json:\"series\"`\n\t\t\t\tShortDescription string `json:\"shortDescription\"`\n\t\t\t\tTitleBrief string `json:\"titleBrief\"`\n\t\t\t\tURLPictures string `json:\"urlPictures\"`\n\t\t\t\tYear string `json:\"year\"`\n\t\t\t} `json:\"docs\"`\n\t\t\tFacets []interface{} `json:\"facets\"`\n\t\t\tNumFound int `json:\"numFound\"`\n\t\t\tStart int `json:\"start\"`\n\t\t\tType string `json:\"type\"`\n\t\t} `json:\"response\"`\n\t\tResponseHeader struct {\n\t\t\tQTime int `json:\"QTime\"`\n\t\t\tStatus int `json:\"status\"`\n\t\t} `json:\"responseHeader\"`\n\t} `json:\"resultObj\"`\n\tSystemTime int `json:\"systemTime\"`\n}\n\ntype VootResponse struct {\n\tAssets []struct {\n\t\tWebsite string\n\n\t\tID string `json:\"id\"`\n\t\tType int `json:\"type\"`\n\t\tName string `json:\"name\"`\n\t\tDescription string `json:\"description\"`\n\t\tImages []struct {\n\t\t\tRatio string `json:\"ratio\"`\n\t\t\tWidth int `json:\"width\"`\n\t\t\tHeight int `json:\"height\"`\n\t\t\tURL string `json:\"url\"`\n\t\t} `json:\"images\"`\n\t\tMetas struct {\n\t\t\tContentSynopsis string `json:\"ContentSynopsis\"`\n\t\t\tContentType string `json:\"ContentType\"`\n\t\t\tContentFileName string `json:\"ContentFileName\"`\n\t\t\tMovieMainTitle string `json:\"MovieMainTitle\"`\n\t\t\tSBU string `json:\"SBU\"`\n\t\t\tIsDownable string `json:\"IsDownable\"`\n\t\t\tContentDuration string `json:\"ContentDuration\"`\n\t\t\tReleaseYear string `json:\"ReleaseYear\"`\n\t\t} `json:\"metas\"`\n\t\tTags struct {\n\t\t\tKeywords []string `json:\"Keywords\"`\n\t\t\tCharacterList []string `json:\"CharacterList\"`\n\t\t\tContributorList []string `json:\"ContributorList\"`\n\t\t\tScene1 []string `json:\"Scene1\"`\n\t\t\tScene2 []string `json:\"Scene2\"`\n\t\t\tScene3 []string `json:\"Scene3\"`\n\t\t\tScene4 []string `json:\"Scene4\"`\n\t\t\tScene5 []string `json:\"Scene5\"`\n\t\t\tScene6 []string `json:\"Scene6\"`\n\t\t\tGenre []string `json:\"Genre\"`\n\t\t\tLanguage []string `json:\"Language\"`\n\t\t\tAdCueTime1 []string `json:\"AdCueTime1\"`\n\t\t\tAdCueTime2 []string `json:\"AdCueTime2\"`\n\t\t\tAdCueTime3 []string `json:\"AdCueTime3\"`\n\t\t\tAdCueTime4 []string 
`json:\"AdCueTime4\"`\n\t\t\tAdCueTime5 []string `json:\"AdCueTime5\"`\n\t\t\tAdCueTime6 []string `json:\"AdCueTime6\"`\n\t\t\tAdCueTime7 []string `json:\"AdCueTime7\"`\n\t\t\tAdCueTime8 []string `json:\"AdCueTime8\"`\n\t\t\tMediaExternalID []string `json:\"MediaExternalId\"`\n\t\t\tWatermarkURL []string `json:\"WatermarkURL\"`\n\t\t\tMovieDirector []string `json:\"MovieDirector\"`\n\t\t} `json:\"tags\"`\n\t\tStartDate int `json:\"start_date\"`\n\t\tEndDate int64 `json:\"end_date\"`\n\t\tExtraParams struct {\n\t\t\tSysStartDate string `json:\"sys_start_date\"`\n\t\t\tSysFinalDate string `json:\"sys_final_date\"`\n\t\t\tExternalIds interface{} `json:\"external_ids\"`\n\t\t\tEntryID string `json:\"entry_id\"`\n\t\t} `json:\"extra_params\"`\n\t\tRURL interface{} `json:\"rURL\"`\n\t} `json:\"assets\"`\n\tTotalItems int `json:\"total_items\"`\n\tStatus struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t} `json:\"status\"`\n}\n\ntype ErosNowResponse struct {\n\tCount string `json:\"count\"`\n\tTotal string `json:\"total\"`\n\tRows []struct {\n\t\tWebsite string\n\n\t\tAssetID string `json:\"asset_id\"`\n\t\tTitle string `json:\"title\"`\n\t\tLanguage string `json:\"language\"`\n\t\tRating string `json:\"rating\"`\n\t\tDescription string `json:\"description\"`\n\t\tSubtitles []string `json:\"subtitles\"`\n\t\tAccessLevel string `json:\"access_level\"`\n\t\tDuration string `json:\"duration\"`\n\t\tPeople struct {\n\t\t\tProducer []string `json:\"Producer\"`\n\t\t\tMusicDirector []string `json:\"Music director\"`\n\t\t\tActor []string `json:\"Actor\"`\n\t\t\tDirector []string `json:\"Director\"`\n\t\t} `json:\"people\"`\n\t\tShortDescription string `json:\"short_description\"`\n\t\tFree string `json:\"free\"`\n\t\tAssetType string `json:\"asset_type\"`\n\t\tReleaseYear string `json:\"release_year\"`\n\t\tImages struct {\n\t\t\tNum8 string `json:\"8\"`\n\t\t\tNum9 string `json:\"9\"`\n\t\t\tNum12 string `json:\"12\"`\n\t\t\tNum13 string `json:\"13\"`\n\t\t\tNum17 string `json:\"17\"`\n\t\t\tNum22 string `json:\"22\"`\n\t\t} `json:\"images\"`\n\t\tErosRating string `json:\"eros_rating,omitempty\"`\n\t} `json:\"rows\"`\n}\n<|endoftext|>"} {"text":"package main\n\nimport \"os\"\nimport \"tritium\/packager\"\n\/\/import . \"tritium\/linker\"\nimport s \"tritium\/spec\"\nimport \"tritium\/doc\"\nimport \"tritium\/test\"\n\nfunc show_usage() {\n\tprintln(\"General purpose Tritium command line interface. 
Commands are: package, link, test\")\n\tprintln(\"\\tpackage:\\n\\t\\ttritium package --name \\n\\t\\tOr\\n\\t\\tpackage --output-path \")\n}\n\nfunc main() {\n\tif len(os.Args) > 1 {\n\t\tcommand := os.Args[1]\n\n\t\tif command == \"package\" {\n\n\t\t\tif len(os.Args) > 3 {\n\t\t\t\tif os.Args[2] == \"--name\" {\n\t\t\t\t\t\/\/ Build the package specified by the path\n\t\t\t\t\tpath := os.Args[3]\n\t\t\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\t\t\tpkg.Load(path)\n\t\t\t\t\t\/\/pkg.SerializedOutput()\n\t\t\t\t\t\/\/println(pkg.DebugInfo())\n\n\t\t\t\t} else if os.Args[2] == \"--output-path\" {\n\t\t\t\t\t_, path := packager.OutputDefaultPackage(os.Args[3])\n\t\t\t\t\tprintln(\"Output default package to:\", path)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tpkg := packager.BuildDefaultPackage()\n\t\t\t\tpkg.SerializedOutput()\n\t\t\t}\n\n\t\t} else if command == \"pkginfo\" {\n\t\t\tname := os.Args[2]\n\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\tpkg.Load(name)\n\t\t\tprintln(pkg.DebugInfo())\n\t\t} else if command == \"doc\" {\n\t\t\tname := os.Args[2]\n\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\tpkg.Load(name)\n\t\t\tprintln(doc.Process(pkg.Package))\n\t\t} else if command == \"apollo-doc\" {\n\t\t\tif len(os.Args) < 3 {\n\t\t\t\tprintln(\"Usage: tritium apollo-doc \")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\toutputFile := os.Args[2]\n\n\t\t\tdoc.Generate(outputFile)\n\t\t} else if command == \"link\" {\n\t\t\tprintln(\"Linking files found in the directory:\", os.Args[2])\n\t\t\t\/\/LinkerToBytes(os.Args[2])\n\t\t} else if command == \"test\" {\n\t\t\tprintln(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ttest.TestCustomSuite(os.Args[2])\n\t\t\t} else {\n\t\t\t\tprintln(\"Usage:\\n tritium test \")\n\t\t\t}\n\t\t} else if command == \"benchmark\" {\n\t\t\tprintln(\"Benchmarking tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ttest.BenchmarkCustomSuite(os.Args[2])\n\t\t\t} else {\n\t\t\t\tprintln(\"Usage:\\n tritium benchmark \")\n\t\t\t}\n\t\t} else if command == \"debug\" {\n\t\t\tprintln(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ts.All(command, os.Args[2])\n\t\t\t} else if len(os.Args) == 4 {\n\t\t\t\ts.All(command, os.Args[2], os.Args[3])\n\t\t\t} else {\n\t\t\t\tprintln(\"Usage:\\n tritium test \")\n\t\t\t}\n\n\t\t} else if command == \"old_test\" {\n\t\t\tprintln(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ts.All(command, os.Args[2])\n\t\t\t} else if len(os.Args) == 4 {\n\t\t\t\ts.All(command, os.Args[2], os.Args[3])\n\t\t\t} else {\n\t\t\t\tprintln(\"Usage:\\n tritium test \")\n\t\t\t}\n\n\t\t} else {\n\t\t\tprintln(\"No such command\", command)\n\t\t\tshow_usage()\n\t\t}\n\t} else {\n\t\tshow_usage()\n\t}\n}\nUse fmt.Println so output goes to stdout. Also add example to tritium help much like hermes' help.package main\n\nimport \"os\"\nimport \"fmt\"\nimport \"tritium\/packager\"\n\/\/import . \"tritium\/linker\"\nimport s \"tritium\/spec\"\nimport \"tritium\/doc\"\nimport \"tritium\/test\"\n\nfunc show_usage() {\n\tfmt.Println(\"General purpose Tritium command line interface. 
Commands are: package, link, test\")\n\tfmt.Println(\"\\tpackage:\\n\\t\\ttritium package --name \\n\\t\\tOr\\n\\t\\tpackage --output-path \")\n\tfmt.Println(\"\\te.g.\\n\\t\\ttritium package --output-path ~\/.manhattan\/packages\")\n}\n\nfunc main() {\n\tif len(os.Args) > 1 {\n\t\tcommand := os.Args[1]\n\n\t\tif command == \"package\" {\n\n\t\t\tif len(os.Args) > 3 {\n\t\t\t\tif os.Args[2] == \"--name\" {\n\t\t\t\t\t\/\/ Build the package specified by the path\n\t\t\t\t\tpath := os.Args[3]\n\t\t\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\t\t\tpkg.Load(path)\n\t\t\t\t\t\/\/pkg.SerializedOutput()\n\t\t\t\t\t\/\/fmt.Println(pkg.DebugInfo())\n\n\t\t\t\t} else if os.Args[2] == \"--output-path\" {\n\t\t\t\t\t_, path := packager.OutputDefaultPackage(os.Args[3])\n\t\t\t\t\tfmt.Println(\"Output default package to:\", path)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tpkg := packager.BuildDefaultPackage()\n\t\t\t\tpkg.SerializedOutput()\n\t\t\t}\n\n\t\t} else if command == \"pkginfo\" {\n\t\t\tname := os.Args[2]\n\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\tpkg.Load(name)\n\t\t\tfmt.Println(pkg.DebugInfo())\n\t\t} else if command == \"doc\" {\n\t\t\tname := os.Args[2]\n\t\t\tpkg := packager.NewPackage(packager.DefaultPackagePath, packager.BuildOptions())\n\t\t\tpkg.Load(name)\n\t\t\tfmt.Println(doc.Process(pkg.Package))\n\t\t} else if command == \"apollo-doc\" {\n\t\t\tif len(os.Args) < 3 {\n\t\t\t\tfmt.Println(\"Usage: tritium apollo-doc \")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\toutputFile := os.Args[2]\n\n\t\t\tdoc.Generate(outputFile)\n\t\t} else if command == \"link\" {\n\t\t\tfmt.Println(\"Linking files found in the directory:\", os.Args[2])\n\t\t\t\/\/LinkerToBytes(os.Args[2])\n\t\t} else if command == \"test\" {\n\t\t\tfmt.Println(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ttest.TestCustomSuite(os.Args[2])\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Usage:\\n tritium test \")\n\t\t\t}\n\t\t} else if command == \"benchmark\" {\n\t\t\tfmt.Println(\"Benchmarking tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ttest.BenchmarkCustomSuite(os.Args[2])\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Usage:\\n tritium benchmark \")\n\t\t\t}\n\t\t} else if command == \"debug\" {\n\t\t\tfmt.Println(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ts.All(command, os.Args[2])\n\t\t\t} else if len(os.Args) == 4 {\n\t\t\t\ts.All(command, os.Args[2], os.Args[3])\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Usage:\\n tritium test \")\n\t\t\t}\n\n\t\t} else if command == \"old_test\" {\n\t\t\tfmt.Println(\"Running tests found in the directory:\", os.Args[2])\n\t\t\tif len(os.Args) == 3 {\n\t\t\t\ts.All(command, os.Args[2])\n\t\t\t} else if len(os.Args) == 4 {\n\t\t\t\ts.All(command, os.Args[2], os.Args[3])\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Usage:\\n tritium test \")\n\t\t\t}\n\n\t\t} else {\n\t\t\tfmt.Println(\"No such command\", command)\n\t\t\tshow_usage()\n\t\t}\n\t} else {\n\t\tshow_usage()\n\t}\n}\n<|endoftext|>"} {"text":"package tutum\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\tWRITE_WAIT = 5 * time.Second\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tPONG_WAIT = 10 * time.Second\n\t\/\/ Send pings to client with this 
period. Must be less than PONG_WAIT.\n\tPING_PERIOD = PONG_WAIT \/ 2\n)\n\n\/*\n\tfunc dial()\n\tReturns : a websocket connection\n*\/\n\nfunc dial() (*websocket.Conn, error) {\n\tvar Url = \"\"\n\n\tif os.Getenv(\"TUTUM_STREAM_HOST\") != \"\" {\n\t\tu, _ := url.Parse(os.Getenv(\"TUTUM_STREAM_HOST\"))\n\t\t_, port, _ := net.SplitHostPort(u.Host)\n\t\tif port == \"\" {\n\t\t\tu.Host = u.Host + \":443\"\n\t\t}\n\t\tStreamUrl = u.Scheme + \":\/\/\" + u.Host + \"\/v1\/\"\n\t} else if os.Getenv(\"TUTUM_STREAM_URL\") != \"\" {\n\t\tu, _ := url.Parse(os.Getenv(\"TUTUM_STREAM_URL\"))\n\t\t_, port, _ := net.SplitHostPort(u.Host)\n\t\tif port == \"\" {\n\t\t\tu.Host = u.Host + \":443\"\n\t\t}\n\t\tStreamUrl = u.Scheme + \":\/\/\" + u.Host + \"\/v1\/\"\n\t}\n\n\tif os.Getenv(\"TUTUM_AUTH\") != \"\" {\n\t\tendpoint := \"\"\n\t\tendpoint = url.QueryEscape(os.Getenv(\"TUTUM_AUTH\"))\n\t\tUrl = StreamUrl + \"events?auth=\" + endpoint\n\t}\n\tif User != \"\" && ApiKey != \"\" {\n\t\tUrl = StreamUrl + \"events?token=\" + ApiKey + \"&user=\" + User\n\t}\n\n\theader := http.Header{}\n\theader.Add(\"User-Agent\", customUserAgent)\n\n\tvar Dialer websocket.Dialer\n\tws, _, err := Dialer.Dial(Url, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ws, nil\n}\n\nfunc dialHandler(e chan error) *websocket.Conn {\n\ttries := 0\n\tfor {\n\t\tws, err := dial()\n\t\tif err != nil {\n\t\t\ttries++\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\tif tries > 3 {\n\t\t\t\tlog.Println(\"[DIAL ERROR]: \" + err.Error())\n\t\t\t\te <- err\n\t\t\t}\n\t\t} else {\n\t\t\treturn ws\n\t\t}\n\t}\n}\n\nfunc messagesHandler(ws *websocket.Conn, ticker *time.Ticker, msg Event, c chan Event, e chan error) {\n\tws.SetPongHandler(func(string) error {\n\t\tws.SetReadDeadline(time.Now().Add(PONG_WAIT))\n\t\treturn nil\n\t})\n\tfor {\n\t\terr := ws.ReadJSON(&msg)\n\t\tif err != nil {\n\t\t\tlog.Println(\"READ ERR\")\n\t\t\tticker.Stop()\n\t\t\te <- err\n\t\t\treturn\n\t\t}\n\n\t\tif reflect.TypeOf(msg).String() == \"tutum.Event\" {\n\t\t\tc <- msg\n\t\t}\n\t}\n}\n\n\/*\n\tfunc TutumStreamCall\n\tReturns : The stream of all events from your NodeClusters, Containers, Services, Stack, Actions, ...\n*\/\n\nfunc TutumEvents(c chan Event, e chan error) {\n\tvar msg Event\n\tticker := time.NewTicker(PING_PERIOD)\n\tws := dialHandler(e)\n\n\tdefer func() {\n\t\tclose(c)\n\t\tclose(e)\n\t\tws.Close()\n\t}()\n\tgo messagesHandler(ws, ticker, msg, c, e)\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\tticker.Stop()\n\t\t\t\te <- err\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\tcase <-e:\n\t\t\tticker.Stop()\n\t\t}\n\t}\n}\nfix websocket read error looppackage tutum\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\tWRITE_WAIT = 5 * time.Second\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tPONG_WAIT = 10 * time.Second\n\t\/\/ Send pings to client with this period. 
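The dial() function in the tutum record above defaults the stream host to port 443 when the URL carries none, by ignoring net.SplitHostPort's results and testing for an empty port. A sketch of the same check driven off the error return instead; the host name is hypothetical, not the real stream endpoint.

package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	// Hypothetical stream URL with no explicit port.
	u, err := url.Parse("wss://stream.example.com/v1")
	if err != nil {
		panic(err)
	}
	if _, _, err := net.SplitHostPort(u.Host); err != nil {
		// SplitHostPort fails with "missing port in address" here,
		// which is exactly the case dial() patches up: assume TLS.
		u.Host = net.JoinHostPort(u.Host, "443")
	}
	fmt.Println(u.String()) // wss://stream.example.com:443/v1
}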
Must be less than PONG_WAIT.\n\tPING_PERIOD = PONG_WAIT \/ 2\n)\n\n\/*\n\tfunc dial()\n\tReturns : a websocket connection\n*\/\n\nfunc dial() (*websocket.Conn, error) {\n\tvar Url = \"\"\n\n\tif os.Getenv(\"TUTUM_STREAM_HOST\") != \"\" {\n\t\tu, _ := url.Parse(os.Getenv(\"TUTUM_STREAM_HOST\"))\n\t\t_, port, _ := net.SplitHostPort(u.Host)\n\t\tif port == \"\" {\n\t\t\tu.Host = u.Host + \":443\"\n\t\t}\n\t\tStreamUrl = u.Scheme + \":\/\/\" + u.Host + \"\/v1\/\"\n\t} else if os.Getenv(\"TUTUM_STREAM_URL\") != \"\" {\n\t\tu, _ := url.Parse(os.Getenv(\"TUTUM_STREAM_URL\"))\n\t\t_, port, _ := net.SplitHostPort(u.Host)\n\t\tif port == \"\" {\n\t\t\tu.Host = u.Host + \":443\"\n\t\t}\n\t\tStreamUrl = u.Scheme + \":\/\/\" + u.Host + \"\/v1\/\"\n\t}\n\n\tif os.Getenv(\"TUTUM_AUTH\") != \"\" {\n\t\tendpoint := \"\"\n\t\tendpoint = url.QueryEscape(os.Getenv(\"TUTUM_AUTH\"))\n\t\tUrl = StreamUrl + \"events?auth=\" + endpoint\n\t}\n\tif User != \"\" && ApiKey != \"\" {\n\t\tUrl = StreamUrl + \"events?token=\" + ApiKey + \"&user=\" + User\n\t}\n\n\theader := http.Header{}\n\theader.Add(\"User-Agent\", customUserAgent)\n\n\tvar Dialer websocket.Dialer\n\tws, _, err := Dialer.Dial(Url, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ws, nil\n}\n\nfunc dialHandler(e chan error) *websocket.Conn {\n\ttries := 0\n\tfor {\n\t\tws, err := dial()\n\t\tif err != nil {\n\t\t\ttries++\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\tif tries > 3 {\n\t\t\t\tlog.Println(\"[DIAL ERROR]: \" + err.Error())\n\t\t\t\te <- err\n\t\t\t}\n\t\t} else {\n\t\t\treturn ws\n\t\t}\n\t}\n}\n\nfunc messagesHandler(ws *websocket.Conn, ticker *time.Ticker, msg Event, c chan Event, e chan error) {\n\tws.SetPongHandler(func(string) error {\n\t\tws.SetReadDeadline(time.Now().Add(PONG_WAIT))\n\t\treturn nil\n\t})\n\tfor {\n\t\terr := ws.ReadJSON(&msg)\n\t\tif err != nil {\n\t\t\tlog.Println(\"READ ERR\")\n\t\t\te <- err\n\t\t\tbreak\n\t\t} else {\n\t\t\tif reflect.TypeOf(msg).String() == \"tutum.Event\" {\n\t\t\t\tc <- msg\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/*\n\tfunc TutumStreamCall\n\tReturns : The stream of all events from your NodeClusters, Containers, Services, Stack, Actions, ...\n*\/\n\nfunc TutumEvents(c chan Event, e chan error) {\n\tvar msg Event\n\tticker := time.NewTicker(PING_PERIOD)\n\tws := dialHandler(e)\n\n\tdefer func() {\n\t\tclose(c)\n\t\tclose(e)\n\t\tws.Close()\n\t}()\n\tgo messagesHandler(ws, ticker, msg, c, e)\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\tticker.Stop()\n\t\t\t\te <- err\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\tcase <-e:\n\t\t\tticker.Stop()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package quizduell\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nconst (\n\ttvProtocolPrefix = \"https:\/\/\"\n\ttvHostName = \"quizduell.mobilemassresponse.de\"\n\tcorsHeaderToken = \"grandc3ntr1xrul3z\"\n)\n\ntype TVClient struct {\n\tUserID int\n\t\/\/ The API seems to be using this auth token (tt)\n\t\/\/ as a validation mechanism, instead of cookies\n\t\/\/ or the like.\n\tAuthToken string\n}\n\n\/\/ NewTVClient creates a new TV client that can be used\n\/\/ to interact with the TV version of Quizduell.\n\/\/ The authToken is User.TT\nfunc NewTVClient(userID int, authToken string) *TVClient {\n\treturn &TVClient{\n\t\tUserID: userID,\n\t\tAuthToken: authToken,\n\t}\n}\n\n\/\/ FromClient returns a new TV client based on an already\n\/\/ 
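The "fix websocket read error loop" change above makes the read loop exit once on error instead of re-reading a dead connection. A self-contained sketch of the same gorilla/websocket keepalive shape — reader goroutine that returns on the first error, pong handler pushing the read deadline forward, ping ticker firing more often than the deadline expires. The endpoint URL is a placeholder, not the Tutum stream.

package main

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

const (
	writeWait  = 5 * time.Second
	pongWait   = 10 * time.Second
	pingPeriod = pongWait / 2 // must fire more often than pongWait expires
)

func main() {
	// Hypothetical endpoint; substitute any reachable websocket server.
	conn, _, err := websocket.DefaultDialer.Dial("wss://example.com/events", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Every pong extends the read deadline; if the peer goes silent,
	// the next read fails instead of blocking forever.
	conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			if _, _, err := conn.ReadMessage(); err != nil {
				log.Println("read:", err)
				return // exit once on error, as in the fix above
			}
		}
	}()

	ticker := time.NewTicker(pingPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(writeWait)); err != nil {
				return
			}
		}
	}
}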
existant (and logged in) Quizduell client. If the user\n\/\/ hasn't created a TV profile yet, this will also be done\n\/\/ in the process.\nfunc FromClient(c *Client) (*TVClient, error) {\n\tuser, err := c.CreateTVUser()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTVClient(user.ID, user.TT), nil\n}\n\n\/\/ AgreeAGBs makes the current user agree to the AGB\n\/\/ put up by the TV quiz broadcaster.\nfunc (t *TVClient) AgreeAGBs() map[string]interface{} {\n\treturn t.request(\"\/feousers\/agbs\/\"+strconv.Itoa(t.UserID)+\"\/true\", url.Values{})\n}\n\n\/\/ GetState returns the state of the TV quiz\nfunc (t *TVClient) GetState() map[string]interface{} {\n\treturn t.request(\"\/states\/\"+strconv.Itoa(t.UserID), nil)\n}\n\nfunc (t *TVClient) GetRankings() map[string]interface{} {\n\treturn t.request(\"\/users\/myranking\/\"+strconv.Itoa(t.UserID), nil)\n}\n\nfunc (t *TVClient) GetMyProfile() map[string]interface{} {\n\treturn t.GetProfile(t.UserID)\n}\n\nfunc (t *TVClient) GetProfile(userID int) map[string]interface{} {\n\treturn t.request(\"\/users\/profiles\/\"+strconv.Itoa(userID), nil)\n}\n\nfunc (t *TVClient) DeleteUser() map[string]interface{} {\n\treturn t.request(\"\/users\/profiles\/\"+strconv.Itoa(t.UserID), nil, \"DELETE\")\n}\n\nfunc (t *TVClient) SetAvatarAndNickname(nick, avatarCode string) map[string]interface{} {\n\tdata := url.Values{}\n\n\tif avatarCode != \"\" {\n\t\tdata.Set(\"AvatarString\", avatarCode)\n\t}\n\tdata.Set(\"Nick\", nick)\n\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/avatarandnick\", data)\n}\n\nfunc (t *TVClient) SelectCategory(categoryID int) map[string]interface{} {\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/category\"+strconv.Itoa(categoryID), nil)\n}\n\nfunc (t *TVClient) SendAnswer(questionID, answerID int) map[string]interface{} {\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/response\"+strconv.Itoa(questionID)+\"\/\"+strconv.Itoa(answerID), nil)\n}\n\nfunc (t *TVClient) UploadProfileImage(r io.Reader) map[string]interface{} {\n\timg, _ := ioutil.ReadAll(r)\n\n\tdata := url.Values{}\n\tdata.Set(\"img\", base64.StdEncoding.EncodeToString(img))\n\n\treturn t.request(\"\/users\/base64\/\"+strconv.Itoa(t.UserID)+\"\/jpg\", data, \"POST\", \"img\")\n}\n\nfunc (t *TVClient) request(path string, data url.Values, method ...string) map[string]interface{} {\n\trequestURL := tvProtocolPrefix + tvHostName + path\n\trequest, err := buildRequest(requestURL, data, method...)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trequest.Header.Set(\"x-app-request\", corsHeaderToken)\n\trequest.Header.Set(\"x-tv-authtoken\", t.AuthToken)\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded; charset=utf-8\")\n\n\tresp, err := http.DefaultClient.Do(request)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar m map[string]interface{}\n\terr = json.Unmarshal(body, &m)\n\treturn m\n}\nAdded PostProfile.package quizduell\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nconst (\n\ttvProtocolPrefix = \"https:\/\/\"\n\ttvHostName = \"quizduell.mobilemassresponse.de\"\n\tcorsHeaderToken = \"grandc3ntr1xrul3z\"\n)\n\ntype TVClient struct {\n\tUserID int\n\t\/\/ The API seems to be using this auth token (tt)\n\t\/\/ as a validation mechanism, instead of cookies\n\t\/\/ or the like.\n\tAuthToken 
string\n}\n\n\/\/ NewTVClient creates a new TV client that can be used\n\/\/ to interact with the TV version of Quizduell.\n\/\/ The authToken is User.TT\nfunc NewTVClient(userID int, authToken string) *TVClient {\n\treturn &TVClient{\n\t\tUserID: userID,\n\t\tAuthToken: authToken,\n\t}\n}\n\n\/\/ FromClient returns a new TV client based on an already\n\/\/ existant (and logged in) Quizduell client. If the user\n\/\/ hasn't created a TV profile yet, this will also be done\n\/\/ in the process.\nfunc FromClient(c *Client) (*TVClient, error) {\n\tuser, err := c.CreateTVUser()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTVClient(user.ID, user.TT), nil\n}\n\n\/\/ AgreeAGBs makes the current user agree to the AGB\n\/\/ put up by the TV quiz broadcaster.\nfunc (t *TVClient) AgreeAGBs() map[string]interface{} {\n\treturn t.request(\"\/feousers\/agbs\/\"+strconv.Itoa(t.UserID)+\"\/true\", url.Values{})\n}\n\n\/\/ GetState returns the state of the TV quiz\nfunc (t *TVClient) GetState() map[string]interface{} {\n\treturn t.request(\"\/states\/\"+strconv.Itoa(t.UserID), nil)\n}\n\nfunc (t *TVClient) GetRankings() map[string]interface{} {\n\treturn t.request(\"\/users\/myranking\/\"+strconv.Itoa(t.UserID), nil)\n}\n\nfunc (t *TVClient) GetMyProfile() map[string]interface{} {\n\treturn t.GetProfile(t.UserID)\n}\n\nfunc (t *TVClient) GetProfile(userID int) map[string]interface{} {\n\treturn t.request(\"\/users\/profiles\/\"+strconv.Itoa(userID), nil)\n}\n\nfunc (t *TVClient) PostProfile(profile map[string]interface{}) map[string]interface{} {\n\tdata := url.Values{}\n\n\tfor key, val := range profile {\n\t\tdata.Set(key, val)\n\t}\n\n\treturn t.request(\"\/users\/profiles\/\"+strconv.Itoa(t.UserID), data)\n}\n\nfunc (t *TVClient) DeleteUser() map[string]interface{} {\n\treturn t.request(\"\/users\/profiles\/\"+strconv.Itoa(t.UserID), nil, \"DELETE\")\n}\n\nfunc (t *TVClient) SetAvatarAndNickname(nick, avatarCode string) map[string]interface{} {\n\tdata := url.Values{}\n\n\tif avatarCode != \"\" {\n\t\tdata.Set(\"AvatarString\", avatarCode)\n\t}\n\tdata.Set(\"Nick\", nick)\n\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/avatarandnick\", data)\n}\n\nfunc (t *TVClient) SelectCategory(categoryID int) map[string]interface{} {\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/category\"+strconv.Itoa(categoryID), nil)\n}\n\nfunc (t *TVClient) SendAnswer(questionID, answerID int) map[string]interface{} {\n\treturn t.request(\"\/users\/\"+strconv.Itoa(t.UserID)+\"\/response\"+strconv.Itoa(questionID)+\"\/\"+strconv.Itoa(answerID), nil)\n}\n\nfunc (t *TVClient) UploadProfileImage(r io.Reader) map[string]interface{} {\n\timg, _ := ioutil.ReadAll(r)\n\n\tdata := url.Values{}\n\tdata.Set(\"img\", base64.StdEncoding.EncodeToString(img))\n\n\treturn t.request(\"\/users\/base64\/\"+strconv.Itoa(t.UserID)+\"\/jpg\", data, \"POST\", \"img\")\n}\n\nfunc (t *TVClient) request(path string, data url.Values, method ...string) map[string]interface{} {\n\trequestURL := tvProtocolPrefix + tvHostName + path\n\trequest, err := buildRequest(requestURL, data, method...)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trequest.Header.Set(\"x-app-request\", corsHeaderToken)\n\trequest.Header.Set(\"x-tv-authtoken\", t.AuthToken)\n\trequest.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded; charset=utf-8\")\n\n\tresp, err := http.DefaultClient.Do(request)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err 
!= nil {\n\t\tpanic(err)\n\t}\n\n\tvar m map[string]interface{}\n\terr = json.Unmarshal(body, &m)\n\treturn m\n}\n<|endoftext|>"} {"text":"package micha\n\ntype InlineQueryResults []InlineQueryResult\n\ntype InlineQueryResult interface {\n\t_ItsInlineQueryResult()\n}\n\ntype InlineQueryResultBase struct {\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\n\t\/\/ Optional\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\nfunc (i InlineQueryResultBase) _ItsInlineQueryResult() {}\n\n\/\/ InlineQueryResultArticle is an inline query response article.\ntype InlineQueryResultArticle struct {\n\tInlineQueryResultBase\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tUrl string `json:\"url\"`\n\tHideUrl bool `json:\"hide_url\"`\n\tDescription string `json:\"description\"`\n\tThumbUrl string `json:\"thumb_url\"`\n\tThumbWidth int `json:\"thumb_width\"`\n\tThumbHeight int `json:\"thumb_height\"`\n}\n\n\/\/ InlineQueryResultPhoto is an inline query response photo.\ntype InlineQueryResultPhoto struct {\n\tInlineQueryResultBase\n\tPhotoUrl string `json:\"photo_url\"`\n\n\t\/\/ Optional\n\tMimeType string `json:\"mime_type\"`\n\tPhotoWidth int `json:\"photo_width\"`\n\tPhotoHeight int `json:\"photo_height\"`\n\tThumbUrl string `json:\"thumb_url\"`\n\tTitle string `json:\"title\"`\n\tDescription string `json:\"description\"`\n\tCaption string `json:\"caption\"`\n}\n\n\/\/ InlineQueryResultGIF is an inline query response GIF.\ntype InlineQueryResultGIF struct {\n\tInlineQueryResultBase\n\tGifUrl string `json:\"gif_url\"`\n\n\t\/\/ Optional\n\tGifWidth int `json:\"gif_width\"`\n\tGifHeight int `json:\"gif_height\"`\n\tThumbUrl string `json:\"thumb_url\"`\n\tTitle string `json:\"title\"`\n\tCaption string `json:\"caption\"`\n}\n\n\/\/ InlineQueryResultMPEG4GIF is an inline query response MPEG4 GIF.\ntype InlineQueryResultMPEG4GIF struct {\n\tInlineQueryResultBase\n\tMpeg4Url string `json:\"mpeg4_url\"`\n\n\t\/\/ Optional\n\tMpeg4Width int `json:\"mpeg4_width\"`\n\tMpeg4Height int `json:\"mpeg4_height\"`\n\tThumbURL string `json:\"thumb_url\"`\n\tTitle string `json:\"title\"`\n\tCaption string `json:\"caption\"`\n}\n\n\/\/ InlineQueryResultVideo is an inline query response video.\ntype InlineQueryResultVideo struct {\n\tInlineQueryResultBase\n\tVideoUrl string `json:\"video_url\"`\n\tMimeType string `json:\"mime_type\"`\n\n\t\/\/ Optional\n\tThumbUrl string `json:\"thumb_url\"`\n\tTitle string `json:\"title\"`\n\tCaption string `json:\"caption\"`\n\tVideoWidth int `json:\"video_width\"`\n\tVideoHeight int `json:\"video_height\"`\n\tVideoDuration int `json:\"video_duration\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ InlineQueryResultAudio is an inline query response audio.\ntype InlineQueryResultAudio struct {\n\tInlineQueryResultBase\n\tAudioUrl string `json:\"audio_url\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tPerformer string `json:\"performer\"`\n\tAudioDuration int `json:\"audio_duration\"`\n}\n\n\/\/ InlineQueryResultVoice is an inline query response voice.\ntype InlineQueryResultVoice struct {\n\tInlineQueryResultBase\n\tVoiceUrl string `json:\"voice_url\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tVoiceDuration int `json:\"voice_duration\"`\n}\n\n\/\/ InlineQueryResultDocument is an inline query response document.\ntype InlineQueryResultDocument struct {\n\tInlineQueryResultBase\n\tTitle string `json:\"title\"`\n\tDocumentUrl string 
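One caveat in the PostProfile method added above: url.Values.Set takes two strings, so data.Set(key, val) with an interface{} value does not compile as written. A hedged sketch of one way to stringify arbitrary profile values first; the helper name is mine, not part of the package.

package quizduell

import (
	"fmt"
	"net/url"
)

// valuesFromProfile converts an arbitrary profile map into url.Values.
// fmt.Sprint renders any value type as its default string form, which
// satisfies url.Values.Set's (key, value string) signature.
func valuesFromProfile(profile map[string]interface{}) url.Values {
	data := url.Values{}
	for key, val := range profile {
		data.Set(key, fmt.Sprint(val))
	}
	return data
}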
`json:\"document_url\"`\n\tMimeType string `json:\"mime_type\"`\n\n\t\/\/ Optional\n\tCaption string `json:\"caption\"`\n\tDescription string `json:\"description\"`\n\tThumbURL string `json:\"thumb_url\"`\n\tThumbWidth int `json:\"thumb_width\"`\n\tThumbHeight int `json:\"thumb_height\"`\n}\n\n\/\/ InlineQueryResultLocation is an inline query response location.\ntype InlineQueryResultLocation struct {\n\tInlineQueryResultBase\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tThumbUrl string `json:\"thumb_url\"`\n\tThumbWidth int `json:\"thumb_width\"`\n\tThumbHeight int `json:\"thumb_height\"`\n}\n\ntype InputMessageContent interface {\n\t_ItsInputMessageContent()\n}\n\ntype InputMessageContentBase struct{}\n\nfunc (i InlineQueryResultBase) _ItsInputMessageContent() {}\n\n\/\/ InputTextMessageContent contains text for displaying as an inline query result.\ntype InputTextMessageContent struct {\n\tInputMessageContentBase\n\tMessageText string `json:\"message_text\"`\n\tParseMode string `json:\"parse_mode\"`\n\tDisableWebPagePreview bool `json:\"disable_web_page_preview\"`\n}\n\n\/\/ InputLocationMessageContent contains a location for displaying as an inline query result.\ntype InputLocationMessageContent struct {\n\tInputMessageContentBase\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n}\n\n\/\/ InputVenueMessageContent contains a venue for displaying an inline query result.\ntype InputVenueMessageContent struct {\n\tInputMessageContentBase\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n\tTitle string `json:\"title\"`\n\tAddress string `json:\"address\"`\n\tFoursquareId string `json:\"foursquare_id\"`\n}\n\n\/\/ InputContactMessageContent contains a contact for displaying as an inline query result.\ntype InputContactMessageContent struct {\n\tInputMessageContentBase\n\tPhoneNumber string `json:\"phone_number\"`\n\tFirstName string `json:\"first_name\"`\n\tLastName string `json:\"last_name\"`\n}\nInline types refactoring.package micha\n\nconst (\n\tINLINE_TYPE_ARTICLE = \"article\"\n\tINLINE_TYPE_PHOTO = \"photo\"\n\tINLINE_TYPE_GIF = \"gif\"\n\tINLINE_TYPE_VIDEO = \"video\"\n\tINLINE_TYPE_AUDIO = \"audio\"\n\tINLINE_TYPE_DOCUMENT = \"document\"\n\tINLINE_TYPE_VOICE = \"voice\"\n\tINLINE_TYPE_LOCATION = \"location\"\n)\n\ntype InlineQueryResults []InlineQueryResult\n\ntype InlineQueryResult interface {\n\t_ItsInlineQueryResult()\n}\n\ntype InlineQueryResultImplementation struct{}\n\nfunc (i InlineQueryResultImplementation) _ItsInlineQueryResult() {}\n\n\/\/ InlineQueryResultArticle is an inline query response article.\ntype InlineQueryResultArticle struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tUrl string `json:\"url,omitempty\"`\n\tHideUrl bool `json:\"hide_url,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tThumbWidth int `json:\"thumb_width,omitempty\"`\n\tThumbHeight int `json:\"thumb_height,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultPhoto is an inline query response photo.\ntype InlineQueryResultPhoto struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string 
`json:\"id\"`\n\tPhotoUrl string `json:\"photo_url\"`\n\n\t\/\/ Optional\n\tMimeType string `json:\"mime_type,omitempty\"`\n\tPhotoWidth int `json:\"photo_width,omitempty\"`\n\tPhotoHeight int `json:\"photo_height,omitempty\"`\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tCaption string `json:\"caption,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultGIF is an inline query response GIF.\ntype InlineQueryResultGif struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tGifUrl string `json:\"gif_url\"`\n\n\t\/\/ Optional\n\tGifWidth int `json:\"gif_width,omitempty\"`\n\tGifHeight int `json:\"gif_height,omitempty\"`\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tCaption string `json:\"caption,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultMPEG4GIF is an inline query response MPEG4 GIF.\ntype InlineQueryResultMpeg4Gif struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tMpeg4Url string `json:\"mpeg4_url\"`\n\n\t\/\/ Optional\n\tMpeg4Width int `json:\"mpeg4_width,omitempty\"`\n\tMpeg4Height int `json:\"mpeg4_height,omitempty\"`\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tCaption string `json:\"caption,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultVideo is an inline query response video.\ntype InlineQueryResultVideo struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tVideoUrl string `json:\"video_url\"`\n\tMimeType string `json:\"mime_type\"`\n\n\t\/\/ Optional\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tCaption string `json:\"caption,omitempty\"`\n\tVideoWidth int `json:\"video_width,omitempty\"`\n\tVideoHeight int `json:\"video_height,omitempty\"`\n\tVideoDuration int `json:\"video_duration,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultAudio is an inline query response audio.\ntype InlineQueryResultAudio struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tAudioUrl string `json:\"audio_url\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tPerformer string `json:\"performer,omitempty\"`\n\tAudioDuration int `json:\"audio_duration,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultVoice is an inline query response voice.\ntype InlineQueryResultVoice struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tVoiceUrl string `json:\"voice_url\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ 
Optional\n\tVoiceDuration int `json:\"voice_duration,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultDocument is an inline query response document.\ntype InlineQueryResultDocument struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tTitle string `json:\"title\"`\n\tDocumentUrl string `json:\"document_url\"`\n\tMimeType string `json:\"mime_type\"`\n\n\t\/\/ Optional\n\tCaption string `json:\"caption,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tThumbURL string `json:\"thumb_url,omitempty\"`\n\tThumbWidth int `json:\"thumb_width,omitempty\"`\n\tThumbHeight int `json:\"thumb_height,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\n\/\/ InlineQueryResultLocation is an inline query response location.\ntype InlineQueryResultLocation struct {\n\tInlineQueryResultImplementation\n\tType string `json:\"type\"`\n\tId string `json:\"id\"`\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n\tTitle string `json:\"title\"`\n\n\t\/\/ Optional\n\tThumbUrl string `json:\"thumb_url,omitempty\"`\n\tThumbWidth int `json:\"thumb_width,omitempty\"`\n\tThumbHeight int `json:\"thumb_height,omitempty\"`\n\tReplyMarkup *InlineKeyboardMarkup `json:\"reply_markup,omitempty\"`\n\tInputMessageContent InputMessageContent `json:\"input_message_content,omitempty\"`\n}\n\ntype InputMessageContent interface {\n\t_ItsInputMessageContent()\n}\n\ntype InputMessageContentImplementation struct{}\n\nfunc (i InputMessageContentImplementation) _ItsInputMessageContent() {}\n\n\/\/ InputTextMessageContent contains text for displaying as an inline query result.\ntype InputTextMessageContent struct {\n\tInputMessageContentImplementation\n\tMessageText string `json:\"message_text\"`\n\tParseMode string `json:\"parse_mode\"`\n\tDisableWebPagePreview bool `json:\"disable_web_page_preview\"`\n}\n\n\/\/ InputLocationMessageContent contains a location for displaying as an inline query result.\ntype InputLocationMessageContent struct {\n\tInputMessageContentImplementation\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n}\n\n\/\/ InputVenueMessageContent contains a venue for displaying an inline query result.\ntype InputVenueMessageContent struct {\n\tInputMessageContentImplementation\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n\tTitle string `json:\"title\"`\n\tAddress string `json:\"address\"`\n\tFoursquareId string `json:\"foursquare_id\"`\n}\n\n\/\/ InputContactMessageContent contains a contact for displaying as an inline query result.\ntype InputContactMessageContent struct {\n\tInputMessageContentImplementation\n\tPhoneNumber string `json:\"phone_number\"`\n\tFirstName string `json:\"first_name\"`\n\tLastName string `json:\"last_name\"`\n}\n<|endoftext|>"} {"text":"package adm\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"gopkg.in\/resty.v0\"\n)\n\nfunc ActivateAccount(c *resty.Client, a *auth.Token) (*auth.Token, error) {\n\tvar (\n\t\tkex *auth.Kex\n\t\terr error\n\t\tresp *resty.Response\n\t\tbody []byte\n\t)\n\tjBytes := &[]byte{}\n\tcipher := &[]byte{}\n\tplain := &[]byte{}\n\tcred := &auth.Token{}\n\n\tif *jBytes, err = json.Marshal(a); err != nil {\n\t\treturn nil, 
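Both the old and refactored layouts above rely on an embedded struct whose unexported marker method satisfies the InlineQueryResult and InputMessageContent interfaces, effectively sealing them against outside implementations. A standalone sketch of the pattern with illustrative names (not the package's own):

package main

import "fmt"

// queryResult can only be satisfied by types that embed resultBase,
// because no other package can implement the unexported marker method.
type queryResult interface {
	_itsQueryResult()
}

type resultBase struct{}

func (resultBase) _itsQueryResult() {}

type articleResult struct {
	resultBase
	Title string
}

func main() {
	// Embedding resultBase promotes the marker method, so the
	// assignment compiles without articleResult declaring anything.
	var r queryResult = articleResult{Title: "hello"}
	fmt.Printf("%T\n", r) // main.articleResult
}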
err\n\t}\n\n\t\/\/ establish key exchange for credential transmission\n\tif kex, err = KeyExchange(c); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ encrypt credentials\n\tif err = kex.EncryptAndEncode(jBytes, cipher); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Send request\n\tif resp, err = c.R().\n\t\tSetHeader(`Content-Type`, `application\/octet-stream`).\n\t\tSetBody(*cipher).\n\t\tPut(fmt.Sprintf(\n\t\t\t\"\/authenticate\/activate\/%s\", kex.Request.String())); err != nil {\n\t\treturn nil, err\n\t} else if resp.StatusCode() != 200 {\n\t\treturn nil, fmt.Errorf(\"Activation failed with status code: %d\", resp.StatusCode())\n\t}\n\n\t\/\/ decrypt reply\n\tbody = resp.Body()\n\tif err = kex.DecodeAndDecrypt(&body, plain); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = json.Unmarshal(*plain, *cred); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cred, nil\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\nFIX: json unmarshal, not pointer valuepackage adm\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"gopkg.in\/resty.v0\"\n)\n\nfunc ActivateAccount(c *resty.Client, a *auth.Token) (*auth.Token, error) {\n\tvar (\n\t\tkex *auth.Kex\n\t\terr error\n\t\tresp *resty.Response\n\t\tbody []byte\n\t)\n\tjBytes := &[]byte{}\n\tcipher := &[]byte{}\n\tplain := &[]byte{}\n\tcred := &auth.Token{}\n\n\tif *jBytes, err = json.Marshal(a); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ establish key exchange for credential transmission\n\tif kex, err = KeyExchange(c); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ encrypt credentials\n\tif err = kex.EncryptAndEncode(jBytes, cipher); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Send request\n\tif resp, err = c.R().\n\t\tSetHeader(`Content-Type`, `application\/octet-stream`).\n\t\tSetBody(*cipher).\n\t\tPut(fmt.Sprintf(\n\t\t\t\"\/authenticate\/activate\/%s\", kex.Request.String())); err != nil {\n\t\treturn nil, err\n\t} else if resp.StatusCode() != 200 {\n\t\treturn nil, fmt.Errorf(\"Activation failed with status code: %d\", resp.StatusCode())\n\t}\n\n\t\/\/ decrypt reply\n\tbody = resp.Body()\n\tif err = kex.DecodeAndDecrypt(&body, plain); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = json.Unmarshal(*plain, cred); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cred, nil\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n)\n\n\/\/ Create a ReadLease that never expires, unless voluntarily revoked or\n\/\/ upgraded.\n\/\/\n\/\/ The supplied function will be used to obtain the read lease contents, the\n\/\/ first time and whenever the supplied file leaser decides to expire the\n\/\/ temporary copy thus obtained. 
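The adm fix above is one character but load-bearing: json.Unmarshal must receive a pointer, and passing *cred hands it a copied struct value, so it fails with an InvalidUnmarshalError and the caller's token stays empty. A minimal reproduction with a simplified token type:

package main

import (
	"encoding/json"
	"fmt"
)

type token struct {
	UserID string `json:"user_id"`
}

func main() {
	raw := []byte(`{"user_id":"abc"}`)
	cred := &token{}

	// Wrong: the dereferenced value is a copy, so Unmarshal reports
	// "json: Unmarshal(non-pointer main.token)" and writes nothing.
	if err := json.Unmarshal(raw, *cred); err != nil {
		fmt.Println("value:", err)
	}

	// Right: pass the pointer itself so Unmarshal can write through it.
	if err := json.Unmarshal(raw, cred); err == nil {
		fmt.Println("pointer:", cred.UserID) // pointer: abc
	}
}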
It must return the same contents every time,\n\/\/ and the contents must be of the given size.\n\/\/\n\/\/ This magic is not preserved after the lease is upgraded.\nfunc NewAutoRefreshingReadLease(\n\tfl FileLeaser,\n\tsize int64,\n\tf func() (io.ReadCloser, error)) (rl ReadLease) {\n\trl = &autoRefreshingReadLease{\n\t\tleaser: fl,\n\t\tsize: size,\n\t\tf: f,\n\t}\n\n\treturn\n}\n\ntype autoRefreshingReadLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsize int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tleaser FileLeaser\n\tf func() (io.ReadCloser, error)\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The current wrapped lease, or nil if one has never been issued.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\twrapped ReadLease\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Attempt to clean up after the supplied read\/write lease.\nfunc destroyReadWriteLease(rwl ReadWriteLease) {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error destroying read\/write lease: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Downgrade to a read lease.\n\trl, err := rwl.Downgrade()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Downgrade: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Revoke the read lease.\n\trl.Revoke()\n}\n\n\/\/ Set up a read\/write lease and fill in our contents.\n\/\/\n\/\/ REQUIRES: The caller has observed that rl.lease has expired.\n\/\/\n\/\/ LOCKS_REQUIRED(rl.mu)\nfunc (rl *autoRefreshingReadLease) getContents() (\n\trwl ReadWriteLease, err error) {\n\t\/\/ Obtain some space to write the contents.\n\trwl, err = rl.leaser.NewFile()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"NewFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to clean up if we exit early.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdestroyReadWriteLease(rwl)\n\t\t}\n\t}()\n\n\t\/\/ Obtain the reader for our contents.\n\trc, err := rl.f()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"User function: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := rc.Close()\n\t\tif closeErr != nil && err == nil {\n\t\t\terr = fmt.Errorf(\"Close: %v\", closeErr)\n\t\t}\n\t}()\n\n\t\/\/ Copy into the read\/write lease.\n\tcopied, err := io.Copy(rwl, rc)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Copy: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Did the user lie about the size?\n\tif copied != rl.Size() {\n\t\terr = fmt.Errorf(\"Copied %v bytes; expected %v\", copied, rl.Size())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Downgrade and save the supplied read\/write lease obtained with getContents\n\/\/ for later use.\n\/\/\n\/\/ LOCKS_REQUIRED(rl.mu)\nfunc (rl *autoRefreshingReadLease) saveContents(rwl ReadWriteLease) {\n\tdowngraded, err := rwl.Downgrade()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to downgrade write lease (%q); abandoning.\", err.Error())\n\t\treturn\n\t}\n\n\trl.wrapped = downgraded\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public 
interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (rl *autoRefreshingReadLease) Read(p []byte) (n int, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.Read(p)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\toff, err = rwl.Seek(offset, whence)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) ReadAt(\n\tp []byte,\n\toff int64) (n int, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.ReadAt(p, off)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Revoked() (revoked bool) {\n\tpanic(\"TODO\")\n}\n\nfunc (rl *autoRefreshingReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\tpanic(\"TODO\")\n}\n\nfunc (rl *autoRefreshingReadLease) Revoke() {\n\tpanic(\"TODO\")\n}\nAutoRefreshingReadLeaseTest.Revoked\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n)\n\n\/\/ Create a ReadLease that never expires, unless voluntarily revoked or\n\/\/ upgraded.\n\/\/\n\/\/ The supplied function will be used to obtain the read lease contents, the\n\/\/ first time and whenever the supplied file leaser decides to expire the\n\/\/ temporary copy thus obtained. 
It must return the same contents every time,\n\/\/ and the contents must be of the given size.\n\/\/\n\/\/ This magic is not preserved after the lease is upgraded.\nfunc NewAutoRefreshingReadLease(\n\tfl FileLeaser,\n\tsize int64,\n\tf func() (io.ReadCloser, error)) (rl ReadLease) {\n\trl = &autoRefreshingReadLease{\n\t\tleaser: fl,\n\t\tsize: size,\n\t\tf: f,\n\t}\n\n\treturn\n}\n\ntype autoRefreshingReadLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Constant data\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsize int64\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tleaser FileLeaser\n\tf func() (io.ReadCloser, error)\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Set to true when we've been revoked for good.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\trevoked bool\n\n\t\/\/ The current wrapped lease, or nil if one has never been issued.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\twrapped ReadLease\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Attempt to clean up after the supplied read\/write lease.\nfunc destroyReadWriteLease(rwl ReadWriteLease) {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error destroying read\/write lease: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Downgrade to a read lease.\n\trl, err := rwl.Downgrade()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Downgrade: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Revoke the read lease.\n\trl.Revoke()\n}\n\n\/\/ Set up a read\/write lease and fill in our contents.\n\/\/\n\/\/ REQUIRES: The caller has observed that rl.lease has expired.\n\/\/\n\/\/ LOCKS_REQUIRED(rl.mu)\nfunc (rl *autoRefreshingReadLease) getContents() (\n\trwl ReadWriteLease, err error) {\n\t\/\/ Obtain some space to write the contents.\n\trwl, err = rl.leaser.NewFile()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"NewFile: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to clean up if we exit early.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdestroyReadWriteLease(rwl)\n\t\t}\n\t}()\n\n\t\/\/ Obtain the reader for our contents.\n\trc, err := rl.f()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"User function: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := rc.Close()\n\t\tif closeErr != nil && err == nil {\n\t\t\terr = fmt.Errorf(\"Close: %v\", closeErr)\n\t\t}\n\t}()\n\n\t\/\/ Copy into the read\/write lease.\n\tcopied, err := io.Copy(rwl, rc)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Copy: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Did the user lie about the size?\n\tif copied != rl.Size() {\n\t\terr = fmt.Errorf(\"Copied %v bytes; expected %v\", copied, rl.Size())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Downgrade and save the supplied read\/write lease obtained with getContents\n\/\/ for later use.\n\/\/\n\/\/ LOCKS_REQUIRED(rl.mu)\nfunc (rl *autoRefreshingReadLease) saveContents(rwl ReadWriteLease) {\n\tdowngraded, err := rwl.Downgrade()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to downgrade write lease (%q); abandoning.\", err.Error())\n\t\treturn\n\t}\n\n\trl.wrapped = 
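getContents above copies the user-supplied reader into the lease and then compares the byte count against the size the caller promised, treating any mismatch as a lie. The same guard in isolation, with hypothetical names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// fillAndVerify copies src into dst and enforces the promised length,
// mirroring the io.Copy size check in getContents above.
func fillAndVerify(dst io.Writer, src io.Reader, want int64) error {
	copied, err := io.Copy(dst, src)
	if err != nil {
		return fmt.Errorf("Copy: %v", err)
	}
	if copied != want {
		// The content function promised a fixed size; a mismatch means
		// the copy cannot be trusted and must be discarded.
		return fmt.Errorf("copied %v bytes; expected %v", copied, want)
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	fmt.Println(fillAndVerify(&buf, strings.NewReader("hello"), 5)) // <nil>
}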
downgraded\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (rl *autoRefreshingReadLease) Read(p []byte) (n int, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.Read(p)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\toff, err = rwl.Seek(offset, whence)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) ReadAt(\n\tp []byte,\n\toff int64) (n int, err error) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\t\/\/ Common case: is the existing lease still valid?\n\tif rl.wrapped != nil {\n\t\tpanic(\"TODO\")\n\t}\n\n\t\/\/ Get hold of a read\/write lease containing our contents.\n\trwl, err := rl.getContents()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getContents: %v\", err)\n\t\treturn\n\t}\n\n\tdefer rl.saveContents(rwl)\n\n\t\/\/ Serve from the read\/write lease.\n\tn, err = rwl.ReadAt(p, off)\n\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Revoked() (revoked bool) {\n\trl.mu.Lock()\n\tdefer rl.mu.Unlock()\n\n\trevoked = rl.revoked\n\treturn\n}\n\nfunc (rl *autoRefreshingReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\tpanic(\"TODO\")\n}\n\nfunc (rl *autoRefreshingReadLease) Revoke() {\n\tpanic(\"TODO\")\n}\n<|endoftext|>"} {"text":"package integration_test\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
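The new Revoked accessor reads a boolean under the same mutex that guards the rest of the lease state, matching the file's GUARDED_BY(mu) annotations. The pattern in miniature; sync/atomic could serve where no other state shares the lock, but a single mutex keeps all the guarded fields consistent.

package main

import (
	"fmt"
	"sync"
)

// lease demonstrates the GUARDED_BY(mu) convention: every read or
// write of revoked happens with mu held.
type lease struct {
	mu      sync.Mutex
	revoked bool
}

func (l *lease) Revoke() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.revoked = true
}

func (l *lease) Revoked() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.revoked
}

func main() {
	l := &lease{}
	fmt.Println(l.Revoked()) // false
	l.Revoke()
	fmt.Println(l.Revoked()) // true
}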
\"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bpDir string\nvar buildpackVersion string\nvar packagedBuildpack cutlass.VersionedBuildpackPackage\n\nfunc init() {\n\tflag.StringVar(&buildpackVersion, \"version\", \"\", \"version to use (builds if empty)\")\n\tflag.BoolVar(&cutlass.Cached, \"cached\", true, \"cached buildpack\")\n\tflag.StringVar(&cutlass.DefaultMemory, \"memory\", \"128M\", \"default memory for pushed apps\")\n\tflag.StringVar(&cutlass.DefaultDisk, \"disk\", \"384M\", \"default disk for pushed apps\")\n\tflag.Parse()\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\t\/\/ Run once\n\tif buildpackVersion == \"\" {\n\t\tpackagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdata, err := json.Marshal(packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\treturn data\n\t}\n\n\treturn []byte{}\n}, func(data []byte) {\n\t\/\/ Run on all nodes\n\tvar err error\n\tif len(data) > 0 {\n\t\terr = json.Unmarshal(data, &packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tbuildpackVersion = packagedBuildpack.Version\n\t}\n\n\tbpDir, err = cutlass.FindRoot()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tcutlass.SeedRandom()\n\tcutlass.DefaultStdoutStderr = GinkgoWriter\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n\t\/\/ Run on all nodes\n}, func() {\n\t\/\/ Run once\n\t\/\/ Expect(cutlass.RemovePackagedBuildpack(packagedBuildpack)).To(Succeed())\n\tExpect(cutlass.DeleteOrphanedRoutes()).To(Succeed())\n})\n\nfunc TestIntegration(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Integration Suite\")\n}\n\nfunc PushAppAndConfirm(app *cutlass.App) {\n\tExpect(app.Push()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n\tExpect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())\n}\n\nfunc Restart(app *cutlass.App) {\n\tExpect(app.Restart()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc ApiHasTask() bool {\n\tapiVersionString, err := cutlass.ApiVersion()\n\tExpect(err).To(BeNil())\n\tapiVersion, err := semver.Make(apiVersionString)\n\tExpect(err).To(BeNil())\n\tapiHasTask, err := semver.ParseRange(\">= 2.75.0\")\n\tExpect(err).To(BeNil())\n\treturn apiHasTask(apiVersion)\n}\n\nfunc AssertUsesProxyDuringStagingIfPresent(fixtureName string) {\n\tContext(\"with an uncached buildpack\", func() {\n\t\tBeforeEach(func() {\n\t\t\tif cutlass.Cached {\n\t\t\t\tSkip(\"Running cached tests\")\n\t\t\t}\n\t\t})\n\n\t\tIt(\"uses a proxy during staging if present\", func() {\n\t\t\tproxy, err := cutlass.NewProxy()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer proxy.Close()\n\n\t\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\t\terr = cmd.Run()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer os.Remove(bpFile)\n\n\t\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\t\tbpDir,\n\t\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\t\tbpFile,\n\t\t\t\t[]string{\"HTTP_PROXY=\" + proxy.URL, \"HTTPS_PROXY=\" + proxy.URL},\n\t\t\t)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tdestUrl, err := url.Parse(proxy.URL)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(cutlass.UniqueDestination(\n\t\t\t\ttraffic, fmt.Sprintf(\"%s.%s\", destUrl.Hostname(), destUrl.Port()),\n\t\t\t)).To(BeNil())\n\t\t})\n\t})\n}\n\nfunc AssertNoInternetTraffic(fixtureName 
string) {\n\tIt(\"has no traffic\", func() {\n\t\tif !cutlass.Cached {\n\t\t\tSkip(\"Running uncached tests\")\n\t\t}\n\n\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\terr := cmd.Run()\n\t\tExpect(err).To(BeNil())\n\t\tdefer os.Remove(bpFile)\n\n\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\tbpDir,\n\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\tbpFile,\n\t\t\t[]string{},\n\t\t)\n\t\tExpect(err).To(BeNil())\n\t\tExpect(traffic).To(BeEmpty())\n\t})\n}\nUse cutlass test helper CopyCfHome [#150860504]package integration_test\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n)\n\nvar bpDir string\nvar buildpackVersion string\nvar packagedBuildpack cutlass.VersionedBuildpackPackage\n\nfunc init() {\n\tflag.StringVar(&buildpackVersion, \"version\", \"\", \"version to use (builds if empty)\")\n\tflag.BoolVar(&cutlass.Cached, \"cached\", true, \"cached buildpack\")\n\tflag.StringVar(&cutlass.DefaultMemory, \"memory\", \"128M\", \"default memory for pushed apps\")\n\tflag.StringVar(&cutlass.DefaultDisk, \"disk\", \"384M\", \"default disk for pushed apps\")\n\tflag.Parse()\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\t\/\/ Run once\n\tif buildpackVersion == \"\" {\n\t\tpackagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdata, err := json.Marshal(packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\treturn data\n\t}\n\n\treturn []byte{}\n}, func(data []byte) {\n\t\/\/ Run on all nodes\n\tvar err error\n\tif len(data) > 0 {\n\t\terr = json.Unmarshal(data, &packagedBuildpack)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tbuildpackVersion = packagedBuildpack.Version\n\t}\n\n\tbpDir, err = cutlass.FindRoot()\n\tExpect(err).NotTo(HaveOccurred())\n\n\tExpect(cutlass.CopyCfHome()).To(Succeed())\n\tcutlass.SeedRandom()\n\tcutlass.DefaultStdoutStderr = GinkgoWriter\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n\t\/\/ Run on all nodes\n\tExpect(os.RemoveAll(\"CF_HOME\")).To(Succeed())\n}, func() {\n\t\/\/ Run once\n\t\/\/ Expect(cutlass.RemovePackagedBuildpack(packagedBuildpack)).To(Succeed())\n\tExpect(cutlass.DeleteOrphanedRoutes()).To(Succeed())\n})\n\nfunc TestIntegration(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Integration Suite\")\n}\n\nfunc PushAppAndConfirm(app *cutlass.App) {\n\tExpect(app.Push()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n\tExpect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())\n}\n\nfunc Restart(app *cutlass.App) {\n\tExpect(app.Restart()).To(Succeed())\n\tEventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{\"RUNNING\"}))\n}\n\nfunc ApiHasTask() bool {\n\tapiVersionString, err := cutlass.ApiVersion()\n\tExpect(err).To(BeNil())\n\tapiVersion, err := semver.Make(apiVersionString)\n\tExpect(err).To(BeNil())\n\tapiHasTask, err := semver.ParseRange(\">= 2.75.0\")\n\tExpect(err).To(BeNil())\n\treturn apiHasTask(apiVersion)\n}\n\nfunc AssertUsesProxyDuringStagingIfPresent(fixtureName string) {\n\tContext(\"with an uncached buildpack\", func() {\n\t\tBeforeEach(func() {\n\t\t\tif 
cutlass.Cached {\n\t\t\t\tSkip(\"Running cached tests\")\n\t\t\t}\n\t\t})\n\n\t\tIt(\"uses a proxy during staging if present\", func() {\n\t\t\tproxy, err := cutlass.NewProxy()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer proxy.Close()\n\n\t\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\t\terr = cmd.Run()\n\t\t\tExpect(err).To(BeNil())\n\t\t\tdefer os.Remove(bpFile)\n\n\t\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\t\tbpDir,\n\t\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\t\tbpFile,\n\t\t\t\t[]string{\"HTTP_PROXY=\" + proxy.URL, \"HTTPS_PROXY=\" + proxy.URL},\n\t\t\t)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tdestUrl, err := url.Parse(proxy.URL)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\tExpect(cutlass.UniqueDestination(\n\t\t\t\ttraffic, fmt.Sprintf(\"%s.%s\", destUrl.Hostname(), destUrl.Port()),\n\t\t\t)).To(BeNil())\n\t\t})\n\t})\n}\n\nfunc AssertNoInternetTraffic(fixtureName string) {\n\tIt(\"has no traffic\", func() {\n\t\tif !cutlass.Cached {\n\t\t\tSkip(\"Running uncached tests\")\n\t\t}\n\n\t\tbpFile := filepath.Join(bpDir, buildpackVersion+\"tmp\")\n\t\tcmd := exec.Command(\"cp\", packagedBuildpack.File, bpFile)\n\t\terr := cmd.Run()\n\t\tExpect(err).To(BeNil())\n\t\tdefer os.Remove(bpFile)\n\n\t\ttraffic, err := cutlass.InternetTraffic(\n\t\t\tbpDir,\n\t\t\tfilepath.Join(\"fixtures\", fixtureName),\n\t\t\tbpFile,\n\t\t\t[]string{},\n\t\t)\n\t\tExpect(err).To(BeNil())\n\t\tExpect(traffic).To(BeEmpty())\n\t})\n}\n<|endoftext|>"} {"text":"package utils\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tct \"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/discoverd\/client\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/host\/volume\"\n\t\"github.com\/flynn\/flynn\/pkg\/cluster\"\n\t\"github.com\/flynn\/flynn\/pkg\/stream\"\n)\n\nfunc JobConfig(f *ct.ExpandedFormation, name, hostID string) *host.Job {\n\tt := f.Release.Processes[name]\n\tenv := make(map[string]string, len(f.Release.Env)+len(t.Env)+4)\n\tfor k, v := range f.Release.Env {\n\t\tenv[k] = v\n\t}\n\tfor k, v := range t.Env {\n\t\tenv[k] = v\n\t}\n\tid := cluster.GenerateJobID(hostID)\n\tenv[\"FLYNN_APP_ID\"] = f.App.ID\n\tenv[\"FLYNN_RELEASE_ID\"] = f.Release.ID\n\tenv[\"FLYNN_PROCESS_TYPE\"] = name\n\tenv[\"FLYNN_JOB_ID\"] = id\n\tjob := &host.Job{\n\t\tID: id,\n\t\tMetadata: map[string]string{\n\t\t\t\"flynn-controller.app\": f.App.ID,\n\t\t\t\"flynn-controller.app_name\": f.App.Name,\n\t\t\t\"flynn-controller.release\": f.Release.ID,\n\t\t\t\"flynn-controller.type\": name,\n\t\t},\n\t\tArtifact: host.Artifact{\n\t\t\tType: f.Artifact.Type,\n\t\t\tURI: f.Artifact.URI,\n\t\t},\n\t\tConfig: host.ContainerConfig{\n\t\t\tCmd: t.Cmd,\n\t\t\tEnv: env,\n\t\t\tHostNetwork: t.HostNetwork,\n\t\t},\n\t\tResurrect: t.Resurrect,\n\t\tResources: t.Resources,\n\t}\n\tif len(t.Entrypoint) > 0 {\n\t\tjob.Config.Entrypoint = t.Entrypoint\n\t}\n\tjob.Config.Ports = make([]host.Port, len(t.Ports))\n\tfor i, p := range t.Ports {\n\t\tjob.Config.Ports[i].Proto = p.Proto\n\t\tjob.Config.Ports[i].Port = p.Port\n\t\tjob.Config.Ports[i].Service = p.Service\n\t}\n\treturn job\n}\n\nfunc ProvisionVolume(h VolumeCreator, job *host.Job) error {\n\tvol, err := h.CreateVolume(\"default\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tjob.Config.Volumes = []host.VolumeBinding{{\n\t\tTarget: \"\/data\",\n\t\tVolumeID: 
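The integration suite above uses Ginkgo's two-phase synchronized hooks, and the CopyCfHome call is added in the per-node phase so each parallel node gets its own CF_HOME. A stripped-down sketch of how data flows from the once-only function to every node; the payload contents are hypothetical.

package integration_test

import (
	"encoding/json"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// The first function runs once on node 1; whatever it returns is
// handed, as bytes, to the second function on every parallel node.
var _ = SynchronizedBeforeSuite(func() []byte {
	shared := map[string]string{"buildpack": "example-v1.2.3"} // hypothetical payload
	data, err := json.Marshal(shared)
	Expect(err).NotTo(HaveOccurred())
	return data
}, func(data []byte) {
	var shared map[string]string
	Expect(json.Unmarshal(data, &shared)).To(Succeed())
})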
vol.ID,\n\t\tWriteable: true,\n\t}}\n\treturn nil\n}\n\nfunc JobMetaFromMetadata(metadata map[string]string) map[string]string {\n\tjobMeta := make(map[string]string, len(metadata))\n\tfor k, v := range metadata {\n\t\tif strings.HasPrefix(k, \"flynn-controller.\") {\n\t\t\tcontinue\n\t\t}\n\t\tjobMeta[k] = v\n\t}\n\treturn jobMeta\n}\n\ntype FormationKey struct {\n\tAppID, ReleaseID string\n}\n\nfunc NewFormationKey(appID, releaseID string) FormationKey {\n\treturn FormationKey{AppID: appID, ReleaseID: releaseID}\n}\n\nfunc ExpandFormation(c ControllerClient, f *ct.Formation) (*ct.ExpandedFormation, error) {\n\tapp, err := c.GetApp(f.AppID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting app: %s\", err)\n\t}\n\n\trelease, err := c.GetRelease(f.ReleaseID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting release: %s\", err)\n\t}\n\n\tartifact, err := c.GetArtifact(release.ArtifactID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting artifact: %s\", err)\n\t}\n\n\tprocs := make(map[string]int)\n\tfor typ, count := range f.Processes {\n\t\tprocs[typ] = count\n\t}\n\n\tef := &ct.ExpandedFormation{\n\t\tApp: app,\n\t\tRelease: release,\n\t\tArtifact: artifact,\n\t\tProcesses: procs,\n\t\tUpdatedAt: time.Now(),\n\t}\n\tif f.UpdatedAt != nil {\n\t\tef.UpdatedAt = *f.UpdatedAt\n\t}\n\treturn ef, nil\n}\n\ntype VolumeCreator interface {\n\tCreateVolume(string) (*volume.Info, error)\n}\n\ntype HostClient interface {\n\tVolumeCreator\n\tID() string\n\tAddJob(*host.Job) error\n\tGetJob(id string) (*host.ActiveJob, error)\n\tAttach(*host.AttachReq, bool) (cluster.AttachClient, error)\n\tStopJob(string) error\n\tListJobs() (map[string]host.ActiveJob, error)\n\tStreamEvents(id string, ch chan *host.Event) (stream.Stream, error)\n\tGetStatus() (*host.HostStatus, error)\n}\n\ntype ClusterClient interface {\n\tHost(string) (HostClient, error)\n\tHosts() ([]HostClient, error)\n\tStreamHostEvents(chan *discoverd.Event) (stream.Stream, error)\n}\n\ntype ControllerClient interface {\n\tGetApp(appID string) (*ct.App, error)\n\tGetRelease(releaseID string) (*ct.Release, error)\n\tGetArtifact(artifactID string) (*ct.Artifact, error)\n\tGetFormation(appID, releaseID string) (*ct.Formation, error)\n\tCreateApp(app *ct.App) error\n\tCreateRelease(release *ct.Release) error\n\tCreateArtifact(artifact *ct.Artifact) error\n\tPutFormation(formation *ct.Formation) error\n\tStreamFormations(since *time.Time, ch chan<- *ct.ExpandedFormation) (stream.Stream, error)\n\tAppList() ([]*ct.App, error)\n\tFormationList(appID string) ([]*ct.Formation, error)\n\tPutJob(*ct.Job) error\n}\n\nfunc ClusterClientWrapper(c *cluster.Client) clusterClientWrapper {\n\treturn clusterClientWrapper{c}\n}\n\ntype clusterClientWrapper struct {\n\t*cluster.Client\n}\n\nfunc (c clusterClientWrapper) Host(id string) (HostClient, error) {\n\treturn c.Client.Host(id)\n}\n\nfunc (c clusterClientWrapper) Hosts() ([]HostClient, error) {\n\thosts, err := c.Client.Hosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := make([]HostClient, len(hosts))\n\tfor i, h := range hosts {\n\t\tres[i] = h\n\t}\n\treturn res, nil\n}\n\nfunc (c clusterClientWrapper) StreamHostEvents(ch chan *discoverd.Event) (stream.Stream, error) {\n\treturn c.Client.StreamHostEvents(ch)\n}\n\nvar AppNamePattern = regexp.MustCompile(`^[a-z\\d]+(-[a-z\\d]+)*$`)\n\nfunc ParseBasicAuth(h http.Header) (username, password string, err error) {\n\ts := strings.SplitN(h.Get(\"Authorization\"), \" \", 2)\n\n\tif len(s) != 2 {\n\t\treturn \"\", 
\"\", errors.New(\"failed to parse authentication string\")\n\t}\n\tif s[0] != \"Basic\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"authorization scheme is %v, not Basic\", s[0])\n\t}\n\n\tc, err := base64.StdEncoding.DecodeString(s[1])\n\tif err != nil {\n\t\treturn \"\", \"\", errors.New(\"failed to parse base64 basic credentials\")\n\t}\n\n\ts = strings.SplitN(string(c), \":\", 2)\n\tif len(s) != 2 {\n\t\treturn \"\", \"\", errors.New(\"failed to parse basic credentials\")\n\t}\n\n\treturn s[0], s[1], nil\n}\ncontroller: Inject Flynn app name to the app's ENV varspackage utils\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tct \"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/discoverd\/client\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/host\/volume\"\n\t\"github.com\/flynn\/flynn\/pkg\/cluster\"\n\t\"github.com\/flynn\/flynn\/pkg\/stream\"\n)\n\nfunc JobConfig(f *ct.ExpandedFormation, name, hostID string) *host.Job {\n\tt := f.Release.Processes[name]\n\tenv := make(map[string]string, len(f.Release.Env)+len(t.Env)+5)\n\tfor k, v := range f.Release.Env {\n\t\tenv[k] = v\n\t}\n\tfor k, v := range t.Env {\n\t\tenv[k] = v\n\t}\n\tid := cluster.GenerateJobID(hostID)\n\tenv[\"FLYNN_APP_ID\"] = f.App.ID\n\tenv[\"FLYNN_APP_NAME\"] = f.App.Name\n\tenv[\"FLYNN_RELEASE_ID\"] = f.Release.ID\n\tenv[\"FLYNN_PROCESS_TYPE\"] = name\n\tenv[\"FLYNN_JOB_ID\"] = id\n\tjob := &host.Job{\n\t\tID: id,\n\t\tMetadata: map[string]string{\n\t\t\t\"flynn-controller.app\": f.App.ID,\n\t\t\t\"flynn-controller.app_name\": f.App.Name,\n\t\t\t\"flynn-controller.release\": f.Release.ID,\n\t\t\t\"flynn-controller.type\": name,\n\t\t},\n\t\tArtifact: host.Artifact{\n\t\t\tType: f.Artifact.Type,\n\t\t\tURI: f.Artifact.URI,\n\t\t},\n\t\tConfig: host.ContainerConfig{\n\t\t\tCmd: t.Cmd,\n\t\t\tEnv: env,\n\t\t\tHostNetwork: t.HostNetwork,\n\t\t},\n\t\tResurrect: t.Resurrect,\n\t\tResources: t.Resources,\n\t}\n\tif len(t.Entrypoint) > 0 {\n\t\tjob.Config.Entrypoint = t.Entrypoint\n\t}\n\tjob.Config.Ports = make([]host.Port, len(t.Ports))\n\tfor i, p := range t.Ports {\n\t\tjob.Config.Ports[i].Proto = p.Proto\n\t\tjob.Config.Ports[i].Port = p.Port\n\t\tjob.Config.Ports[i].Service = p.Service\n\t}\n\treturn job\n}\n\nfunc ProvisionVolume(h VolumeCreator, job *host.Job) error {\n\tvol, err := h.CreateVolume(\"default\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tjob.Config.Volumes = []host.VolumeBinding{{\n\t\tTarget: \"\/data\",\n\t\tVolumeID: vol.ID,\n\t\tWriteable: true,\n\t}}\n\treturn nil\n}\n\nfunc JobMetaFromMetadata(metadata map[string]string) map[string]string {\n\tjobMeta := make(map[string]string, len(metadata))\n\tfor k, v := range metadata {\n\t\tif strings.HasPrefix(k, \"flynn-controller.\") {\n\t\t\tcontinue\n\t\t}\n\t\tjobMeta[k] = v\n\t}\n\treturn jobMeta\n}\n\ntype FormationKey struct {\n\tAppID, ReleaseID string\n}\n\nfunc NewFormationKey(appID, releaseID string) FormationKey {\n\treturn FormationKey{AppID: appID, ReleaseID: releaseID}\n}\n\nfunc ExpandFormation(c ControllerClient, f *ct.Formation) (*ct.ExpandedFormation, error) {\n\tapp, err := c.GetApp(f.AppID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting app: %s\", err)\n\t}\n\n\trelease, err := c.GetRelease(f.ReleaseID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting release: %s\", err)\n\t}\n\n\tartifact, err := c.GetArtifact(release.ArtifactID)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"error getting artifact: %s\", err)\n\t}\n\n\tprocs := make(map[string]int)\n\tfor typ, count := range f.Processes {\n\t\tprocs[typ] = count\n\t}\n\n\tef := &ct.ExpandedFormation{\n\t\tApp: app,\n\t\tRelease: release,\n\t\tArtifact: artifact,\n\t\tProcesses: procs,\n\t\tUpdatedAt: time.Now(),\n\t}\n\tif f.UpdatedAt != nil {\n\t\tef.UpdatedAt = *f.UpdatedAt\n\t}\n\treturn ef, nil\n}\n\ntype VolumeCreator interface {\n\tCreateVolume(string) (*volume.Info, error)\n}\n\ntype HostClient interface {\n\tVolumeCreator\n\tID() string\n\tAddJob(*host.Job) error\n\tGetJob(id string) (*host.ActiveJob, error)\n\tAttach(*host.AttachReq, bool) (cluster.AttachClient, error)\n\tStopJob(string) error\n\tListJobs() (map[string]host.ActiveJob, error)\n\tStreamEvents(id string, ch chan *host.Event) (stream.Stream, error)\n\tGetStatus() (*host.HostStatus, error)\n}\n\ntype ClusterClient interface {\n\tHost(string) (HostClient, error)\n\tHosts() ([]HostClient, error)\n\tStreamHostEvents(chan *discoverd.Event) (stream.Stream, error)\n}\n\ntype ControllerClient interface {\n\tGetApp(appID string) (*ct.App, error)\n\tGetRelease(releaseID string) (*ct.Release, error)\n\tGetArtifact(artifactID string) (*ct.Artifact, error)\n\tGetFormation(appID, releaseID string) (*ct.Formation, error)\n\tCreateApp(app *ct.App) error\n\tCreateRelease(release *ct.Release) error\n\tCreateArtifact(artifact *ct.Artifact) error\n\tPutFormation(formation *ct.Formation) error\n\tStreamFormations(since *time.Time, ch chan<- *ct.ExpandedFormation) (stream.Stream, error)\n\tAppList() ([]*ct.App, error)\n\tFormationList(appID string) ([]*ct.Formation, error)\n\tPutJob(*ct.Job) error\n}\n\nfunc ClusterClientWrapper(c *cluster.Client) clusterClientWrapper {\n\treturn clusterClientWrapper{c}\n}\n\ntype clusterClientWrapper struct {\n\t*cluster.Client\n}\n\nfunc (c clusterClientWrapper) Host(id string) (HostClient, error) {\n\treturn c.Client.Host(id)\n}\n\nfunc (c clusterClientWrapper) Hosts() ([]HostClient, error) {\n\thosts, err := c.Client.Hosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := make([]HostClient, len(hosts))\n\tfor i, h := range hosts {\n\t\tres[i] = h\n\t}\n\treturn res, nil\n}\n\nfunc (c clusterClientWrapper) StreamHostEvents(ch chan *discoverd.Event) (stream.Stream, error) {\n\treturn c.Client.StreamHostEvents(ch)\n}\n\nvar AppNamePattern = regexp.MustCompile(`^[a-z\\d]+(-[a-z\\d]+)*$`)\n\nfunc ParseBasicAuth(h http.Header) (username, password string, err error) {\n\ts := strings.SplitN(h.Get(\"Authorization\"), \" \", 2)\n\n\tif len(s) != 2 {\n\t\treturn \"\", \"\", errors.New(\"failed to parse authentication string\")\n\t}\n\tif s[0] != \"Basic\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"authorization scheme is %v, not Basic\", s[0])\n\t}\n\n\tc, err := base64.StdEncoding.DecodeString(s[1])\n\tif err != nil {\n\t\treturn \"\", \"\", errors.New(\"failed to parse base64 basic credentials\")\n\t}\n\n\ts = strings.SplitN(string(c), \":\", 2)\n\tif len(s) != 2 {\n\t\treturn \"\", \"\", errors.New(\"failed to parse basic credentials\")\n\t}\n\n\treturn s[0], s[1], nil\n}\n<|endoftext|>"} {"text":"package etcdhttp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/context\"\n\t\"github.com\/coreos\/etcd\/elog\"\n\tetcdserver \"github.com\/coreos\/etcd\/etcdserver2\"\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"github.com\/coreos\/etcd\/store\"\n)\n\nvar errClosed = errors.New(\"etcdhttp: client closed 
connection\")\n\nconst DefaultTimeout = 500 * time.Millisecond\n\ntype Handler struct {\n\tTimeout time.Duration\n\tServer etcdserver.Server\n}\n\nfunc (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO: set read\/write timeout?\n\n\ttimeout := h.Timeout\n\tif timeout == 0 {\n\t\ttimeout = DefaultTimeout\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tswitch {\n\tcase strings.HasPrefix(r.URL.Path, \"\/raft\"):\n\t\th.serveRaft(ctx, w, r)\n\tcase strings.HasPrefix(r.URL.Path, \"\/keys\/\"):\n\t\th.serveKeys(ctx, w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}\n\nfunc (h Handler) serveKeys(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\trr, err := parseRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tresp, err := h.Server.Do(ctx, rr)\n\tif err != nil {\n\t\t\/\/ TODO(bmizerany): switch on store errors and etcdserver.ErrUnknownMethod\n\t\tpanic(\"TODO\")\n\t}\n\n\tif err := encodeResponse(ctx, w, resp); err != nil {\n\t\thttp.Error(w, \"Timeout while waiting for response\", 504)\n\t}\n}\n\nfunc (h Handler) serveRaft(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\telog.TODO()\n\t}\n\tvar m raft.Message\n\tif err := m.Unmarshal(b); err != nil {\n\t\telog.TODO()\n\t}\n\tif err := h.Server.Node.Step(ctx, m); err != nil {\n\t\telog.TODO()\n\t}\n}\n\nfunc parseRequest(r *http.Request) (etcdserver.Request, error) {\n\treturn etcdserver.Request{}, nil\n}\n\nfunc encodeResponse(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response) (err error) {\n\tvar ev *store.Event\n\tswitch {\n\tcase resp.Event != nil:\n\t\tev = resp.Event\n\tcase resp.Watcher != nil:\n\t\tev, err = waitForEvent(ctx, w, resp.Watcher)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tpanic(\"should not be reachable\")\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Header().Add(\"X-Etcd-Index\", fmt.Sprint(ev.Index()))\n\n\tif ev.IsCreated() {\n\t\tw.WriteHeader(http.StatusCreated)\n\t} else {\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\n\tif err := json.NewEncoder(w).Encode(ev); err != nil {\n\t\tpanic(err) \/\/ should never be reached\n\t}\n\treturn nil\n}\n\nfunc waitForEvent(ctx context.Context, w http.ResponseWriter, wa *store.Watcher) (*store.Event, error) {\n\t\/\/ TODO(bmizerany): support streaming?\n\tdefer wa.Remove()\n\tvar nch <-chan bool\n\tif x, ok := w.(http.CloseNotifier); ok {\n\t\tnch = x.CloseNotify()\n\t}\n\n\tselect {\n\tcase ev := <-wa.EventChan:\n\t\treturn ev, nil\n\tcase <-nch:\n\t\telog.TODO()\n\t\treturn nil, errClosed\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}\netcdserver\/etcdhttp: parseRequestpackage etcdhttp\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/context\"\n\t\"github.com\/coreos\/etcd\/elog\"\n\tetcdserver \"github.com\/coreos\/etcd\/etcdserver2\"\n\t\"github.com\/coreos\/etcd\/raft\"\n\t\"github.com\/coreos\/etcd\/store\"\n)\n\nvar errClosed = errors.New(\"etcdhttp: client closed connection\")\n\nconst DefaultTimeout = 500 * time.Millisecond\n\ntype Handler struct {\n\tTimeout time.Duration\n\tServer etcdserver.Server\n}\n\nfunc (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t\/\/ TODO: set read\/write timeout?\n\n\ttimeout := h.Timeout\n\tif timeout == 0 {\n\t\ttimeout = 
DefaultTimeout\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tswitch {\n\tcase strings.HasPrefix(r.URL.Path, \"\/raft\"):\n\t\th.serveRaft(ctx, w, r)\n\tcase strings.HasPrefix(r.URL.Path, \"\/keys\/\"):\n\t\th.serveKeys(ctx, w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}\n\nfunc (h Handler) serveKeys(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\trr, err := parseRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tresp, err := h.Server.Do(ctx, rr)\n\tif err != nil {\n\t\t\/\/ TODO(bmizerany): switch on store errors and etcdserver.ErrUnknownMethod\n\t\tpanic(\"TODO\")\n\t}\n\n\tif err := encodeResponse(ctx, w, resp); err != nil {\n\t\thttp.Error(w, \"Timeout while waiting for response\", 504)\n\t}\n}\n\nfunc (h Handler) serveRaft(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\telog.TODO()\n\t}\n\tvar m raft.Message\n\tif err := m.Unmarshal(b); err != nil {\n\t\telog.TODO()\n\t}\n\tif err := h.Server.Node.Step(ctx, m); err != nil {\n\t\telog.TODO()\n\t}\n}\n\nfunc genId() int64 {\n\tpanic(\"implement me\")\n}\n\nfunc parseRequest(r *http.Request) (etcdserver.Request, error) {\n\tq := r.URL.Query()\n\trr := etcdserver.Request{\n\t\tId: genId(),\n\t\tMethod: r.Method,\n\t\tPath: r.URL.Path[len(\"\/keys\/\"):],\n\t\tVal: q.Get(\"value\"),\n\t\tPrevValue: q.Get(\"prevValue\"),\n\t\tPrevIndex: parseUint64(q.Get(\"prevIndex\")),\n\t\tRecursive: parseBool(q.Get(\"recursive\")),\n\t\tSince: parseUint64(q.Get(\"waitIndex\")),\n\t\tSorted: parseBool(q.Get(\"sorted\")),\n\t\tWait: parseBool(q.Get(\"wait\")),\n\t}\n\n\t\/\/ PrevExists is nullable, so we leave it null if prevExist wasn't\n\t\/\/ specified.\n\t_, ok := q[\"prevExist\"]\n\tif ok {\n\t\tbv := parseBool(q.Get(\"prevExist\"))\n\t\trr.PrevExists = &bv\n\t}\n\n\tttl := parseUint64(q.Get(\"ttl\"))\n\tif ttl > 0 {\n\t\texpr := time.Duration(ttl) * time.Second\n\t\trr.Expiration = time.Now().Add(expr).UnixNano()\n\t}\n\n\treturn rr, nil\n}\n\nfunc parseBool(s string) bool {\n\tv, _ := strconv.ParseBool(s)\n\treturn v\n}\n\nfunc parseUint64(s string) uint64 {\n\tv, _ := strconv.ParseUint(s, 10, 64)\n\treturn v\n}\n\nfunc encodeResponse(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response) (err error) {\n\tvar ev *store.Event\n\tswitch {\n\tcase resp.Event != nil:\n\t\tev = resp.Event\n\tcase resp.Watcher != nil:\n\t\tev, err = waitForEvent(ctx, w, resp.Watcher)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tpanic(\"should not be reachable\")\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Header().Add(\"X-Etcd-Index\", fmt.Sprint(ev.Index()))\n\n\tif ev.IsCreated() {\n\t\tw.WriteHeader(http.StatusCreated)\n\t} else {\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\n\tif err := json.NewEncoder(w).Encode(ev); err != nil {\n\t\tpanic(err) \/\/ should never be reached\n\t}\n\treturn nil\n}\n\nfunc waitForEvent(ctx context.Context, w http.ResponseWriter, wa *store.Watcher) (*store.Event, error) {\n\t\/\/ TODO(bmizerany): support streaming?\n\tdefer wa.Remove()\n\tvar nch <-chan bool\n\tif x, ok := w.(http.CloseNotifier); ok {\n\t\tnch = x.CloseNotify()\n\t}\n\n\tselect {\n\tcase ev := <-wa.EventChan:\n\t\treturn ev, nil\n\tcase <-nch:\n\t\telog.TODO()\n\t\treturn nil, errClosed\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}\n<|endoftext|>"} {"text":"package ethchain\n\nimport 
(\n\t\"fmt\"\n\t\"github.com\/ethereum\/eth-go\/ethtrie\"\n\t\"github.com\/ethereum\/eth-go\/ethutil\"\n\t\"math\/big\"\n)\n\n\/*\n * The State transitioning model\n *\n * A state transition is a change made when a transaction is applied to the current world state\n * The state transitioning model does all the necessary work to work out a valid new state root.\n * 1) Nonce handling\n * 2) Pre-pay \/ buy gas of the coinbase (miner)\n * 3) Create a new state object if the recipient is \\0*32\n * 4) Value transfer\n * == If contract creation ==\n * 4a) Attempt to run transaction data\n * 4b) If valid, use result as code for the new state object\n * == end ==\n * 5) Run Script section\n * 6) Derive new state root\n *\/\ntype StateTransition struct {\n\tcoinbase, receiver []byte\n\ttx *Transaction\n\tgas, gasPrice *big.Int\n\tvalue *big.Int\n\tdata []byte\n\tstate *State\n\tblock *Block\n\n\tcb, rec, sen *StateObject\n}\n\nfunc NewStateTransition(coinbase *StateObject, tx *Transaction, state *State, block *Block) *StateTransition {\n\treturn &StateTransition{coinbase.Address(), tx.Recipient, tx, new(big.Int), new(big.Int).Set(tx.GasPrice), tx.Value, tx.Data, state, block, coinbase, nil, nil}\n}\n\nfunc (self *StateTransition) Coinbase() *StateObject {\n\tif self.cb != nil {\n\t\treturn self.cb\n\t}\n\n\tself.cb = self.state.GetAccount(self.coinbase)\n\treturn self.cb\n}\nfunc (self *StateTransition) Sender() *StateObject {\n\tif self.sen != nil {\n\t\treturn self.sen\n\t}\n\n\tself.sen = self.state.GetAccount(self.tx.Sender())\n\n\treturn self.sen\n}\nfunc (self *StateTransition) Receiver() *StateObject {\n\tif self.tx != nil && self.tx.CreatesContract() {\n\t\treturn nil\n\t}\n\n\tif self.rec != nil {\n\t\treturn self.rec\n\t}\n\n\tself.rec = self.state.GetAccount(self.tx.Recipient)\n\treturn self.rec\n}\n\nfunc (self *StateTransition) MakeStateObject(state *State, tx *Transaction) *StateObject {\n\tcontract := MakeContract(tx, state)\n\n\treturn contract\n}\n\nfunc (self *StateTransition) UseGas(amount *big.Int) error {\n\tif self.gas.Cmp(amount) < 0 {\n\t\treturn OutOfGasError()\n\t}\n\tself.gas.Sub(self.gas, amount)\n\n\treturn nil\n}\n\nfunc (self *StateTransition) AddGas(amount *big.Int) {\n\tself.gas.Add(self.gas, amount)\n}\n\nfunc (self *StateTransition) BuyGas() error {\n\tvar err error\n\n\tsender := self.Sender()\n\tif sender.Amount.Cmp(self.tx.GasValue()) < 0 {\n\t\treturn fmt.Errorf(\"Insufficient funds to pre-pay gas. 
Req %v, has %v\", self.tx.GasValue(), sender.Amount)\n\t}\n\n\tcoinbase := self.Coinbase()\n\terr = coinbase.BuyGas(self.tx.Gas, self.tx.GasPrice)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tself.AddGas(self.tx.Gas)\n\tsender.SubAmount(self.tx.GasValue())\n\n\treturn nil\n}\n\nfunc (self *StateTransition) RefundGas() {\n\tcoinbase, sender := self.Coinbase(), self.Sender()\n\tcoinbase.RefundGas(self.gas, self.tx.GasPrice)\n\n\t\/\/ Return remaining gas\n\tremaining := new(big.Int).Mul(self.gas, self.tx.GasPrice)\n\tsender.AddAmount(remaining)\n}\n\nfunc (self *StateTransition) preCheck() (err error) {\n\tvar (\n\t\ttx = self.tx\n\t\tsender = self.Sender()\n\t)\n\n\t\/\/ Make sure this transaction's nonce is correct\n\tif sender.Nonce != tx.Nonce {\n\t\treturn NonceError(tx.Nonce, sender.Nonce)\n\t}\n\n\t\/\/ Pre-pay gas \/ Buy gas of the coinbase account\n\tif err = self.BuyGas(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (self *StateTransition) TransitionState() (err error) {\n\tstatelogger.Infof(\"(~) %x\\n\", self.tx.Hash())\n\n\t\/*\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tlogger.Infoln(r)\n\t\t\t\terr = fmt.Errorf(\"state transition err %v\", r)\n\t\t\t}\n\t\t}()\n\t*\/\n\n\t\/\/ XXX Transactions after this point are considered valid.\n\tif err = self.preCheck(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\ttx = self.tx\n\t\tsender = self.Sender()\n\t\treceiver *StateObject\n\t)\n\n\tdefer self.RefundGas()\n\n\t\/\/ Increment the nonce for the next transaction\n\tsender.Nonce += 1\n\n\t\/\/ Transaction gas\n\tif err = self.UseGas(GasTx); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Pay data gas\n\tdataPrice := big.NewInt(int64(len(self.data)))\n\tdataPrice.Mul(dataPrice, GasData)\n\tif err = self.UseGas(dataPrice); err != nil {\n\t\treturn\n\t}\n\n\t\/* FIXME\n\t * If tx goes TO \"0\", goes OOG during init, reverse changes, but initial endowment should happen. The ether is lost forever\n\t *\/\n\tvar snapshot *State\n\n\t\/\/ If the receiver is nil it's a contract (\\0*32).\n\tif tx.CreatesContract() {\n\t\tsnapshot = self.state.Copy()\n\n\t\t\/\/ Create a new state object for the contract\n\t\treceiver = self.MakeStateObject(self.state, tx)\n\t\tself.rec = receiver\n\t\tif receiver == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create contract\")\n\t\t}\n\t} else {\n\t\treceiver = self.Receiver()\n\t}\n\n\t\/\/ Transfer value from sender to receiver\n\tif err = self.transferValue(sender, receiver); err != nil {\n\t\treturn\n\t}\n\n\tif snapshot == nil {\n\t\tsnapshot = self.state.Copy()\n\t}\n\n\t\/\/ Process the init code and create 'valid' contract\n\tif IsContractAddr(self.receiver) {\n\t\t\/\/ Evaluate the initialization script\n\t\t\/\/ and use the return value as the\n\t\t\/\/ script section for the state object.\n\t\tself.data = nil\n\n\t\tcode, err := self.Eval(receiver.Init(), receiver, \"init\")\n\t\tif err != nil {\n\t\t\tself.state.Set(snapshot)\n\n\t\t\treturn fmt.Errorf(\"Error during init execution %v\", err)\n\t\t}\n\n\t\treceiver.script = code\n\t} else {\n\t\tif len(receiver.Script()) > 0 {\n\t\t\t_, err = self.Eval(receiver.Script(), receiver, \"code\")\n\t\t\tif err != nil {\n\t\t\t\tself.state.Set(snapshot)\n\n\t\t\t\treturn fmt.Errorf(\"Error during code execution %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (self *StateTransition) transferValue(sender, receiver *StateObject) error {\n\tif sender.Amount.Cmp(self.value) < 0 {\n\t\treturn fmt.Errorf(\"Insufficient funds to transfer value. 
Req %v, has %v\", self.value, sender.Amount)\n\t}\n\n\t\/\/ Subtract the amount from the senders account\n\tsender.SubAmount(self.value)\n\t\/\/ Add the amount to receivers account which should conclude this transaction\n\treceiver.AddAmount(self.value)\n\n\treturn nil\n}\n\nfunc (self *StateTransition) Eval(script []byte, context *StateObject, typ string) (ret []byte, err error) {\n\tvar (\n\t\tblock = self.block\n\t\tinitiator = self.Sender()\n\t\tstate = self.state\n\t)\n\n\tclosure := NewClosure(initiator, context, script, state, self.gas, self.gasPrice)\n\tvm := NewVm(state, nil, RuntimeVars{\n\t\tOrigin: initiator.Address(),\n\t\tBlock: block,\n\t\tBlockNumber: block.Number,\n\t\tPrevHash: block.PrevHash,\n\t\tCoinbase: block.Coinbase,\n\t\tTime: block.Time,\n\t\tDiff: block.Difficulty,\n\t\tValue: self.value,\n\t})\n\tvm.Verbose = true\n\tvm.Fn = typ\n\n\tret, err = Call(vm, closure, self.data)\n\n\treturn\n}\n\nfunc Call(vm *Vm, closure *Closure, data []byte) (ret []byte, err error) {\n\tret, _, err = closure.Call(vm, data)\n\n\tif ethutil.Config.Paranoia {\n\t\tvar (\n\t\t\tcontext = closure.object\n\t\t\ttrie = context.state.trie\n\t\t)\n\n\t\tvalid, t2 := ethtrie.ParanoiaCheck(trie)\n\t\tif !valid {\n\t\t\t\/\/ TODO FIXME ASAP\n\t\t\tcontext.state.trie = t2\n\n\t\t\tstatelogger.Infoln(\"Warn: PARANOIA: Different state object roots during copy\")\n\t\t}\n\t}\n\n\treturn\n}\nParanoia check movedpackage ethchain\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n)\n\n\/*\n * The State transitioning model\n *\n * A state transition is a change made when a transaction is applied to the current world state\n * The state transitioning model does all the necessary work to work out a valid new state root.\n * 1) Nonce handling\n * 2) Pre-pay \/ buy gas of the coinbase (miner)\n * 3) Create a new state object if the recipient is \\0*32\n * 4) Value transfer\n * == If contract creation ==\n * 4a) Attempt to run transaction data\n * 4b) If valid, use result as code for the new state object\n * == end ==\n * 5) Run Script section\n * 6) Derive new state root\n *\/\ntype StateTransition struct {\n\tcoinbase, receiver []byte\n\ttx *Transaction\n\tgas, gasPrice *big.Int\n\tvalue *big.Int\n\tdata []byte\n\tstate *State\n\tblock *Block\n\n\tcb, rec, sen *StateObject\n}\n\nfunc NewStateTransition(coinbase *StateObject, tx *Transaction, state *State, block *Block) *StateTransition {\n\treturn &StateTransition{coinbase.Address(), tx.Recipient, tx, new(big.Int), new(big.Int).Set(tx.GasPrice), tx.Value, tx.Data, state, block, coinbase, nil, nil}\n}\n\nfunc (self *StateTransition) Coinbase() *StateObject {\n\tif self.cb != nil {\n\t\treturn self.cb\n\t}\n\n\tself.cb = self.state.GetAccount(self.coinbase)\n\treturn self.cb\n}\nfunc (self *StateTransition) Sender() *StateObject {\n\tif self.sen != nil {\n\t\treturn self.sen\n\t}\n\n\tself.sen = self.state.GetAccount(self.tx.Sender())\n\n\treturn self.sen\n}\nfunc (self *StateTransition) Receiver() *StateObject {\n\tif self.tx != nil && self.tx.CreatesContract() {\n\t\treturn nil\n\t}\n\n\tif self.rec != nil {\n\t\treturn self.rec\n\t}\n\n\tself.rec = self.state.GetAccount(self.tx.Recipient)\n\treturn self.rec\n}\n\nfunc (self *StateTransition) MakeStateObject(state *State, tx *Transaction) *StateObject {\n\tcontract := MakeContract(tx, state)\n\n\treturn contract\n}\n\nfunc (self *StateTransition) UseGas(amount *big.Int) error {\n\tif self.gas.Cmp(amount) < 0 {\n\t\treturn OutOfGasError()\n\t}\n\tself.gas.Sub(self.gas, amount)\n\n\treturn nil\n}\n\nfunc (self 
*StateTransition) AddGas(amount *big.Int) {\n\tself.gas.Add(self.gas, amount)\n}\n\nfunc (self *StateTransition) BuyGas() error {\n\tvar err error\n\n\tsender := self.Sender()\n\tif sender.Amount.Cmp(self.tx.GasValue()) < 0 {\n\t\treturn fmt.Errorf(\"Insufficient funds to pre-pay gas. Req %v, has %v\", self.tx.GasValue(), sender.Amount)\n\t}\n\n\tcoinbase := self.Coinbase()\n\terr = coinbase.BuyGas(self.tx.Gas, self.tx.GasPrice)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tself.AddGas(self.tx.Gas)\n\tsender.SubAmount(self.tx.GasValue())\n\n\treturn nil\n}\n\nfunc (self *StateTransition) RefundGas() {\n\tcoinbase, sender := self.Coinbase(), self.Sender()\n\tcoinbase.RefundGas(self.gas, self.tx.GasPrice)\n\n\t\/\/ Return remaining gas\n\tremaining := new(big.Int).Mul(self.gas, self.tx.GasPrice)\n\tsender.AddAmount(remaining)\n}\n\nfunc (self *StateTransition) preCheck() (err error) {\n\tvar (\n\t\ttx = self.tx\n\t\tsender = self.Sender()\n\t)\n\n\t\/\/ Make sure this transaction's nonce is correct\n\tif sender.Nonce != tx.Nonce {\n\t\treturn NonceError(tx.Nonce, sender.Nonce)\n\t}\n\n\t\/\/ Pre-pay gas \/ Buy gas of the coinbase account\n\tif err = self.BuyGas(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (self *StateTransition) TransitionState() (err error) {\n\tstatelogger.Infof(\"(~) %x\\n\", self.tx.Hash())\n\n\t\/*\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tlogger.Infoln(r)\n\t\t\t\terr = fmt.Errorf(\"state transition err %v\", r)\n\t\t\t}\n\t\t}()\n\t*\/\n\n\t\/\/ XXX Transactions after this point are considered valid.\n\tif err = self.preCheck(); err != nil {\n\t\treturn\n\t}\n\n\tvar (\n\t\ttx = self.tx\n\t\tsender = self.Sender()\n\t\treceiver *StateObject\n\t)\n\n\tdefer self.RefundGas()\n\n\t\/\/ Increment the nonce for the next transaction\n\tsender.Nonce += 1\n\n\t\/\/ Transaction gas\n\tif err = self.UseGas(GasTx); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Pay data gas\n\tdataPrice := big.NewInt(int64(len(self.data)))\n\tdataPrice.Mul(dataPrice, GasData)\n\tif err = self.UseGas(dataPrice); err != nil {\n\t\treturn\n\t}\n\n\t\/* FIXME\n\t * If tx goes TO \"0\", goes OOG during init, reverse changes, but initial endowment should happen. 
The ether is lost forever\n\t *\/\n\tvar snapshot *State\n\n\t\/\/ If the receiver is nil it's a contract (\\0*32).\n\tif tx.CreatesContract() {\n\t\tsnapshot = self.state.Copy()\n\n\t\t\/\/ Create a new state object for the contract\n\t\treceiver = self.MakeStateObject(self.state, tx)\n\t\tself.rec = receiver\n\t\tif receiver == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create contract\")\n\t\t}\n\t} else {\n\t\treceiver = self.Receiver()\n\t}\n\n\t\/\/ Transfer value from sender to receiver\n\tif err = self.transferValue(sender, receiver); err != nil {\n\t\treturn\n\t}\n\n\tif snapshot == nil {\n\t\tsnapshot = self.state.Copy()\n\t}\n\n\t\/\/ Process the init code and create 'valid' contract\n\tif IsContractAddr(self.receiver) {\n\t\t\/\/ Evaluate the initialization script\n\t\t\/\/ and use the return value as the\n\t\t\/\/ script section for the state object.\n\t\tself.data = nil\n\n\t\tcode, err := self.Eval(receiver.Init(), receiver, \"init\")\n\t\tif err != nil {\n\t\t\tself.state.Set(snapshot)\n\n\t\t\treturn fmt.Errorf(\"Error during init execution %v\", err)\n\t\t}\n\n\t\treceiver.script = code\n\t} else {\n\t\tif len(receiver.Script()) > 0 {\n\t\t\t_, err = self.Eval(receiver.Script(), receiver, \"code\")\n\t\t\tif err != nil {\n\t\t\t\tself.state.Set(snapshot)\n\n\t\t\t\treturn fmt.Errorf(\"Error during code execution %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (self *StateTransition) transferValue(sender, receiver *StateObject) error {\n\tif sender.Amount.Cmp(self.value) < 0 {\n\t\treturn fmt.Errorf(\"Insufficient funds to transfer value. Req %v, has %v\", self.value, sender.Amount)\n\t}\n\n\t\/\/ Subtract the amount from the senders account\n\tsender.SubAmount(self.value)\n\t\/\/ Add the amount to receivers account which should conclude this transaction\n\treceiver.AddAmount(self.value)\n\n\treturn nil\n}\n\nfunc (self *StateTransition) Eval(script []byte, context *StateObject, typ string) (ret []byte, err error) {\n\tvar (\n\t\tblock = self.block\n\t\tinitiator = self.Sender()\n\t\tstate = self.state\n\t)\n\n\tclosure := NewClosure(initiator, context, script, state, self.gas, self.gasPrice)\n\tvm := NewVm(state, nil, RuntimeVars{\n\t\tOrigin: initiator.Address(),\n\t\tBlock: block,\n\t\tBlockNumber: block.Number,\n\t\tPrevHash: block.PrevHash,\n\t\tCoinbase: block.Coinbase,\n\t\tTime: block.Time,\n\t\tDiff: block.Difficulty,\n\t\tValue: self.value,\n\t})\n\tvm.Verbose = true\n\tvm.Fn = typ\n\n\tret, err = Call(vm, closure, self.data)\n\n\treturn\n}\n\nfunc Call(vm *Vm, closure *Closure, data []byte) (ret []byte, err error) {\n\tret, _, err = closure.Call(vm, data)\n\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Richard Hawkins\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ Package app manages the main game loop.\n\npackage main\n\nimport (\n\t_ 
\"image\/png\"\n\t\"log\"\n\t\"runtime\"\n\n\t\"github.com\/go-gl\/gl\/v4.1-core\/gl\"\n\t\"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\t\"github.com\/go-gl\/mathgl\/mgl32\"\n\t\"github.com\/hurricanerix\/shade\/display\"\n\t\"github.com\/hurricanerix\/shade\/events\"\n\t\"github.com\/hurricanerix\/shade\/light\"\n\t\"github.com\/hurricanerix\/shade\/sprite\"\n)\n\nconst windowWidth = 640\nconst windowHeight = 480\n\nfunc init() {\n\t\/\/ GLFW event handling must run on the main OS thread\n\truntime.LockOSThread()\n}\n\nfunc main() {\n\tscreen, err := display.SetMode(\"03-lighting\", windowWidth, windowHeight)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to set display mode:\", err)\n\t}\n\tambientColor := mgl32.Vec4{0.2, 0.2, 0.2, 1.0}\n\n\tface, err := loadSprite(\"color.png\", \"normal.png\", 1, 1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tface.Bind(screen.Program)\n\n\tlight := light.Positional{\n\t\tPos: mgl32.Vec3{0.5, 0.5, 1.0},\n\t\tColor: mgl32.Vec4{0.8, 0.8, 1.0, 1.0},\n\t\tPower: 1000,\n\t}\n\n\tfor running := true; running; {\n\t\tscreen.Fill(0.0, 0.0, 0.0)\n\n\t\t\/\/ TODO move this somewhere else (maybe a Clear method of display)\n\t\tgl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n\n\t\t\/\/ TODO refactor events to be cleaner\n\t\tif screen.Window.ShouldClose() {\n\t\t\trunning = !screen.Window.ShouldClose()\n\t\t}\n\n\t\tfor _, event := range events.Get() {\n\t\t\tif event.KeyEvent && event.Action == glfw.Press && event.Key == glfw.KeyEscape {\n\t\t\t\trunning = false\n\t\t\t\tevent.Window.SetShouldClose(true)\n\t\t\t}\n\t\t\tif !event.KeyEvent {\n\t\t\t\tlight.Pos[0] = event.X\n\t\t\t\tlight.Pos[1] = float32(windowHeight) - event.Y\n\t\t\t}\n\t\t}\n\n\t\tpos := mgl32.Vec3{\n\t\t\twindowWidth\/2 - float32(face.Width)\/2,\n\t\t\twindowHeight\/2 - float32(face.Height)\/2,\n\t\t\t0}\n\t\te := sprite.Effects{\n\t\t\tScale: mgl32.Vec3{1.0, 1.0, 1.0},\n\t\t\tEnableLighting: true,\n\t\t\tAmbientColor: ambientColor,\n\t\t\tLight: light,\n\t\t}\n\t\tface.Draw(pos, &e)\n\n\t\tscreen.Flip()\n\n\t\t\/\/ TODO refactor events to be cleaner\n\t\tglfw.PollEvents()\n\t}\n\n}\n\nfunc loadSprite(colorPath, normalPath string, framesWide, framesHigh int) (*sprite.Context, error) {\n\tc, err := sprite.Load(colorPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn, err := sprite.Load(normalPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := sprite.New(c, n, framesWide, framesHigh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\nFix lighting\/\/ Copyright 2016 Richard Hawkins\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/ Package app manages the main game loop.\n\npackage main\n\nimport (\n\t_ 
\"image\/png\"\n\t\"log\"\n\t\"runtime\"\n\n\t\"github.com\/go-gl\/gl\/v4.1-core\/gl\"\n\t\"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\t\"github.com\/go-gl\/mathgl\/mgl32\"\n\t\"github.com\/hurricanerix\/shade\/display\"\n\t\"github.com\/hurricanerix\/shade\/events\"\n\t\"github.com\/hurricanerix\/shade\/light\"\n\t\"github.com\/hurricanerix\/shade\/sprite\"\n)\n\nconst windowWidth = 640\nconst windowHeight = 480\n\nfunc init() {\n\t\/\/ GLFW event handling must run on the main OS thread\n\truntime.LockOSThread()\n}\n\nfunc main() {\n\tscreen, err := display.SetMode(\"03-lighting\", windowWidth, windowHeight)\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to set display mode:\", err)\n\t}\n\tambientColor := mgl32.Vec4{0.2, 0.2, 0.2, 1.0}\n\n\tface, err := loadSprite(\"color.png\", \"normal.png\", 1, 1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tface.Bind(screen.Program)\n\n\tlight := light.Positional{\n\t\tPos: mgl32.Vec3{0.5, 0.5, 100.0},\n\t\tColor: mgl32.Vec4{0.8, 0.8, 1.0, 1.0},\n\t\tPower: 10000,\n\t}\n\n\tfor running := true; running; {\n\t\tscreen.Fill(0.0, 0.0, 0.0)\n\n\t\t\/\/ TODO move this somewhere else (maybe a Clear method of display)\n\t\tgl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n\n\t\t\/\/ TODO refactor events to be cleaner\n\t\tif screen.Window.ShouldClose() {\n\t\t\trunning = !screen.Window.ShouldClose()\n\t\t}\n\n\t\tfor _, event := range events.Get() {\n\t\t\tif event.KeyEvent && event.Action == glfw.Press && event.Key == glfw.KeyEscape {\n\t\t\t\trunning = false\n\t\t\t\tevent.Window.SetShouldClose(true)\n\t\t\t}\n\t\t\tif !event.KeyEvent {\n\t\t\t\tlight.Pos[0] = event.X\n\t\t\t\tlight.Pos[1] = float32(windowHeight) - event.Y\n\t\t\t}\n\t\t}\n\n\t\tpos := mgl32.Vec3{\n\t\t\twindowWidth\/2 - float32(face.Width)\/2,\n\t\t\twindowHeight\/2 - float32(face.Height)\/2,\n\t\t\t0}\n\t\te := sprite.Effects{\n\t\t\tScale: mgl32.Vec3{1.0, 1.0, 1.0},\n\t\t\tEnableLighting: true,\n\t\t\tAmbientColor: ambientColor,\n\t\t\tLight: light,\n\t\t}\n\t\tface.Draw(pos, &e)\n\n\t\tscreen.Flip()\n\n\t\t\/\/ TODO refactor events to be cleaner\n\t\tglfw.PollEvents()\n\t}\n\n}\n\nfunc loadSprite(colorPath, normalPath string, framesWide, framesHigh int) (*sprite.Context, error) {\n\tc, err := sprite.Load(colorPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn, err := sprite.Load(normalPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := sprite.New(c, n, framesWide, framesHigh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n<|endoftext|>"} {"text":"package errgroup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Check reports whether the \"err\" is not nil.\n\/\/ If it is a group then it returns true if that or its children contains any error.\nfunc Check(err error) error {\n\tif isNotNil(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Walk loops through each of the errors of \"err\".\n\/\/ If \"err\" is *Group then it fires the \"visitor\" for each of its errors, including children.\n\/\/ if \"err\" is *Error then it fires the \"visitor\" with its type and wrapped error.\n\/\/ Otherwise it fires the \"visitor\" once with typ of nil and err as \"err\".\nfunc Walk(err error, visitor func(typ interface{}, err error)) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif group, ok := err.(*Group); ok {\n\t\tlist := group.getAllErrors()\n\t\tfor _, entry := range list {\n\t\t\tif e, ok := entry.(*Error); ok {\n\t\t\t\tvisitor(e.Type, e.Err) \/\/ e.Unwrap() <-no.\n\t\t\t} else {\n\t\t\t\tvisitor(nil, 
err)\n\t\t\t}\n\t\t}\n\t} else if e, ok := err.(*Error); ok {\n\t\tvisitor(e.Type, e.Err)\n\t} else {\n\t\tvisitor(nil, err)\n\t}\n\n\treturn err\n}\n\n\/*\nfunc Errors(err error, conv bool) []error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif group, ok := err.(*Group); ok {\n\t\tlist := group.getAllErrors()\n\t\tif conv {\n\t\t\tfor i, entry := range list {\n\t\t\t\tif _, ok := entry.(*Error); !ok {\n\t\t\t\t\tlist[i] = &Error{Err: entry, Type: group.Type}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn list\n\t}\n\n\treturn []error{err}\n}\n\nfunc Type(err error) interface{} {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif e, ok := err.(*Error); ok && e.Err != nil {\n\t\treturn e.Type\n\t}\n\n\treturn nil\n}\n\nfunc Fill(parent *Group, errors []*Error) {\n\tfor _, err := range errors {\n\t\tif err.Type == parent.Type {\n\t\t\tparent.Add(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tparent.Group(err.Type).Err(err)\n\t}\n\treturn\n}\n*\/\n\n\/\/ Error implements the error interface.\n\/\/ It is a special error type which keeps the \"Type\" of the\n\/\/ Group that it's created through Group's `Err` and `Errf` methods.\ntype Error struct {\n\tErr error `json:\"error\" xml:\"Error\" yaml:\"Error\" toml:\"Error\" sql:\"error\"`\n\tType interface{} `json:\"type\" xml:\"Type\" yaml:\"Type\" toml:\"Type\" sql:\"type\"`\n}\n\n\/\/ Error returns the error message of the \"Err\".\nfunc (e *Error) Error() string {\n\treturn e.Err.Error()\n}\n\n\/\/ Unwrap calls and returns the result of the \"Err\" Unwrap method or nil.\nfunc (e *Error) Unwrap() error {\n\treturn errors.Unwrap(e.Err)\n}\n\n\/\/ Is reports whether the \"err\" is an *Error.\nfunc (e *Error) Is(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\tok := errors.Is(e.Err, err)\n\tif !ok {\n\t\tte, ok := err.(*Error)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn errors.Is(e.Err, te.Err)\n\t}\n\n\treturn ok\n}\n\n\/\/ As reports whether the \"target\" can be used as &Error{target.Type: ?}.\nfunc (e *Error) As(target interface{}) bool {\n\tif target == nil {\n\t\treturn target == e\n\t}\n\n\tok := errors.As(e.Err, target)\n\tif !ok {\n\t\tte, ok := target.(*Error)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif te.Type != nil {\n\t\t\tif te.Type != e.Type {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn errors.As(e.Err, &te.Err)\n\t}\n\n\treturn ok\n}\n\n\/\/ Group is an error container of a specific Type and can have child containers per type too.\ntype Group struct {\n\tparent *Group\n\t\/\/ a list of children groups, used to get or create new group through Group method.\n\tchildren map[interface{}]*Group\n\tdepth int\n\n\tType interface{}\n\tErrors []error \/\/ []*Error\n\n\t\/\/ if true then this Group's Error method will return the messages of the errors made by this Group's Group method.\n\t\/\/ Defaults to true.\n\tIncludeChildren bool \/\/ it clones.\n\t\/\/ IncludeTypeText bool\n\tindex int \/\/ group index.\n}\n\n\/\/ New returns a new empty Group.\nfunc New(typ interface{}) *Group {\n\treturn &Group{\n\t\tType: typ,\n\t\tIncludeChildren: true,\n\t}\n}\n\nconst delim = \"\\n\"\n\nfunc (g *Group) Error() (s string) {\n\tif len(g.Errors) > 0 {\n\t\tmsgs := make([]string, len(g.Errors), len(g.Errors))\n\t\tfor i, err := range g.Errors {\n\t\t\tmsgs[i] = err.Error()\n\t\t}\n\n\t\ts = strings.Join(msgs, delim)\n\t}\n\n\tif g.IncludeChildren && len(g.children) > 0 {\n\t\t\/\/ return with order of definition.\n\t\tgroups := g.getAllChildren()\n\t\tsortGroups(groups)\n\n\t\tfor _, ge := range groups {\n\t\t\tfor _, childErr 
:= range ge.Errors {\n\t\t\t\ts += childErr.Error() + delim\n\t\t\t}\n\t\t}\n\n\t\tif s != \"\" {\n\t\t\treturn s[:len(s)-1]\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (g *Group) getAllErrors() []error {\n\tlist := g.Errors[:]\n\n\tif len(g.children) > 0 {\n\t\t\/\/ return with order of definition.\n\t\tgroups := g.getAllChildren()\n\t\tsortGroups(groups)\n\n\t\tfor _, ge := range groups {\n\t\t\tlist = append(list, ge.Errors...)\n\t\t}\n\t}\n\n\treturn list\n}\n\nfunc (g *Group) getAllChildren() []*Group {\n\tif len(g.children) == 0 {\n\t\treturn nil\n\t}\n\n\tvar groups []*Group\n\tfor _, child := range g.children {\n\t\tgroups = append(groups, append([]*Group{child}, child.getAllChildren()...)...)\n\t}\n\n\treturn groups\n}\n\n\/\/ Unwrap implements the dynamic std errors interface and it returns the parent Group.\nfunc (g *Group) Unwrap() error {\n\treturn g.parent\n}\n\n\/\/ Group creates a new group of \"typ\" type, if does not exist, and returns it.\nfunc (g *Group) Group(typ interface{}) *Group {\n\tif g.children == nil {\n\t\tg.children = make(map[interface{}]*Group)\n\t} else {\n\t\tfor _, child := range g.children {\n\t\t\tif child.Type == typ {\n\t\t\t\treturn child\n\t\t\t}\n\t\t}\n\t}\n\n\tchild := &Group{\n\t\tType: typ,\n\t\tparent: g,\n\t\tdepth: g.depth + 1,\n\t\tIncludeChildren: g.IncludeChildren,\n\t\tindex: g.index + 1 + len(g.children),\n\t}\n\n\tg.children[typ] = child\n\n\treturn child\n}\n\n\/\/ Add adds an error to the group.\nfunc (g *Group) Add(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tg.Errors = append(g.Errors, err)\n}\n\n\/\/ Addf adds an error to the group like `fmt.Errorf` and returns it.\nfunc (g *Group) Addf(format string, args ...interface{}) error {\n\terr := fmt.Errorf(format, args...)\n\tg.Add(err)\n\treturn err\n}\n\n\/\/ Err adds an error the the group, it transforms it to an Error type if necessary and returns it.\nfunc (g *Group) Err(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\te, ok := err.(*Error)\n\tif !ok {\n\t\tif ge, ok := err.(*Group); ok {\n\t\t\tif g.children == nil {\n\t\t\t\tg.children = make(map[interface{}]*Group)\n\t\t\t}\n\n\t\t\tg.children[ge.Type] = ge\n\t\t\treturn ge\n\t\t}\n\n\t\te = &Error{err, 0}\n\t}\n\te.Type = g.Type\n\n\tg.Add(e)\n\treturn e\n}\n\n\/\/ Errf adds an error like `fmt.Errorf` and returns it.\nfunc (g *Group) Errf(format string, args ...interface{}) error {\n\treturn g.Err(fmt.Errorf(format, args...))\n}\n\nfunc sortGroups(groups []*Group) {\n\tsort.Slice(groups, func(i, j int) bool {\n\t\treturn groups[i].index < groups[j].index\n\t})\n}\n\nfunc tryGetTypeText(typ interface{}) string {\n\tif typ == nil {\n\t\treturn \"\"\n\t}\n\n\tswitch v := typ.(type) {\n\tcase string:\n\t\treturn v\n\tcase fmt.Stringer:\n\t\treturn v.String()\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc isNotNil(err error) bool {\n\tif g, ok := err.(*Group); ok {\n\t\tif len(g.Errors) > 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tif len(g.children) > 0 {\n\t\t\tfor _, child := range g.children {\n\t\t\t\tif isNotNil(child) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\treturn err != nil\n}\nFix typopackage errgroup\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Check reports whether the \"err\" is not nil.\n\/\/ If it is a group then it returns true if that or its children contains any error.\nfunc Check(err error) error {\n\tif isNotNil(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Walk loops through each of the errors of \"err\".\n\/\/ If \"err\" is 
*Group then it fires the \"visitor\" for each of its errors, including children.\n\/\/ if \"err\" is *Error then it fires the \"visitor\" with its type and wrapped error.\n\/\/ Otherwise it fires the \"visitor\" once with typ of nil and err as \"err\".\nfunc Walk(err error, visitor func(typ interface{}, err error)) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif group, ok := err.(*Group); ok {\n\t\tlist := group.getAllErrors()\n\t\tfor _, entry := range list {\n\t\t\tif e, ok := entry.(*Error); ok {\n\t\t\t\tvisitor(e.Type, e.Err) \/\/ e.Unwrap() <-no.\n\t\t\t} else {\n\t\t\t\tvisitor(nil, err)\n\t\t\t}\n\t\t}\n\t} else if e, ok := err.(*Error); ok {\n\t\tvisitor(e.Type, e.Err)\n\t} else {\n\t\tvisitor(nil, err)\n\t}\n\n\treturn err\n}\n\n\/*\nfunc Errors(err error, conv bool) []error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif group, ok := err.(*Group); ok {\n\t\tlist := group.getAllErrors()\n\t\tif conv {\n\t\t\tfor i, entry := range list {\n\t\t\t\tif _, ok := entry.(*Error); !ok {\n\t\t\t\t\tlist[i] = &Error{Err: entry, Type: group.Type}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn list\n\t}\n\n\treturn []error{err}\n}\n\nfunc Type(err error) interface{} {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif e, ok := err.(*Error); ok && e.Err != nil {\n\t\treturn e.Type\n\t}\n\n\treturn nil\n}\n\nfunc Fill(parent *Group, errors []*Error) {\n\tfor _, err := range errors {\n\t\tif err.Type == parent.Type {\n\t\t\tparent.Add(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tparent.Group(err.Type).Err(err)\n\t}\n\treturn\n}\n*\/\n\n\/\/ Error implements the error interface.\n\/\/ It is a special error type which keeps the \"Type\" of the\n\/\/ Group that it's created through Group's `Err` and `Errf` methods.\ntype Error struct {\n\tErr error `json:\"error\" xml:\"Error\" yaml:\"Error\" toml:\"Error\" sql:\"error\"`\n\tType interface{} `json:\"type\" xml:\"Type\" yaml:\"Type\" toml:\"Type\" sql:\"type\"`\n}\n\n\/\/ Error returns the error message of the \"Err\".\nfunc (e *Error) Error() string {\n\treturn e.Err.Error()\n}\n\n\/\/ Unwrap calls and returns the result of the \"Err\" Unwrap method or nil.\nfunc (e *Error) Unwrap() error {\n\treturn errors.Unwrap(e.Err)\n}\n\n\/\/ Is reports whether the \"err\" is an *Error.\nfunc (e *Error) Is(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\tok := errors.Is(e.Err, err)\n\tif !ok {\n\t\tte, ok := err.(*Error)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn errors.Is(e.Err, te.Err)\n\t}\n\n\treturn ok\n}\n\n\/\/ As reports whether the \"target\" can be used as &Error{target.Type: ?}.\nfunc (e *Error) As(target interface{}) bool {\n\tif target == nil {\n\t\treturn target == e\n\t}\n\n\tok := errors.As(e.Err, target)\n\tif !ok {\n\t\tte, ok := target.(*Error)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif te.Type != nil {\n\t\t\tif te.Type != e.Type {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn errors.As(e.Err, &te.Err)\n\t}\n\n\treturn ok\n}\n\n\/\/ Group is an error container of a specific Type and can have child containers per type too.\ntype Group struct {\n\tparent *Group\n\t\/\/ a list of children groups, used to get or create new group through Group method.\n\tchildren map[interface{}]*Group\n\tdepth int\n\n\tType interface{}\n\tErrors []error \/\/ []*Error\n\n\t\/\/ if true then this Group's Error method will return the messages of the errors made by this Group's Group method.\n\t\/\/ Defaults to true.\n\tIncludeChildren bool \/\/ it clones.\n\t\/\/ IncludeTypeText bool\n\tindex int \/\/ group index.\n}\n\n\/\/ New 
returns a new empty Group.\nfunc New(typ interface{}) *Group {\n\treturn &Group{\n\t\tType: typ,\n\t\tIncludeChildren: true,\n\t}\n}\n\nconst delim = \"\\n\"\n\nfunc (g *Group) Error() (s string) {\n\tif len(g.Errors) > 0 {\n\t\tmsgs := make([]string, len(g.Errors), len(g.Errors))\n\t\tfor i, err := range g.Errors {\n\t\t\tmsgs[i] = err.Error()\n\t\t}\n\n\t\ts = strings.Join(msgs, delim)\n\t}\n\n\tif g.IncludeChildren && len(g.children) > 0 {\n\t\t\/\/ return with order of definition.\n\t\tgroups := g.getAllChildren()\n\t\tsortGroups(groups)\n\n\t\tfor _, ge := range groups {\n\t\t\tfor _, childErr := range ge.Errors {\n\t\t\t\ts += childErr.Error() + delim\n\t\t\t}\n\t\t}\n\n\t\tif s != \"\" {\n\t\t\treturn s[:len(s)-1]\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (g *Group) getAllErrors() []error {\n\tlist := g.Errors[:]\n\n\tif len(g.children) > 0 {\n\t\t\/\/ return with order of definition.\n\t\tgroups := g.getAllChildren()\n\t\tsortGroups(groups)\n\n\t\tfor _, ge := range groups {\n\t\t\tlist = append(list, ge.Errors...)\n\t\t}\n\t}\n\n\treturn list\n}\n\nfunc (g *Group) getAllChildren() []*Group {\n\tif len(g.children) == 0 {\n\t\treturn nil\n\t}\n\n\tvar groups []*Group\n\tfor _, child := range g.children {\n\t\tgroups = append(groups, append([]*Group{child}, child.getAllChildren()...)...)\n\t}\n\n\treturn groups\n}\n\n\/\/ Unwrap implements the dynamic std errors interface and it returns the parent Group.\nfunc (g *Group) Unwrap() error {\n\treturn g.parent\n}\n\n\/\/ Group creates a new group of \"typ\" type, if does not exist, and returns it.\nfunc (g *Group) Group(typ interface{}) *Group {\n\tif g.children == nil {\n\t\tg.children = make(map[interface{}]*Group)\n\t} else {\n\t\tfor _, child := range g.children {\n\t\t\tif child.Type == typ {\n\t\t\t\treturn child\n\t\t\t}\n\t\t}\n\t}\n\n\tchild := &Group{\n\t\tType: typ,\n\t\tparent: g,\n\t\tdepth: g.depth + 1,\n\t\tIncludeChildren: g.IncludeChildren,\n\t\tindex: g.index + 1 + len(g.children),\n\t}\n\n\tg.children[typ] = child\n\n\treturn child\n}\n\n\/\/ Add adds an error to the group.\nfunc (g *Group) Add(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tg.Errors = append(g.Errors, err)\n}\n\n\/\/ Addf adds an error to the group like `fmt.Errorf` and returns it.\nfunc (g *Group) Addf(format string, args ...interface{}) error {\n\terr := fmt.Errorf(format, args...)\n\tg.Add(err)\n\treturn err\n}\n\n\/\/ Err adds an error to the group, it transforms it to an Error type if necessary and returns it.\nfunc (g *Group) Err(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\te, ok := err.(*Error)\n\tif !ok {\n\t\tif ge, ok := err.(*Group); ok {\n\t\t\tif g.children == nil {\n\t\t\t\tg.children = make(map[interface{}]*Group)\n\t\t\t}\n\n\t\t\tg.children[ge.Type] = ge\n\t\t\treturn ge\n\t\t}\n\n\t\te = &Error{err, 0}\n\t}\n\te.Type = g.Type\n\n\tg.Add(e)\n\treturn e\n}\n\n\/\/ Errf adds an error like `fmt.Errorf` and returns it.\nfunc (g *Group) Errf(format string, args ...interface{}) error {\n\treturn g.Err(fmt.Errorf(format, args...))\n}\n\nfunc sortGroups(groups []*Group) {\n\tsort.Slice(groups, func(i, j int) bool {\n\t\treturn groups[i].index < groups[j].index\n\t})\n}\n\nfunc tryGetTypeText(typ interface{}) string {\n\tif typ == nil {\n\t\treturn \"\"\n\t}\n\n\tswitch v := typ.(type) {\n\tcase string:\n\t\treturn v\n\tcase fmt.Stringer:\n\t\treturn v.String()\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc isNotNil(err error) bool {\n\tif g, ok := err.(*Group); ok {\n\t\tif len(g.Errors) > 0 {\n\t\t\treturn 
true\n\t\t}\n\n\t\tif len(g.children) > 0 {\n\t\t\tfor _, child := range g.children {\n\t\t\t\tif isNotNil(child) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\treturn err != nil\n}\n<|endoftext|>"} {"text":"package core\n\nimport (\n\t\"testing\"\n\n\tcontext \"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\tconfig \"github.com\/ipfs\/go-ipfs\/repo\/config\"\n\t\"github.com\/ipfs\/go-ipfs\/util\/testutil\"\n\t\"github.com\/ipfs\/go-ipfs\/repo\"\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n)\n\nfunc TestResolveInvalidPath(t *testing.T) {\n\tctx := context.TODO()\n\tid := testIdentity\n\n\tr := &repo.Mock{\n\t\tC: config.Config{\n\t\t\tIdentity: id,\n\t\t\tDatastore: config.Datastore{\n\t\t\t\tType: \"memory\",\n\t\t\t},\n\t\t\tAddresses: config.Addresses{\n\t\t\t\tSwarm: []string{\"\/ip4\/0.0.0.0\/tcp\/4001\"},\n\t\t\t\tAPI: \"\/ip4\/127.0.0.1\/tcp\/8000\",\n\t\t\t},\n\t\t},\n\t\tD: testutil.ThreadSafeCloserMapDatastore(),\n\t}\n\n\tn, err := NewIPFSNode(ctx, Standard(r, false))\n\tif n == nil || err != nil {\n\t\tt.Error(\"Should have constructed.\", err)\n\t}\n\n\t_, err = Resolve(ctx, n, path.Path(\"\/ipfs\/\"))\n\tif err == nil {\n\t\tt.Error(\"Should get invalid path\")\n\t}\n\n}\nFixed tests to actually test for the error we are seekingpackage core\n\nimport (\n\t\"testing\"\n\n\tpath \"github.com\/ipfs\/go-ipfs\/path\"\n\t\"strings\"\n)\n\nfunc TestResolveInvalidPath(t *testing.T) {\n\tn, err := NewMockNode()\n\tif n == nil || err != nil {\n\t\tt.Fatal(\"Should have constructed.\", err)\n\t}\n\n\t_, err = Resolve(n.Context(), n, path.Path(\"\/ipfs\/\"))\n\tif !strings.HasPrefix(err.Error(), \"invalid path\") {\n\t\tt.Fatal(\"Should get invalid path.\", err)\n\t}\n\n}\n<|endoftext|>"} {"text":"package types\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\"\n\t\"github.com\/ethereum\/go-ethereum\/logger\"\n\t\"github.com\/ethereum\/go-ethereum\/logger\/glog\"\n\t\"github.com\/ethereum\/go-ethereum\/rlp\"\n)\n\nfunc IsContractAddr(addr []byte) bool {\n\treturn len(addr) == 0\n}\n\ntype Transaction struct {\n\tAccountNonce uint64\n\tPrice *big.Int\n\tGasLimit *big.Int\n\tRecipient *common.Address `rlp:\"nil\"` \/\/ nil means contract creation\n\tAmount *big.Int\n\tPayload []byte\n\tV byte\n\tR, S *big.Int\n}\n\nfunc NewContractCreationTx(amount, gasLimit, gasPrice *big.Int, data []byte) *Transaction {\n\treturn &Transaction{\n\t\tRecipient: nil,\n\t\tAmount: amount,\n\t\tGasLimit: gasLimit,\n\t\tPrice: gasPrice,\n\t\tPayload: data,\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n}\n\nfunc NewTransactionMessage(to common.Address, amount, gasAmount, gasPrice *big.Int, data []byte) *Transaction {\n\treturn &Transaction{\n\t\tRecipient: &to,\n\t\tAmount: amount,\n\t\tGasLimit: gasAmount,\n\t\tPrice: gasPrice,\n\t\tPayload: data,\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n}\n\nfunc NewTransactionFromBytes(data []byte) *Transaction {\n\t\/\/ TODO: remove this function if possible. 
callers would be\n\t\/\/ much better off decoding into transaction directly.\n\t\/\/ it's not that hard.\n\ttx := new(Transaction)\n\trlp.DecodeBytes(data, tx)\n\treturn tx\n}\n\nfunc (tx *Transaction) Hash() common.Hash {\n\treturn rlpHash([]interface{}{\n\t\ttx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload,\n\t})\n}\n\nfunc (self *Transaction) Data() []byte {\n\treturn self.Payload\n}\n\nfunc (self *Transaction) Gas() *big.Int {\n\treturn self.GasLimit\n}\n\nfunc (self *Transaction) GasPrice() *big.Int {\n\treturn self.Price\n}\n\nfunc (self *Transaction) Value() *big.Int {\n\treturn self.Amount\n}\n\nfunc (self *Transaction) Nonce() uint64 {\n\treturn self.AccountNonce\n}\n\nfunc (self *Transaction) SetNonce(AccountNonce uint64) {\n\tself.AccountNonce = AccountNonce\n}\n\nfunc (self *Transaction) From() (common.Address, error) {\n\tpubkey, err := self.PublicKey()\n\tif err != nil {\n\t\treturn common.Address{}, err\n\t}\n\n\tvar addr common.Address\n\tcopy(addr[:], crypto.Sha3(pubkey[1:])[12:])\n\treturn addr, nil\n}\n\n\/\/ To returns the recipient of the transaction.\n\/\/ If transaction is a contract creation (with no recipient address)\n\/\/ To returns nil.\nfunc (tx *Transaction) To() *common.Address {\n\treturn tx.Recipient\n}\n\nfunc (tx *Transaction) GetSignatureValues() (v byte, r []byte, s []byte) {\n\tv = byte(tx.V)\n\tr = common.LeftPadBytes(tx.R.Bytes(), 32)\n\ts = common.LeftPadBytes(tx.S.Bytes(), 32)\n\treturn\n}\n\nfunc (tx *Transaction) PublicKey() ([]byte, error) {\n\tif !crypto.ValidateSignatureValues(tx.V, tx.R, tx.S) {\n\t\treturn nil, errors.New(\"invalid v, r, s values\")\n\t}\n\n\thash := tx.Hash()\n\tv, r, s := tx.GetSignatureValues()\n\tsig := append(r, s...)\n\tsig = append(sig, v-27)\n\n\tp, err := crypto.SigToPub(hash[:], sig)\n\tif err != nil {\n\t\tglog.V(logger.Error).Infof(\"Could not get pubkey from signature: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tpubkey := crypto.FromECDSAPub(p)\n\tif len(pubkey) == 0 || pubkey[0] != 4 {\n\t\treturn nil, errors.New(\"invalid public key\")\n\t}\n\treturn pubkey, nil\n}\n\nfunc (tx *Transaction) SetSignatureValues(sig []byte) error {\n\ttx.R = common.Bytes2Big(sig[:32])\n\ttx.S = common.Bytes2Big(sig[32:64])\n\ttx.V = sig[64] + 27\n\treturn nil\n}\n\nfunc (tx *Transaction) SignECDSA(prv *ecdsa.PrivateKey) error {\n\th := tx.Hash()\n\tsig, err := crypto.Sign(h[:], prv)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttx.SetSignatureValues(sig)\n\treturn nil\n}\n\n\/\/ TODO: remove\nfunc (tx *Transaction) RlpData() interface{} {\n\tdata := []interface{}{tx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload}\n\treturn append(data, tx.V, tx.R.Bytes(), tx.S.Bytes())\n}\n\nfunc (tx *Transaction) String() string {\n\tvar from, to string\n\tif f, err := tx.From(); err != nil {\n\t\tfrom = \"[invalid sender]\"\n\t} else {\n\t\tfrom = fmt.Sprintf(\"%x\", f[:])\n\t}\n\tif t := tx.To(); t == nil {\n\t\tto = \"[contract creation]\"\n\t} else {\n\t\tto = fmt.Sprintf(\"%x\", t[:])\n\t}\n\tenc, _ := rlp.EncodeToBytes(tx)\n\treturn fmt.Sprintf(`\n\tTX(%x)\n\tContract: %v\n\tFrom: %s\n\tTo: %s\n\tNonce: %v\n\tGasPrice: %v\n\tGasLimit: %v\n\tValue: %v\n\tData: 0x%x\n\tV: 0x%x\n\tR: 0x%x\n\tS: 0x%x\n\tHex: %x\n`,\n\t\ttx.Hash(),\n\t\tlen(tx.Recipient) == 0,\n\t\tfrom,\n\t\tto,\n\t\ttx.AccountNonce,\n\t\ttx.Price,\n\t\ttx.GasLimit,\n\t\ttx.Amount,\n\t\ttx.Payload,\n\t\ttx.V,\n\t\ttx.R,\n\t\ttx.S,\n\t\tenc,\n\t)\n}\n\n\/\/ Transaction slice type for basic sorting\ntype Transactions 
[]*Transaction\n\n\/\/ TODO: remove\nfunc (self Transactions) RlpData() interface{} {\n\t\/\/ Marshal the transactions of this block\n\tenc := make([]interface{}, len(self))\n\tfor i, tx := range self {\n\t\t\/\/ Cast it to a string (safe)\n\t\tenc[i] = tx.RlpData()\n\t}\n\n\treturn enc\n}\n\nfunc (s Transactions) Len() int { return len(s) }\nfunc (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s Transactions) GetRlp(i int) []byte {\n\tenc, _ := rlp.EncodeToBytes(s[i])\n\treturn enc\n}\n\ntype TxByNonce struct{ Transactions }\n\nfunc (s TxByNonce) Less(i, j int) bool {\n\treturn s.Transactions[i].AccountNonce < s.Transactions[j].AccountNonce\n}\ncore\/types: add Transaction.Sizepackage types\n\nimport (\n\t\"crypto\/ecdsa\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/crypto\"\n\t\"github.com\/ethereum\/go-ethereum\/logger\"\n\t\"github.com\/ethereum\/go-ethereum\/logger\/glog\"\n\t\"github.com\/ethereum\/go-ethereum\/rlp\"\n)\n\nfunc IsContractAddr(addr []byte) bool {\n\treturn len(addr) == 0\n}\n\ntype Transaction struct {\n\tAccountNonce uint64\n\tPrice *big.Int\n\tGasLimit *big.Int\n\tRecipient *common.Address `rlp:\"nil\"` \/\/ nil means contract creation\n\tAmount *big.Int\n\tPayload []byte\n\tV byte\n\tR, S *big.Int\n}\n\nfunc NewContractCreationTx(amount, gasLimit, gasPrice *big.Int, data []byte) *Transaction {\n\treturn &Transaction{\n\t\tRecipient: nil,\n\t\tAmount: amount,\n\t\tGasLimit: gasLimit,\n\t\tPrice: gasPrice,\n\t\tPayload: data,\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n}\n\nfunc NewTransactionMessage(to common.Address, amount, gasAmount, gasPrice *big.Int, data []byte) *Transaction {\n\treturn &Transaction{\n\t\tRecipient: &to,\n\t\tAmount: amount,\n\t\tGasLimit: gasAmount,\n\t\tPrice: gasPrice,\n\t\tPayload: data,\n\t\tR: new(big.Int),\n\t\tS: new(big.Int),\n\t}\n}\n\nfunc NewTransactionFromBytes(data []byte) *Transaction {\n\t\/\/ TODO: remove this function if possible. 
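\t\/\/ The Size method added below tallies bytes through a writeCounter defined\n\t\/\/ elsewhere in this package. A minimal sketch of such a counter, an io.Writer\n\t\/\/ that only counts the bytes written (assumed to be backed by\n\t\/\/ common.StorageSize):\n\t\/\/\n\t\/\/\ttype writeCounter common.StorageSize\n\t\/\/\n\t\/\/\tfunc (c *writeCounter) Write(b []byte) (int, error) {\n\t\/\/\t\t*c += writeCounter(len(b))\n\t\/\/\t\treturn len(b), nil\n\t\/\/\t}\n\t\/\/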
callers would\n\t\/\/ much better off decoding into transaction directly.\n\t\/\/ it's not that hard.\n\ttx := new(Transaction)\n\trlp.DecodeBytes(data, tx)\n\treturn tx\n}\n\nfunc (tx *Transaction) Hash() common.Hash {\n\treturn rlpHash([]interface{}{\n\t\ttx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload,\n\t})\n}\n\n\/\/ Size returns the encoded RLP size of tx.\nfunc (self *Transaction) Size() common.StorageSize {\n\tc := writeCounter(0)\n\trlp.Encode(&c, self)\n\treturn common.StorageSize(c)\n}\n\nfunc (self *Transaction) Data() []byte {\n\treturn self.Payload\n}\n\nfunc (self *Transaction) Gas() *big.Int {\n\treturn self.GasLimit\n}\n\nfunc (self *Transaction) GasPrice() *big.Int {\n\treturn self.Price\n}\n\nfunc (self *Transaction) Value() *big.Int {\n\treturn self.Amount\n}\n\nfunc (self *Transaction) Nonce() uint64 {\n\treturn self.AccountNonce\n}\n\nfunc (self *Transaction) SetNonce(AccountNonce uint64) {\n\tself.AccountNonce = AccountNonce\n}\n\nfunc (self *Transaction) From() (common.Address, error) {\n\tpubkey, err := self.PublicKey()\n\tif err != nil {\n\t\treturn common.Address{}, err\n\t}\n\n\tvar addr common.Address\n\tcopy(addr[:], crypto.Sha3(pubkey[1:])[12:])\n\treturn addr, nil\n}\n\n\/\/ To returns the recipient of the transaction.\n\/\/ If transaction is a contract creation (with no recipient address)\n\/\/ To returns nil.\nfunc (tx *Transaction) To() *common.Address {\n\treturn tx.Recipient\n}\n\nfunc (tx *Transaction) GetSignatureValues() (v byte, r []byte, s []byte) {\n\tv = byte(tx.V)\n\tr = common.LeftPadBytes(tx.R.Bytes(), 32)\n\ts = common.LeftPadBytes(tx.S.Bytes(), 32)\n\treturn\n}\n\nfunc (tx *Transaction) PublicKey() ([]byte, error) {\n\tif !crypto.ValidateSignatureValues(tx.V, tx.R, tx.S) {\n\t\treturn nil, errors.New(\"invalid v, r, s values\")\n\t}\n\n\thash := tx.Hash()\n\tv, r, s := tx.GetSignatureValues()\n\tsig := append(r, s...)\n\tsig = append(sig, v-27)\n\n\tp, err := crypto.SigToPub(hash[:], sig)\n\tif err != nil {\n\t\tglog.V(logger.Error).Infof(\"Could not get pubkey from signature: \", err)\n\t\treturn nil, err\n\t}\n\n\tpubkey := crypto.FromECDSAPub(p)\n\tif len(pubkey) == 0 || pubkey[0] != 4 {\n\t\treturn nil, errors.New(\"invalid public key\")\n\t}\n\treturn pubkey, nil\n}\n\nfunc (tx *Transaction) SetSignatureValues(sig []byte) error {\n\ttx.R = common.Bytes2Big(sig[:32])\n\ttx.S = common.Bytes2Big(sig[32:64])\n\ttx.V = sig[64] + 27\n\treturn nil\n}\n\nfunc (tx *Transaction) SignECDSA(prv *ecdsa.PrivateKey) error {\n\th := tx.Hash()\n\tsig, err := crypto.Sign(h[:], prv)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttx.SetSignatureValues(sig)\n\treturn nil\n}\n\n\/\/ TODO: remove\nfunc (tx *Transaction) RlpData() interface{} {\n\tdata := []interface{}{tx.AccountNonce, tx.Price, tx.GasLimit, tx.Recipient, tx.Amount, tx.Payload}\n\treturn append(data, tx.V, tx.R.Bytes(), tx.S.Bytes())\n}\n\nfunc (tx *Transaction) String() string {\n\tvar from, to string\n\tif f, err := tx.From(); err != nil {\n\t\tfrom = \"[invalid sender]\"\n\t} else {\n\t\tfrom = fmt.Sprintf(\"%x\", f[:])\n\t}\n\tif t := tx.To(); t == nil {\n\t\tto = \"[contract creation]\"\n\t} else {\n\t\tto = fmt.Sprintf(\"%x\", t[:])\n\t}\n\tenc, _ := rlp.EncodeToBytes(tx)\n\treturn fmt.Sprintf(`\n\tTX(%x)\n\tContract: %v\n\tFrom: %s\n\tTo: %s\n\tNonce: %v\n\tGasPrice: %v\n\tGasLimit %v\n\tValue: %v\n\tData: 0x%x\n\tV: 0x%x\n\tR: 0x%x\n\tS: 0x%x\n\tHex: %x\n`,\n\t\ttx.Hash(),\n\t\tlen(tx.Recipient) == 
0,\n\t\tfrom,\n\t\tto,\n\t\ttx.AccountNonce,\n\t\ttx.Price,\n\t\ttx.GasLimit,\n\t\ttx.Amount,\n\t\ttx.Payload,\n\t\ttx.V,\n\t\ttx.R,\n\t\ttx.S,\n\t\tenc,\n\t)\n}\n\n\/\/ Transaction slice type for basic sorting\ntype Transactions []*Transaction\n\n\/\/ TODO: remove\nfunc (self Transactions) RlpData() interface{} {\n\t\/\/ Marshal the transactions of this block\n\tenc := make([]interface{}, len(self))\n\tfor i, tx := range self {\n\t\t\/\/ Cast it to a string (safe)\n\t\tenc[i] = tx.RlpData()\n\t}\n\n\treturn enc\n}\n\nfunc (s Transactions) Len() int { return len(s) }\nfunc (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s Transactions) GetRlp(i int) []byte {\n\tenc, _ := rlp.EncodeToBytes(s[i])\n\treturn enc\n}\n\ntype TxByNonce struct{ Transactions }\n\nfunc (s TxByNonce) Less(i, j int) bool {\n\treturn s.Transactions[i].AccountNonce < s.Transactions[j].AccountNonce\n}\n<|endoftext|>"} {"text":"\/\/ SelectionAlgo project main.go\npackage main\n\nimport (\n\t\"code.google.com\/p\/gorest\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\ntype Configuration struct {\n\tRedisIp string\n\tRedisDb int\n\tRedisPort string\n\tPort string\n}\n\ntype EnvConfiguration struct {\n\tRedisIp string\n\tRedisDb string\n\tRedisPort string\n\tPort string\n}\n\ntype AttributeData struct {\n\tAttributeCode []string\n\tAttributeClass string\n\tAttributeType string\n\tAttributeCategory string\n\tWeightPrecentage string\n}\n\ntype Request struct {\n\tCompany int\n\tTenant int\n\tClass string\n\tType string\n\tCategory string\n\tSessionId string\n\tAttributeInfo []AttributeData\n}\n\ntype ConcurrencyInfo struct {\n\tResourceId string\n\tLastConnectedTime string\n}\n\nfunc main() {\n\tfmt.Println(\"Initializting Main\")\n\tInitiateRedis()\n\tgorest.RegisterService(new(SelectionAlgo))\n\thttp.Handle(\"\/\", gorest.Handle())\n\thttp.ListenAndServe(\":2228\", nil)\n}\nadd custom-environment-variables\/\/ SelectionAlgo project main.go\npackage main\n\nimport (\n\t\"code.google.com\/p\/gorest\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\ntype Configuration struct {\n\tRedisIp string\n\tRedisDb int\n\tRedisPort string\n\tPort string\n}\n\ntype EnvConfiguration struct {\n\tRedisIp string\n\tRedisDb string\n\tRedisPort string\n\tPort string\n}\n\ntype AttributeData struct {\n\tAttributeCode []string\n\tAttributeClass string\n\tAttributeType string\n\tAttributeCategory string\n\tWeightPrecentage string\n}\n\ntype Request struct {\n\tCompany int\n\tTenant int\n\tClass string\n\tType string\n\tCategory string\n\tSessionId string\n\tAttributeInfo []AttributeData\n}\n\ntype ConcurrencyInfo struct {\n\tResourceId string\n\tLastConnectedTime string\n}\n\nfunc main() {\n\tfmt.Println(\"Initializting Main\")\n\tInitiateRedis()\n\tgorest.RegisterService(new(SelectionAlgo))\n\thttp.Handle(\"\/\", gorest.Handle())\n\taddr := fmt.Sprintf(\":%s\", port)\n\thttp.ListenAndServe(addr, nil)\n}\n<|endoftext|>"} {"text":"package drivers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/migration\"\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/units\"\n)\n\nvar btrfsVersion string\nvar btrfsLoaded bool\n\ntype btrfs struct {\n\tcommon\n}\n\n\/\/ load is used to run one-time action per-driver rather than per-pool.\nfunc (d *btrfs) load() error {\n\t\/\/ Register the 
patches.\n\td.patches = map[string]func() error{\n\t\t\"storage_create_vm\": nil,\n\t}\n\n\t\/\/ Done if previously loaded.\n\tif btrfsLoaded {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate the required binaries.\n\tfor _, tool := range []string{\"btrfs\"} {\n\t\t_, err := exec.LookPath(tool)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Required tool '%s' is missing\", tool)\n\t\t}\n\t}\n\n\t\/\/ Detect and record the version.\n\tif btrfsVersion == \"\" {\n\t\tout, err := shared.RunCommand(\"btrfs\", \"version\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcount, err := fmt.Sscanf(strings.SplitN(out, \" \", 2)[1], \"v%s\\n\", &btrfsVersion)\n\t\tif err != nil || count != 1 {\n\t\t\treturn fmt.Errorf(\"The 'btrfs' tool isn't working properly\")\n\t\t}\n\t}\n\n\tbtrfsLoaded = true\n\treturn nil\n}\n\n\/\/ Info returns info about the driver and its environment.\nfunc (d *btrfs) Info() Info {\n\treturn Info{\n\t\tName: \"btrfs\",\n\t\tVersion: btrfsVersion,\n\t\tOptimizedImages: true,\n\t\tPreservesInodes: !d.state.OS.RunningInUserNS,\n\t\tRemote: false,\n\t\tVolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},\n\t\tBlockBacking: false,\n\t\tRunningQuotaResize: true,\n\t\tRunningSnapshotFreeze: false,\n\t}\n}\n\n\/\/ Create is called during pool creation and is effectively using an empty driver struct.\n\/\/ WARNING: The Create() function cannot rely on any of the struct attributes being set.\nfunc (d *btrfs) Create() error {\n\t\/\/ Store the provided source as we are likely to be mangling it.\n\td.config[\"volatile.initial_source\"] = d.config[\"source\"]\n\n\tloopPath := loopFilePath(d.name)\n\tif d.config[\"source\"] == \"\" || d.config[\"source\"] == loopPath {\n\t\t\/\/ Create a loop based pool.\n\t\td.config[\"source\"] = loopPath\n\n\t\t\/\/ Create the loop file itself.\n\t\tsize, err := units.ParseByteSizeString(d.config[\"size\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = createSparseFile(d.config[\"source\"], size)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to create the sparse file\")\n\t\t}\n\n\t\t\/\/ Format the file.\n\t\t_, err = makeFSType(d.config[\"source\"], \"btrfs\", &mkfsOptions{Label: d.name})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to format sparse file\")\n\t\t}\n\t} else if shared.IsBlockdevPath(d.config[\"source\"]) {\n\t\t\/\/ Format the block device.\n\t\t_, err := makeFSType(d.config[\"source\"], \"btrfs\", &mkfsOptions{Label: d.name})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to format block device\")\n\t\t}\n\n\t\t\/\/ Record the UUID as the source.\n\t\tdevUUID, err := fsUUID(d.config[\"source\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Confirm that the symlink is appearing (give it 10s).\n\t\tif tryExists(fmt.Sprintf(\"\/dev\/disk\/by-uuid\/%s\", devUUID)) {\n\t\t\t\/\/ Override the config to use the UUID.\n\t\t\td.config[\"source\"] = devUUID\n\t\t}\n\t} else if d.config[\"source\"] != \"\" {\n\t\thostPath := shared.HostPath(d.config[\"source\"])\n\t\tif d.isSubvolume(hostPath) {\n\t\t\t\/\/ Existing btrfs subvolume.\n\t\t\tsubvols, err := d.getSubvolumes(hostPath)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Could not determine if existing btrfs subvolume is empty\")\n\t\t\t}\n\n\t\t\t\/\/ Check that the provided subvolume is empty.\n\t\t\tif len(subvols) > 0 {\n\t\t\t\treturn fmt.Errorf(\"Requested btrfs subvolume exists but is not empty\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ New btrfs subvolume on 
existing btrfs filesystem.\n\t\t\tcleanSource := filepath.Clean(hostPath)\n\t\t\tlxdDir := shared.VarPath()\n\n\t\t\tif shared.PathExists(hostPath) && !hasFilesystem(hostPath, util.FilesystemSuperMagicBtrfs) {\n\t\t\t\treturn fmt.Errorf(\"Provided path does not reside on a btrfs filesystem\")\n\t\t\t} else if strings.HasPrefix(cleanSource, lxdDir) {\n\t\t\t\tif cleanSource != GetPoolMountPath(d.name) {\n\t\t\t\t\treturn fmt.Errorf(\"Only allowed source path under %s is %s\", shared.VarPath(), GetPoolMountPath(d.name))\n\t\t\t\t} else if !hasFilesystem(shared.VarPath(\"storage-pools\"), util.FilesystemSuperMagicBtrfs) {\n\t\t\t\t\treturn fmt.Errorf(\"Provided path does not reside on a btrfs filesystem\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Delete the current directory to replace by subvolume.\n\t\t\t\terr := os.Remove(cleanSource)\n\t\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to remove '%s'\", cleanSource)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Create the subvolume.\n\t\t\t_, err := shared.RunCommand(\"btrfs\", \"subvolume\", \"create\", hostPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Invalid \\\"source\\\" property\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Delete removes the storage pool from the storage device.\nfunc (d *btrfs) Delete(op *operations.Operation) error {\n\t\/\/ If the user completely destroyed it, call it done.\n\tif !shared.PathExists(GetPoolMountPath(d.name)) {\n\t\treturn nil\n\t}\n\n\t\/\/ Delete potential intermediate btrfs subvolumes.\n\tfor _, volType := range d.Info().VolumeTypes {\n\t\tfor _, dir := range BaseDirectories[volType] {\n\t\t\tpath := filepath.Join(GetPoolMountPath(d.name), dir)\n\t\t\tif !shared.PathExists(path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !d.isSubvolume(path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr := d.deleteSubvolume(path, true)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not delete btrfs subvolume: %s\", path)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ On delete, wipe everything in the directory.\n\terr := wipeDirectory(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Unmount the path.\n\t_, err = d.Unmount()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If the pool path is a subvolume itself, delete it.\n\tif d.isSubvolume(GetPoolMountPath(d.name)) {\n\t\terr := d.deleteSubvolume(GetPoolMountPath(d.name), false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ And re-create as an empty directory to make the backend happy.\n\t\terr = os.Mkdir(GetPoolMountPath(d.name), 0700)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to create directory '%s'\", GetPoolMountPath(d.name))\n\t\t}\n\t}\n\n\t\/\/ Delete any loop file we may have used.\n\tloopPath := loopFilePath(d.name)\n\terr = os.Remove(loopPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn errors.Wrapf(err, \"Failed to remove '%s'\", loopPath)\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.\nfunc (d *btrfs) Validate(config map[string]string) error {\n\treturn nil\n}\n\n\/\/ Update applies any driver changes required from a configuration change.\nfunc (d *btrfs) Update(changedConfig map[string]string) error {\n\t\/\/ We only care about btrfs.mount_options.\n\tval, ok := changedConfig[\"btrfs.mount_options\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\t\/\/ Trigger a re-mount.\n\td.config[\"btrfs.mount_options\"] = val\n\tmntFlags, mntOptions := 
resolveMountOptions(d.getMountOptions())\n\tmntFlags |= unix.MS_REMOUNT\n\n\terr := TryMount(\"\", GetPoolMountPath(d.name), \"none\", mntFlags, mntOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Mount mounts the storage pool.\nfunc (d *btrfs) Mount() (bool, error) {\n\t\/\/ Check if already mounted.\n\tif shared.IsMountPoint(GetPoolMountPath(d.name)) {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Setup mount options.\n\tloopPath := loopFilePath(d.name)\n\tmntSrc := \"\"\n\tmntDst := GetPoolMountPath(d.name)\n\tmntFilesystem := \"btrfs\"\n\tif d.config[\"source\"] == loopPath {\n\t\t\/\/ Bring up the loop device.\n\t\tloopF, err := PrepareLoopDev(d.config[\"source\"], LoFlagsAutoclear)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tdefer loopF.Close()\n\n\t\tmntSrc = loopF.Name()\n\t} else if filepath.IsAbs(d.config[\"source\"]) {\n\t\t\/\/ Bring up an existing device or path.\n\t\tmntSrc = shared.HostPath(d.config[\"source\"])\n\n\t\tif !shared.IsBlockdevPath(mntSrc) {\n\t\t\tmntFilesystem = \"none\"\n\n\t\t\tif !hasFilesystem(mntSrc, util.FilesystemSuperMagicBtrfs) {\n\t\t\t\treturn false, fmt.Errorf(\"Source path '%s' isn't btrfs\", mntSrc)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Mount using UUID.\n\t\tmntSrc = fmt.Sprintf(\"\/dev\/disk\/by-uuid\/%s\", d.config[\"source\"])\n\t}\n\n\t\/\/ Get the custom mount flags\/options.\n\tmntFlags, mntOptions := resolveMountOptions(d.getMountOptions())\n\n\t\/\/ Handle bind-mounts first.\n\tif mntFilesystem == \"none\" {\n\t\t\/\/ Setup the bind-mount itself.\n\t\terr := TryMount(mntSrc, mntDst, mntFilesystem, unix.MS_BIND, \"\")\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t\/\/ Now apply the custom options.\n\t\tmntFlags |= unix.MS_REMOUNT\n\t\terr = TryMount(\"\", mntDst, mntFilesystem, mntFlags, mntOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn true, nil\n\t}\n\n\t\/\/ Handle traditional mounts.\n\terr := TryMount(mntSrc, mntDst, mntFilesystem, mntFlags, mntOptions)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ Unmount unmounts the storage pool.\nfunc (d *btrfs) Unmount() (bool, error) {\n\t\/\/ Unmount the pool.\n\tourUnmount, err := forceUnmount(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ If loop backed, force release the loop device.\n\tloopPath := loopFilePath(d.name)\n\tif d.config[\"source\"] == loopPath {\n\t\treleaseLoopDev(loopPath)\n\t}\n\n\treturn ourUnmount, nil\n}\n\n\/\/ GetResources returns the pool resource usage information.\nfunc (d *btrfs) GetResources() (*api.ResourcesStoragePool, error) {\n\treturn d.vfsGetResources()\n}\n\n\/\/ MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.\nfunc (d *btrfs) MigrationTypes(contentType ContentType, refresh bool) []migration.Type {\n\tif contentType != ContentTypeFS {\n\t\treturn nil\n\t}\n\n\t\/\/ Only use rsync for refreshes and if running in an unprivileged container.\n\tif refresh || d.state.OS.RunningInUserNS {\n\t\treturn []migration.Type{\n\t\t\t{\n\t\t\t\tFSType: migration.MigrationFSType_RSYNC,\n\t\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t\t},\n\t\t}\n\t}\n\n\treturn []migration.Type{\n\t\t{\n\t\t\tFSType: migration.MigrationFSType_BTRFS,\n\t\t},\n\t\t{\n\t\t\tFSType: migration.MigrationFSType_RSYNC,\n\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t},\n\t}\n}\nlxd\/storage\/btrfs: Fix 
usage inside containerspackage drivers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/migration\"\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/units\"\n)\n\nvar btrfsVersion string\nvar btrfsLoaded bool\n\ntype btrfs struct {\n\tcommon\n}\n\n\/\/ load is used to run one-time action per-driver rather than per-pool.\nfunc (d *btrfs) load() error {\n\t\/\/ Register the patches.\n\td.patches = map[string]func() error{\n\t\t\"storage_create_vm\": nil,\n\t}\n\n\t\/\/ Done if previously loaded.\n\tif btrfsLoaded {\n\t\treturn nil\n\t}\n\n\t\/\/ Validate the required binaries.\n\tfor _, tool := range []string{\"btrfs\"} {\n\t\t_, err := exec.LookPath(tool)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Required tool '%s' is missing\", tool)\n\t\t}\n\t}\n\n\t\/\/ Detect and record the version.\n\tif btrfsVersion == \"\" {\n\t\tout, err := shared.RunCommand(\"btrfs\", \"version\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcount, err := fmt.Sscanf(strings.SplitN(out, \" \", 2)[1], \"v%s\\n\", &btrfsVersion)\n\t\tif err != nil || count != 1 {\n\t\t\treturn fmt.Errorf(\"The 'btrfs' tool isn't working properly\")\n\t\t}\n\t}\n\n\tbtrfsLoaded = true\n\treturn nil\n}\n\n\/\/ Info returns info about the driver and its environment.\nfunc (d *btrfs) Info() Info {\n\treturn Info{\n\t\tName: \"btrfs\",\n\t\tVersion: btrfsVersion,\n\t\tOptimizedImages: true,\n\t\tPreservesInodes: !d.state.OS.RunningInUserNS,\n\t\tRemote: false,\n\t\tVolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},\n\t\tBlockBacking: false,\n\t\tRunningQuotaResize: true,\n\t\tRunningSnapshotFreeze: false,\n\t}\n}\n\n\/\/ Create is called during pool creation and is effectively using an empty driver struct.\n\/\/ WARNING: The Create() function cannot rely on any of the struct attributes being set.\nfunc (d *btrfs) Create() error {\n\t\/\/ Store the provided source as we are likely to be mangling it.\n\td.config[\"volatile.initial_source\"] = d.config[\"source\"]\n\n\tloopPath := loopFilePath(d.name)\n\tif d.config[\"source\"] == \"\" || d.config[\"source\"] == loopPath {\n\t\t\/\/ Create a loop based pool.\n\t\td.config[\"source\"] = loopPath\n\n\t\t\/\/ Create the loop file itself.\n\t\tsize, err := units.ParseByteSizeString(d.config[\"size\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = createSparseFile(d.config[\"source\"], size)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to create the sparse file\")\n\t\t}\n\n\t\t\/\/ Format the file.\n\t\t_, err = makeFSType(d.config[\"source\"], \"btrfs\", &mkfsOptions{Label: d.name})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to format sparse file\")\n\t\t}\n\t} else if shared.IsBlockdevPath(d.config[\"source\"]) {\n\t\t\/\/ Format the block device.\n\t\t_, err := makeFSType(d.config[\"source\"], \"btrfs\", &mkfsOptions{Label: d.name})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to format block device\")\n\t\t}\n\n\t\t\/\/ Record the UUID as the source.\n\t\tdevUUID, err := fsUUID(d.config[\"source\"])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Confirm that the symlink is appearing (give it 10s).\n\t\tif tryExists(fmt.Sprintf(\"\/dev\/disk\/by-uuid\/%s\", devUUID)) {\n\t\t\t\/\/ 
Override the config to use the UUID.\n\t\t\td.config[\"source\"] = devUUID\n\t\t}\n\t} else if d.config[\"source\"] != \"\" {\n\t\thostPath := shared.HostPath(d.config[\"source\"])\n\t\tif d.isSubvolume(hostPath) {\n\t\t\t\/\/ Existing btrfs subvolume.\n\t\t\tsubvols, err := d.getSubvolumes(hostPath)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Could not determine if existing btrfs subvolume is empty\")\n\t\t\t}\n\n\t\t\t\/\/ Check that the provided subvolume is empty.\n\t\t\tif len(subvols) > 0 {\n\t\t\t\treturn fmt.Errorf(\"Requested btrfs subvolume exists but is not empty\")\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ New btrfs subvolume on existing btrfs filesystem.\n\t\t\tcleanSource := filepath.Clean(hostPath)\n\t\t\tlxdDir := shared.VarPath()\n\n\t\t\tif shared.PathExists(hostPath) && !hasFilesystem(hostPath, util.FilesystemSuperMagicBtrfs) {\n\t\t\t\treturn fmt.Errorf(\"Provided path does not reside on a btrfs filesystem\")\n\t\t\t} else if strings.HasPrefix(cleanSource, lxdDir) {\n\t\t\t\tif cleanSource != GetPoolMountPath(d.name) {\n\t\t\t\t\treturn fmt.Errorf(\"Only allowed source path under %s is %s\", shared.VarPath(), GetPoolMountPath(d.name))\n\t\t\t\t} else if !hasFilesystem(shared.VarPath(\"storage-pools\"), util.FilesystemSuperMagicBtrfs) {\n\t\t\t\t\treturn fmt.Errorf(\"Provided path does not reside on a btrfs filesystem\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Delete the current directory to replace by subvolume.\n\t\t\t\terr := os.Remove(cleanSource)\n\t\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\t\treturn errors.Wrapf(err, \"Failed to remove '%s'\", cleanSource)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Create the subvolume.\n\t\t\t_, err := shared.RunCommand(\"btrfs\", \"subvolume\", \"create\", hostPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Invalid \\\"source\\\" property\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Delete removes the storage pool from the storage device.\nfunc (d *btrfs) Delete(op *operations.Operation) error {\n\t\/\/ If the user completely destroyed it, call it done.\n\tif !shared.PathExists(GetPoolMountPath(d.name)) {\n\t\treturn nil\n\t}\n\n\t\/\/ Delete potential intermediate btrfs subvolumes.\n\tfor _, volType := range d.Info().VolumeTypes {\n\t\tfor _, dir := range BaseDirectories[volType] {\n\t\t\tpath := filepath.Join(GetPoolMountPath(d.name), dir)\n\t\t\tif !shared.PathExists(path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !d.isSubvolume(path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr := d.deleteSubvolume(path, true)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not delete btrfs subvolume: %s\", path)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ On delete, wipe everything in the directory.\n\terr := wipeDirectory(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Unmount the path.\n\t_, err = d.Unmount()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If the pool path is a subvolume itself, delete it.\n\tif d.isSubvolume(GetPoolMountPath(d.name)) {\n\t\terr := d.deleteSubvolume(GetPoolMountPath(d.name), false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ And re-create as an empty directory to make the backend happy.\n\t\terr = os.Mkdir(GetPoolMountPath(d.name), 0700)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to create directory '%s'\", GetPoolMountPath(d.name))\n\t\t}\n\t}\n\n\t\/\/ Delete any loop file we may have used.\n\tloopPath := loopFilePath(d.name)\n\terr = os.Remove(loopPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn 
errors.Wrapf(err, \"Failed to remove '%s'\", loopPath)\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.\nfunc (d *btrfs) Validate(config map[string]string) error {\n\treturn nil\n}\n\n\/\/ Update applies any driver changes required from a configuration change.\nfunc (d *btrfs) Update(changedConfig map[string]string) error {\n\t\/\/ We only care about btrfs.mount_options.\n\tval, ok := changedConfig[\"btrfs.mount_options\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\t\/\/ Custom mount options don't work inside containers\n\tif d.state.OS.RunningInUserNS {\n\t\treturn nil\n\t}\n\n\t\/\/ Trigger a re-mount.\n\td.config[\"btrfs.mount_options\"] = val\n\tmntFlags, mntOptions := resolveMountOptions(d.getMountOptions())\n\tmntFlags |= unix.MS_REMOUNT\n\n\terr := TryMount(\"\", GetPoolMountPath(d.name), \"none\", mntFlags, mntOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Mount mounts the storage pool.\nfunc (d *btrfs) Mount() (bool, error) {\n\t\/\/ Check if already mounted.\n\tif shared.IsMountPoint(GetPoolMountPath(d.name)) {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Setup mount options.\n\tloopPath := loopFilePath(d.name)\n\tmntSrc := \"\"\n\tmntDst := GetPoolMountPath(d.name)\n\tmntFilesystem := \"btrfs\"\n\tif d.config[\"source\"] == loopPath {\n\t\t\/\/ Bring up the loop device.\n\t\tloopF, err := PrepareLoopDev(d.config[\"source\"], LoFlagsAutoclear)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tdefer loopF.Close()\n\n\t\tmntSrc = loopF.Name()\n\t} else if filepath.IsAbs(d.config[\"source\"]) {\n\t\t\/\/ Bring up an existing device or path.\n\t\tmntSrc = shared.HostPath(d.config[\"source\"])\n\n\t\tif !shared.IsBlockdevPath(mntSrc) {\n\t\t\tmntFilesystem = \"none\"\n\n\t\t\tif !hasFilesystem(mntSrc, util.FilesystemSuperMagicBtrfs) {\n\t\t\t\treturn false, fmt.Errorf(\"Source path '%s' isn't btrfs\", mntSrc)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Mount using UUID.\n\t\tmntSrc = fmt.Sprintf(\"\/dev\/disk\/by-uuid\/%s\", d.config[\"source\"])\n\t}\n\n\t\/\/ Get the custom mount flags\/options.\n\tmntFlags, mntOptions := resolveMountOptions(d.getMountOptions())\n\n\t\/\/ Handle bind-mounts first.\n\tif mntFilesystem == \"none\" {\n\t\t\/\/ Setup the bind-mount itself.\n\t\terr := TryMount(mntSrc, mntDst, mntFilesystem, unix.MS_BIND, \"\")\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t\/\/ Custom mount options don't work inside containers\n\t\tif !d.state.OS.RunningInUserNS {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t\/\/ Now apply the custom options.\n\t\tmntFlags |= unix.MS_REMOUNT\n\t\terr = TryMount(\"\", mntDst, mntFilesystem, mntFlags, mntOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn true, nil\n\t}\n\n\t\/\/ Handle traditional mounts.\n\terr := TryMount(mntSrc, mntDst, mntFilesystem, mntFlags, mntOptions)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n\/\/ Unmount unmounts the storage pool.\nfunc (d *btrfs) Unmount() (bool, error) {\n\t\/\/ Unmount the pool.\n\tourUnmount, err := forceUnmount(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ If loop backed, force release the loop device.\n\tloopPath := loopFilePath(d.name)\n\tif d.config[\"source\"] == loopPath {\n\t\treleaseLoopDev(loopPath)\n\t}\n\n\treturn ourUnmount, nil\n}\n\n\/\/ GetResources returns the pool resource usage information.\nfunc (d *btrfs) GetResources() (*api.ResourcesStoragePool, error) 
{\n\treturn d.vfsGetResources()\n}\n\n\/\/ MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.\nfunc (d *btrfs) MigrationTypes(contentType ContentType, refresh bool) []migration.Type {\n\tif contentType != ContentTypeFS {\n\t\treturn nil\n\t}\n\n\t\/\/ Only use rsync for refreshes and if running in an unprivileged container.\n\tif refresh || d.state.OS.RunningInUserNS {\n\t\treturn []migration.Type{\n\t\t\t{\n\t\t\t\tFSType: migration.MigrationFSType_RSYNC,\n\t\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t\t},\n\t\t}\n\t}\n\n\treturn []migration.Type{\n\t\t{\n\t\t\tFSType: migration.MigrationFSType_BTRFS,\n\t\t},\n\t\t{\n\t\t\tFSType: migration.MigrationFSType_RSYNC,\n\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"sync\"\n)\n\ntype CommandsInfo struct {\n\tPort int\n}\n\ntype CommandInfo map[string]*struct {\n\tToken string\n}\n\ntype CommandRuntimeInfo struct {\n\tToken string\n\tHandler interface{}\n}\n\ntype CommandServer struct {\n\tCommon CommandsInfo\n\tCommand CommandInfo\n\tHandlers map[string]*CommandRuntimeInfo\n}\n\nfunc NewServer(commands CommandsInfo, command CommandInfo) *CommandServer {\n\tserver := &CommandServer{commands, command, map[string]*CommandRuntimeInfo{}}\n\n\tfor k, v := range command {\n\t\tserver.Handlers[k] = &CommandRuntimeInfo{v.Token, nil}\n\t}\n\n\tserver.registHandler(\"\/echo\", EchoCommand)\n\tserver.registHandler(\"\/namu\", NamuCommand)\n\tserver.registHandler(\"\/zzal\", ZzalCommand)\n\n\treturn server\n}\n\nfunc (server *CommandServer) registHandler(key string, handler interface{}) {\n\tif val, ok := server.Handlers[key]; ok {\n\t\tval.Handler = handler\n\t} else {\n\t\tlog.Println(\"Warning : config not found for \", key)\n\t\tserver.Handlers[key] = &CommandRuntimeInfo{\"\", handler}\n\t}\n}\n\nfunc requestFormToRequestObj(r *http.Request) *Request {\n\tret := new(Request)\n\n\tval := reflect.Indirect(reflect.ValueOf(ret))\n\ttyp := reflect.TypeOf(*ret)\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := val.Field(i)\n\t\tfield_info := typ.Field(i)\n\t\tfield_name := field_info.Tag.Get(\"param\")\n\t\tfield.Set(reflect.ValueOf(r.FormValue(field_name)))\n\t}\n\n\treturn ret\n}\n\nfunc (server *CommandServer) commandHandler(w http.ResponseWriter, r *http.Request) {\n\treq := requestFormToRequestObj(r)\n\thandlerInfo := server.Handlers[req.Command]\n\n\tif handlerInfo != nil {\n\t\tif handlerInfo.Token == \"\" || handlerInfo.Token == req.Token {\n\t\t\tfun := reflect.ValueOf(handlerInfo.Handler)\n\t\t\tin := make([]reflect.Value, 1)\n\t\t\tin[0] = reflect.ValueOf(*req)\n\t\t\tresponse := fun.Call(in)[0].Interface().(*Response)\n\n\t\t\tvar e error\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tif response.ResponseType != deffered_in_channel {\n\t\t\t\tencoder := json.NewEncoder(w)\n\t\t\t\te = encoder.Encode(response)\n\t\t\t} else {\n\t\t\t\tvar buf []byte\n\t\t\t\tbuf, e = json.Marshal(response)\n\t\t\t\thttp.Post(req.ResponseUrl, \"application\/json\", bytes.NewBuffer(buf))\n\t\t\t\tlog.Println(\"Deffered : \", string(buf))\n\t\t\t}\n\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"Error occured : \", req, e)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (server *CommandServer) Start(wg *sync.WaitGroup) {\n\thttp.HandleFunc(\"\/\", 
server.commandHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", server.Common.Port), nil)\n\n\twg.Done()\n}\nAdd Command handler initializer\npackage command\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype CommandsInfo struct {\n\tPort int\n}\n\ntype CommandInfo map[string]*struct {\n\tToken string\n\tOptions []string\n}\n\ntype CommandRuntimeInfo struct {\n\tToken string\n\tHandler interface{}\n\tOptions map[string]string\n}\n\ntype CommandServer struct {\n\tCommon CommandsInfo\n\tCommand CommandInfo\n\tHandlers map[string]*CommandRuntimeInfo\n}\n\nfunc NewServer(commands CommandsInfo, command CommandInfo) *CommandServer {\n\tserver := &CommandServer{commands, command, map[string]*CommandRuntimeInfo{}}\n\n\tfor k, v := range command {\n\t\tparsed_options := map[string]string{}\n\t\tfor _, val := range v.Options {\n\t\t\tvals := strings.Split(val, \":\")\n\t\t\tparsed_options[vals[0]] = vals[1]\n\t\t}\n\t\tserver.Handlers[k] = &CommandRuntimeInfo{v.Token, nil, parsed_options}\n\t}\n\n\tserver.registHandler(\"\/echo\", EchoCommand, nil)\n\tserver.registHandler(\"\/namu\", NamuCommand, nil)\n\tserver.registHandler(\"\/zzal\", ZzalCommand, nil)\n\n\treturn server\n}\n\ntype HandlerInitializer func(*map[string]string)\n\nfunc (server *CommandServer) registHandler(key string, handler interface{}, initializer HandlerInitializer) {\n\tif val, ok := server.Handlers[key]; ok {\n\t\tval.Handler = handler\n\t} else {\n\t\tlog.Println(\"Warning : config not found for \", key)\n\t\tserver.Handlers[key] = &CommandRuntimeInfo{\"\", handler, nil}\n\t}\n\tif initializer != nil {\n\t\tinitializer(&server.Handlers[key].Options)\n\t}\n}\n\nfunc requestFormToRequestObj(r *http.Request) *Request {\n\tret := new(Request)\n\n\tval := reflect.Indirect(reflect.ValueOf(ret))\n\ttyp := reflect.TypeOf(*ret)\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := val.Field(i)\n\t\tfield_info := typ.Field(i)\n\t\tfield_name := field_info.Tag.Get(\"param\")\n\t\tfield.Set(reflect.ValueOf(r.FormValue(field_name)))\n\t}\n\n\treturn ret\n}\n\nfunc (server *CommandServer) commandHandler(w http.ResponseWriter, r *http.Request) {\n\treq := requestFormToRequestObj(r)\n\thandlerInfo := server.Handlers[req.Command]\n\n\tif handlerInfo != nil {\n\t\tif handlerInfo.Token == \"\" || handlerInfo.Token == req.Token {\n\t\t\tfun := reflect.ValueOf(handlerInfo.Handler)\n\t\t\tin := make([]reflect.Value, 1)\n\t\t\tin[0] = reflect.ValueOf(*req)\n\t\t\tresponse := fun.Call(in)[0].Interface().(*Response)\n\n\t\t\tvar e error\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tif response.ResponseType != deffered_in_channel {\n\t\t\t\tencoder := json.NewEncoder(w)\n\t\t\t\te = encoder.Encode(response)\n\t\t\t} else {\n\t\t\t\tvar buf []byte\n\t\t\t\tbuf, e = json.Marshal(response)\n\t\t\t\thttp.Post(req.ResponseUrl, \"application\/json\", bytes.NewBuffer(buf))\n\t\t\t\tlog.Println(\"Deferred : \", string(buf))\n\t\t\t}\n\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(\"Error occurred : \", req, e)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (server *CommandServer) Start(wg *sync.WaitGroup) {\n\thttp.HandleFunc(\"\/\", server.commandHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", server.Common.Port), nil)\n\n\twg.Done()\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\ntype deviceData struct {\n\tProjectId uint64 `json:\"project_id\"`\n\tDeviceId string `json:\"device_id,omitempty\"`\n\tDeviceName string 
`json:\"device_name,omitempty\"`\n\tDeviceType string `json:\"device_type,omitempty\"`\n\tCreated string `json:\"created,omitempty\"`\n\t\/\/ Private fields, not marshalled into JSON\n\tisUpdate bool\n}\n\nfunc (d *deviceData) IsValid() bool {\n\tif d.isUpdate {\n\t\treturn len(d.DeviceId) > 0 &&\n\t\t\t(len(d.DeviceName) > 0 || len(d.DeviceType) > 0)\n\t}\n\treturn d.ProjectId != 0\n}\n\n\/\/ deviceId is a simpler struct for calls that just consist of a device id\n\/\/ and optionally projectId\ntype deviceId struct {\n\tid string\n\tprojectId uint64\n}\n\nfunc (d *deviceId) IsValid() bool {\n\treturn len(d.id) > 0\n}\n\n\/\/ NewDevicesCommand returns the base 'device' command.\nfunc NewDevicesCommand(ctx *Context) *Command {\n\tcmd := &Command{\n\t\tName: \"device\",\n\t\tUsage: \"Commands for managing devices.\",\n\t\tSubCommands: Mux{\n\t\t\t\"create\": newCreateDeviceCmd(ctx),\n\t\t\t\"delete\": newDeleteDeviceCmd(ctx),\n\t\t\t\"get\": newGetDeviceCmd(ctx),\n\t\t\t\"list\": newListDevicesCmd(ctx),\n\t\t\t\"update\": newUpdateDeviceCmd(ctx),\n\t\t},\n\t}\n\tcmd.NewFlagSet(\"iobeam device\")\n\n\treturn cmd\n}\n\nfunc newCreateOrUpdateDeviceCmd(ctx *Context, update bool, name string, action Action) *Command {\n\tdevice := deviceData{\n\t\tisUpdate: update,\n\t}\n\n\tcmd := &Command{\n\t\tName: name,\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: name + \" device\",\n\t\tData: &device,\n\t\tAction: action,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device \" + name)\n\tvar idDesc string\n\tif update {\n\t\tidDesc = \"ID of the device to be updated\"\n\t} else {\n\t\tidDesc = \"Device ID, if omitted a random one will be assigned (must be > 16 chars)\"\n\t}\n\tflags.StringVar(&device.DeviceId, \"id\", \"\", idDesc)\n\tflags.StringVar(&device.DeviceName, \"name\", \"\", \"The device name\")\n\tflags.StringVar(&device.DeviceType, \"type\", \"\", \"The type of device\")\n\tflags.Uint64Var(&device.ProjectId, \"projectId\", ctx.Profile.ActiveProject, \"Project ID associated with the device (if omitted, defaults to active project).\")\n\n\treturn cmd\n}\n\nfunc newCreateDeviceCmd(ctx *Context) *Command {\n\treturn newCreateOrUpdateDeviceCmd(ctx, false, \"create\", createDevice)\n}\n\nfunc newUpdateDeviceCmd(ctx *Context) *Command {\n\treturn newCreateOrUpdateDeviceCmd(ctx, true, \"update\", updateDevice)\n}\n\nfunc createDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceData)\n\t_, err := ctx.Client.\n\t\tPost(c.ApiPath).\n\t\tExpect(201).\n\t\tProjectToken(ctx.Profile, data.ProjectId).\n\t\tBody(data).\n\t\tResponseBody(c.Data).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\n\t\tdevice := body.(*deviceData)\n\t\tfmt.Println(\"New device created.\")\n\t\tfmt.Printf(\"Device ID: %v\\n\", device.DeviceId)\n\t\tfmt.Printf(\"Device Name: %v\\n\", device.DeviceName)\n\t\tfmt.Println()\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nfunc updateDevice(c *Command, ctx *Context) error {\n\n\tdevice := c.Data.(*deviceData)\n\n\trsp, err := ctx.Client.\n\t\tPatch(c.ApiPath+\"\/\"+device.DeviceId).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, device.ProjectId).\n\t\tBody(c.Data).\n\t\tExecute()\n\n\tif err == nil {\n\t\tfmt.Println(\"Device successfully updated\")\n\t} else if rsp.Http().StatusCode == 204 {\n\t\tfmt.Println(\"Device not modified\")\n\t\treturn nil\n\t}\n\n\treturn err\n}\n\nfunc newGetDeviceCmd(ctx *Context) *Command {\n\tdata := new(deviceId)\n\n\tcmd := &Command{\n\t\tName: \"get\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"get device 
information\",\n\t\tData: data,\n\t\tAction: getDevice,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device get\")\n\tflags.StringVar(&data.id, \"id\", \"\", \"Device ID to query (REQUIRED)\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject,\n\t\t\"Project ID to get devices from (if omitted, defaults to active project)\")\n\n\treturn cmd\n}\n\nfunc getDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceId)\n\tpath := c.ApiPath + \"\/\" + data.id\n\n\tdevice := new(deviceData)\n\t_, err := ctx.Client.\n\t\tGet(path).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, data.projectId).\n\t\tResponseBody(device).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\t\tdevice = body.(*deviceData)\n\t\tfmt.Printf(\"Device name: %v\\n\"+\n\t\t\t\"Device ID: %v\\n\"+\n\t\t\t\"Project ID: %v\\n\"+\n\t\t\t\"Type: %v\\n\"+\n\t\t\t\"Created: %v\\n\",\n\t\t\tdevice.DeviceName,\n\t\t\tdevice.DeviceId,\n\t\t\tdevice.ProjectId,\n\t\t\tdevice.DeviceType,\n\t\t\tdevice.Created)\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nconst (\n\torderName = \"name\"\n\torderNameReverse = \"name-r\"\n\torderId = \"id\"\n\torderIdReverse = \"id-r\"\n\torderDate = \"date\"\n\torderDateReverse = \"date-r\"\n)\n\nvar orders = []string{orderName, orderNameReverse, orderId, orderIdReverse,\n\torderDate, orderDateReverse}\n\ntype listData struct {\n\tprojectId uint64\n\torder string\n}\n\nfunc (d *listData) IsValid() bool {\n\tpidOk := d.projectId > 0\n\torderOk := isInList(d.order, orders)\n\treturn pidOk && orderOk\n}\n\nfunc newListDevicesCmd(ctx *Context) *Command {\n\tdata := new(listData)\n\n\tcmd := &Command{\n\t\tName: \"list\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"List devices for a given project.\",\n\t\tData: data,\n\t\tAction: listDevices,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device list\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject,\n\t\t\"Project ID to get devices from (if omitted, defaults to active project)\")\n\tflags.StringVar(&data.order, \"order\", orderDate,\n\t\t\"Sort order for results. Valid values: date(-r), id(-r), name(-r). 
Values ending with -r are reverse ordering.\")\n\n\treturn cmd\n}\n\ntype deviceSort struct {\n\titems []deviceData\n\torder string\n}\n\nfunc (a deviceSort) Len() int { return len(a.items) }\nfunc (a deviceSort) Swap(i, j int) { a.items[i], a.items[j] = a.items[j], a.items[i] }\nfunc (a deviceSort) Less(i, j int) bool {\n\tswitch a.order {\n\tcase \"name\":\n\t\treturn a.items[i].DeviceName < a.items[j].DeviceName\n\tcase \"name-r\":\n\t\treturn a.items[j].DeviceName < a.items[i].DeviceName\n\tcase \"id\":\n\t\treturn a.items[i].DeviceId < a.items[j].DeviceId\n\tcase \"id-r\":\n\t\treturn a.items[i].DeviceId < a.items[j].DeviceId\n\tcase \"date-r\":\n\t\treturn a.items[j].Created < a.items[i].Created\n\tcase \"date\":\n\t\tfallthrough\n\tdefault:\n\t\treturn a.items[i].Created < a.items[j].Created\n\t}\n\treturn false\n}\n\nfunc listDevices(c *Command, ctx *Context) error {\n\ttype deviceList struct {\n\t\tDevices []deviceData\n\t}\n\n\tcmdArgs := c.Data.(*listData)\n\tpid := cmdArgs.projectId\n\n\t_, err := ctx.Client.\n\t\tGet(c.ApiPath).\n\t\tParamUint64(\"project_id\", pid).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, pid).\n\t\tResponseBody(new(deviceList)).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\n\t\tlist := body.(*deviceList)\n\n\t\tfmt.Printf(\"Devices in project %v\\n\", pid)\n\t\tfmt.Println(\"-----\")\n\n\t\tsorted := &deviceSort{items: list.Devices, order: cmdArgs.order}\n\t\tsort.Sort(sorted)\n\t\tfor _, device := range sorted.items {\n\n\t\t\tfmt.Printf(\"Name: %v\\n\"+\n\t\t\t\t\"Device ID: %v\\n\"+\n\t\t\t\t\"Type: %v\\n\"+\n\t\t\t\t\"Created: %v\\n\\n\",\n\t\t\t\tdevice.DeviceName,\n\t\t\t\tdevice.DeviceId,\n\t\t\t\tdevice.DeviceType,\n\t\t\t\tdevice.Created)\n\t\t}\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nfunc newDeleteDeviceCmd(ctx *Context) *Command {\n\tdata := new(deviceId)\n\n\tcmd := &Command{\n\t\tName: \"delete\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"delete device\",\n\t\tData: data,\n\t\tAction: deleteDevice,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device delete\")\n\tflags.StringVar(&data.id, \"id\", \"\", \"The ID of the device to delete (REQUIRED)\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject, \"The ID of the project the device belongs to (defaults to active project)\")\n\n\treturn cmd\n}\n\nfunc deleteDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceId)\n\tpath := c.ApiPath + \"\/\" + data.id\n\t_, err := ctx.Client.\n\t\tDelete(path).\n\t\tExpect(204).\n\t\tProjectToken(ctx.Profile, data.projectId).\n\t\tExecute()\n\n\tif err == nil {\n\t\tfmt.Println(\"Device successfully deleted\")\n\t}\n\n\treturn err\n}\nUse constants for device sort matchingpackage command\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\ntype deviceData struct {\n\tProjectId uint64 `json:\"project_id\"`\n\tDeviceId string `json:\"device_id,omitempty\"`\n\tDeviceName string `json:\"device_name,omitempty\"`\n\tDeviceType string `json:\"device_type,omitempty\"`\n\tCreated string `json:\"created,omitempty\"`\n\t\/\/ Private fields, not marshalled into JSON\n\tisUpdate bool\n}\n\nfunc (d *deviceData) IsValid() bool {\n\tif d.isUpdate {\n\t\treturn len(d.DeviceId) > 0 &&\n\t\t\t(len(d.DeviceName) > 0 || len(d.DeviceType) > 0)\n\t}\n\treturn d.ProjectId != 0\n}\n\n\/\/ deviceId is a simpler struct for calls that just consist of a device id\n\/\/ and optionally projectId\ntype deviceId struct {\n\tid string\n\tprojectId uint64\n}\n\nfunc (d *deviceId) IsValid() bool {\n\treturn len(d.id) > 
0\n}\n\n\/\/ NewDevicesCommand returns the base 'device' command.\nfunc NewDevicesCommand(ctx *Context) *Command {\n\tcmd := &Command{\n\t\tName: \"device\",\n\t\tUsage: \"Commands for managing devices.\",\n\t\tSubCommands: Mux{\n\t\t\t\"create\": newCreateDeviceCmd(ctx),\n\t\t\t\"delete\": newDeleteDeviceCmd(ctx),\n\t\t\t\"get\": newGetDeviceCmd(ctx),\n\t\t\t\"list\": newListDevicesCmd(ctx),\n\t\t\t\"update\": newUpdateDeviceCmd(ctx),\n\t\t},\n\t}\n\tcmd.NewFlagSet(\"iobeam device\")\n\n\treturn cmd\n}\n\nfunc newCreateOrUpdateDeviceCmd(ctx *Context, update bool, name string, action Action) *Command {\n\tdevice := deviceData{\n\t\tisUpdate: update,\n\t}\n\n\tcmd := &Command{\n\t\tName: name,\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: name + \" device\",\n\t\tData: &device,\n\t\tAction: action,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device \" + name)\n\tvar idDesc string\n\tif update {\n\t\tidDesc = \"ID of the device to be updated\"\n\t} else {\n\t\tidDesc = \"Device ID, if omitted a random one will be assigned (must be > 16 chars)\"\n\t}\n\tflags.StringVar(&device.DeviceId, \"id\", \"\", idDesc)\n\tflags.StringVar(&device.DeviceName, \"name\", \"\", \"The device name\")\n\tflags.StringVar(&device.DeviceType, \"type\", \"\", \"The type of device\")\n\tflags.Uint64Var(&device.ProjectId, \"projectId\", ctx.Profile.ActiveProject, \"Project ID associated with the device (if omitted, defaults to active project).\")\n\n\treturn cmd\n}\n\nfunc newCreateDeviceCmd(ctx *Context) *Command {\n\treturn newCreateOrUpdateDeviceCmd(ctx, false, \"create\", createDevice)\n}\n\nfunc newUpdateDeviceCmd(ctx *Context) *Command {\n\treturn newCreateOrUpdateDeviceCmd(ctx, true, \"update\", updateDevice)\n}\n\nfunc createDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceData)\n\t_, err := ctx.Client.\n\t\tPost(c.ApiPath).\n\t\tExpect(201).\n\t\tProjectToken(ctx.Profile, data.ProjectId).\n\t\tBody(data).\n\t\tResponseBody(c.Data).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\n\t\tdevice := body.(*deviceData)\n\t\tfmt.Println(\"New device created.\")\n\t\tfmt.Printf(\"Device ID: %v\\n\", device.DeviceId)\n\t\tfmt.Printf(\"Device Name: %v\\n\", device.DeviceName)\n\t\tfmt.Println()\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nfunc updateDevice(c *Command, ctx *Context) error {\n\n\tdevice := c.Data.(*deviceData)\n\n\trsp, err := ctx.Client.\n\t\tPatch(c.ApiPath+\"\/\"+device.DeviceId).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, device.ProjectId).\n\t\tBody(c.Data).\n\t\tExecute()\n\n\tif err == nil {\n\t\tfmt.Println(\"Device successfully updated\")\n\t} else if rsp.Http().StatusCode == 204 {\n\t\tfmt.Println(\"Device not modified\")\n\t\treturn nil\n\t}\n\n\treturn err\n}\n\nfunc newGetDeviceCmd(ctx *Context) *Command {\n\tdata := new(deviceId)\n\n\tcmd := &Command{\n\t\tName: \"get\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"get device information\",\n\t\tData: data,\n\t\tAction: getDevice,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device get\")\n\tflags.StringVar(&data.id, \"id\", \"\", \"Device ID to query (REQUIRED)\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject,\n\t\t\"Project ID to get devices from (if omitted, defaults to active project)\")\n\n\treturn cmd\n}\n\nfunc getDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceId)\n\tpath := c.ApiPath + \"\/\" + data.id\n\n\tdevice := new(deviceData)\n\t_, err := ctx.Client.\n\t\tGet(path).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, 
data.projectId).\n\t\tResponseBody(device).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\t\tdevice = body.(*deviceData)\n\t\tfmt.Printf(\"Device name: %v\\n\"+\n\t\t\t\"Device ID: %v\\n\"+\n\t\t\t\"Project ID: %v\\n\"+\n\t\t\t\"Type: %v\\n\"+\n\t\t\t\"Created: %v\\n\",\n\t\t\tdevice.DeviceName,\n\t\t\tdevice.DeviceId,\n\t\t\tdevice.ProjectId,\n\t\t\tdevice.DeviceType,\n\t\t\tdevice.Created)\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nconst (\n\torderName = \"name\"\n\torderNameReverse = orderName + \"-r\"\n\torderId = \"id\"\n\torderIdReverse = orderId + \"-r\"\n\torderDate = \"date\"\n\torderDateReverse = orderDate + \"-r\"\n)\n\nvar orders = []string{orderName, orderNameReverse, orderId, orderIdReverse,\n\torderDate, orderDateReverse}\n\ntype listData struct {\n\tprojectId uint64\n\torder string\n}\n\nfunc (d *listData) IsValid() bool {\n\tpidOk := d.projectId > 0\n\torderOk := isInList(d.order, orders)\n\treturn pidOk && orderOk\n}\n\nfunc newListDevicesCmd(ctx *Context) *Command {\n\tdata := new(listData)\n\n\tcmd := &Command{\n\t\tName: \"list\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"List devices for a given project.\",\n\t\tData: data,\n\t\tAction: listDevices,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device list\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject,\n\t\t\"Project ID to get devices from (if omitted, defaults to active project)\")\n\tflags.StringVar(&data.order, \"order\", orderDate,\n\t\t\"Sort order for results. Valid values: date(-r), id(-r), name(-r). Values ending with -r are reverse ordering.\")\n\n\treturn cmd\n}\n\ntype deviceSort struct {\n\titems []deviceData\n\torder string\n}\n\nfunc (a deviceSort) Len() int { return len(a.items) }\nfunc (a deviceSort) Swap(i, j int) { a.items[i], a.items[j] = a.items[j], a.items[i] }\nfunc (a deviceSort) Less(i, j int) bool {\n\tswitch a.order {\n\tcase orderName:\n\t\treturn a.items[i].DeviceName < a.items[j].DeviceName\n\tcase orderNameReverse:\n\t\treturn a.items[j].DeviceName < a.items[i].DeviceName\n\tcase orderId:\n\t\treturn a.items[i].DeviceId < a.items[j].DeviceId\n\tcase orderIdReverse:\n\t\treturn a.items[j].DeviceId < a.items[i].DeviceId\n\tcase orderDateReverse:\n\t\treturn a.items[j].Created < a.items[i].Created\n\tcase orderDate:\n\t\tfallthrough\n\tdefault:\n\t\treturn a.items[i].Created < a.items[j].Created\n\t}\n\treturn false\n}\n\nfunc listDevices(c *Command, ctx *Context) error {\n\ttype deviceList struct {\n\t\tDevices []deviceData\n\t}\n\n\tcmdArgs := c.Data.(*listData)\n\tpid := cmdArgs.projectId\n\n\t_, err := ctx.Client.\n\t\tGet(c.ApiPath).\n\t\tParamUint64(\"project_id\", pid).\n\t\tExpect(200).\n\t\tProjectToken(ctx.Profile, pid).\n\t\tResponseBody(new(deviceList)).\n\t\tResponseBodyHandler(func(body interface{}) error {\n\n\t\tlist := body.(*deviceList)\n\n\t\tfmt.Printf(\"Devices in project %v\\n\", pid)\n\t\tfmt.Println(\"-----\")\n\n\t\tsorted := &deviceSort{items: list.Devices, order: cmdArgs.order}\n\t\tsort.Sort(sorted)\n\t\tfor _, device := range sorted.items {\n\n\t\t\tfmt.Printf(\"Name: %v\\n\"+\n\t\t\t\t\"Device ID: %v\\n\"+\n\t\t\t\t\"Type: %v\\n\"+\n\t\t\t\t\"Created: %v\\n\\n\",\n\t\t\t\tdevice.DeviceName,\n\t\t\t\tdevice.DeviceId,\n\t\t\t\tdevice.DeviceType,\n\t\t\t\tdevice.Created)\n\t\t}\n\n\t\treturn nil\n\t}).Execute()\n\n\treturn err\n}\n\nfunc newDeleteDeviceCmd(ctx *Context) *Command {\n\tdata := new(deviceId)\n\n\tcmd := &Command{\n\t\tName: \"delete\",\n\t\tApiPath: \"\/v1\/devices\",\n\t\tUsage: \"delete 
device\",\n\t\tData: data,\n\t\tAction: deleteDevice,\n\t}\n\tflags := cmd.NewFlagSet(\"iobeam device delete\")\n\tflags.StringVar(&data.id, \"id\", \"\", \"The ID of the device to delete (REQUIRED)\")\n\tflags.Uint64Var(&data.projectId, \"projectId\", ctx.Profile.ActiveProject, \"The ID of the project the device belongs to (defaults to active project)\")\n\n\treturn cmd\n}\n\nfunc deleteDevice(c *Command, ctx *Context) error {\n\tdata := c.Data.(*deviceId)\n\tpath := c.ApiPath + \"\/\" + data.id\n\t_, err := ctx.Client.\n\t\tDelete(path).\n\t\tExpect(204).\n\t\tProjectToken(ctx.Profile, data.projectId).\n\t\tExecute()\n\n\tif err == nil {\n\t\tfmt.Println(\"Device successfully deleted\")\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"package command\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nconst (\n\t\/\/ dateFmt is the format we use when printing the date in\n\t\/\/ status update messages during monitoring.\n\tdateFmt = \"2006\/01\/02 15:04:05\"\n)\n\n\/\/ monitor wraps an evaluation monitor and holds metadata and\n\/\/ state information.\ntype monitor struct {\n\tui cli.Ui\n\tclient *api.Client\n\tstate *evalState\n\n\tsync.Mutex\n}\n\n\/\/ newMonitor returns a new monitor. The returned monitor will\n\/\/ write output information to the provided ui.\nfunc newMonitor(ui cli.Ui, client *api.Client) *monitor {\n\treturn &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: &evalState{\n\t\t\tallocs: make(map[string]*allocState),\n\t\t},\n\t}\n}\n\n\/\/ output is used to write informational messages to the ui.\nfunc (m *monitor) output(msg string) {\n\tm.ui.Output(fmt.Sprintf(\"%s %s\", time.Now().Format(dateFmt), msg))\n}\n\n\/\/ evalState is used to store the current \"state of the world\"\n\/\/ in the context of monitoring an evaluation.\ntype evalState struct {\n\tstatus string\n\tdesc string\n\tnodeID string\n\tallocs map[string]*allocState\n\twait time.Duration\n\tindex uint64\n}\n\n\/\/ allocState is used to track the state of an allocation\ntype allocState struct {\n\tid string\n\tgroup string\n\tnode string\n\tdesired string\n\tdesiredDesc string\n\tclient string\n\tindex uint64\n}\n\n\/\/ update is used to update our monitor with new state. 
It can be\n\/\/ called whether the passed information is new or not, and will\n\/\/ only dump update messages when state changes.\nfunc (m *monitor) update(eval *api.Evaluation, allocs []*api.AllocationListStub) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\texisting := m.state\n\n\t\/\/ Create the new state\n\tupdate := &evalState{\n\t\tstatus: eval.Status,\n\t\tdesc: eval.StatusDescription,\n\t\tnodeID: eval.NodeID,\n\t\tallocs: make(map[string]*allocState),\n\t\twait: eval.Wait,\n\t\tindex: eval.CreateIndex,\n\t}\n\tfor _, alloc := range allocs {\n\t\tupdate.allocs[alloc.ID] = &allocState{\n\t\t\tid: alloc.ID,\n\t\t\tgroup: alloc.TaskGroup,\n\t\t\tnode: alloc.NodeID,\n\t\t\tdesired: alloc.DesiredStatus,\n\t\t\tdesiredDesc: alloc.DesiredDescription,\n\t\t\tclient: alloc.ClientStatus,\n\t\t\tindex: alloc.CreateIndex,\n\t\t}\n\t}\n\tdefer func() { m.state = update }()\n\n\t\/\/ Check the allocations\n\tfor allocID, alloc := range update.allocs {\n\t\tif existing, ok := existing.allocs[allocID]; !ok {\n\t\t\tswitch {\n\t\t\tcase alloc.desired == structs.AllocDesiredStatusFailed:\n\t\t\t\t\/\/ New allocs with desired state failed indicate\n\t\t\t\t\/\/ scheduling failure.\n\t\t\t\tm.output(fmt.Sprintf(\"Scheduling error for group %q (%s)\",\n\t\t\t\t\talloc.group, alloc.desiredDesc))\n\n\t\t\tcase alloc.index < update.index:\n\t\t\t\t\/\/ New alloc with create index lower than the eval\n\t\t\t\t\/\/ create index indicates modification\n\t\t\t\tm.output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q modified: node %q, group %q\",\n\t\t\t\t\talloc.id, alloc.node, alloc.group))\n\n\t\t\tcase alloc.desired == structs.AllocDesiredStatusRun:\n\t\t\t\t\/\/ New allocation with desired status running\n\t\t\t\tm.output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q created: node %q, group %q\",\n\t\t\t\t\talloc.id, alloc.node, alloc.group))\n\t\t\t}\n\t\t} else {\n\t\t\tswitch {\n\t\t\tcase existing.client != alloc.client:\n\t\t\t\t\/\/ Allocation status has changed\n\t\t\t\tm.output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q status changed: %q -> %q\",\n\t\t\t\t\talloc.id, existing.client, alloc.client))\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Check if the status changed\n\tif existing.status != update.status {\n\t\tm.output(fmt.Sprintf(\"Evaluation status changed: %q -> %q\",\n\t\t\texisting.status, eval.Status))\n\t}\n\n\t\/\/ Check if the wait time is different\n\tif existing.wait == 0 && update.wait != 0 {\n\t\tm.output(fmt.Sprintf(\"Waiting %s before running eval\",\n\t\t\teval.Wait))\n\t}\n\n\t\/\/ Check if the nodeID changed\n\tif existing.nodeID == \"\" && update.nodeID != \"\" {\n\t\tm.output(fmt.Sprintf(\"Evaluation was assigned node ID %q\",\n\t\t\teval.NodeID))\n\t}\n}\n\n\/\/ monitor is used to start monitoring the given evaluation ID. It\n\/\/ writes output directly to the monitor's ui, and returns the\n\/\/ exit code for the command. The return code indicates monitoring\n\/\/ success or failure ONLY. 
It is no indication of the outcome of\n\/\/ the evaluation, since conflating these values obscures things.\nfunc (m *monitor) monitor(evalID string) int {\n\t\/\/ Check if the eval has already completed and fast-path it.\n\teval, _, err := m.client.Evaluations().Info(evalID, nil)\n\tif err != nil {\n\t\tm.ui.Error(fmt.Sprintf(\"Error reading evaluation: %s\", err))\n\t\treturn 1\n\t}\n\tswitch eval.Status {\n\tcase structs.EvalStatusComplete, structs.EvalStatusFailed:\n\t\tm.ui.Info(fmt.Sprintf(\"Evaluation %q already finished with status %q\",\n\t\t\tevalID, eval.Status))\n\t\treturn 0\n\t}\n\n\tm.ui.Info(fmt.Sprintf(\"Monitoring evaluation %q\", evalID))\n\tfor {\n\t\t\/\/ Query the evaluation\n\t\teval, _, err := m.client.Evaluations().Info(evalID, nil)\n\t\tif err != nil {\n\t\t\tm.ui.Error(fmt.Sprintf(\"Error reading evaluation: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Query the allocations associated with the evaluation\n\t\tallocs, _, err := m.client.Evaluations().Allocations(evalID, nil)\n\t\tif err != nil {\n\t\t\tm.ui.Error(fmt.Sprintf(\"Error reading allocations: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Update the state\n\t\tm.update(eval, allocs)\n\n\t\tswitch eval.Status {\n\t\tcase structs.EvalStatusComplete, structs.EvalStatusFailed:\n\t\t\tm.ui.Info(fmt.Sprintf(\"Evaluation %q finished with status %q\",\n\t\t\t\teval.ID, eval.Status))\n\t\tdefault:\n\t\t\t\/\/ Wait for the next update\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Monitor the next eval, if it exists.\n\t\tif eval.NextEval != \"\" {\n\t\t\tmon := newMonitor(m.ui, m.client)\n\t\t\treturn mon.monitor(eval.NextEval)\n\t\t}\n\t\tbreak\n\t}\n\n\treturn 0\n}\ncommand\/monitor: cleanuppackage command\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nconst (\n\t\/\/ updateWait is the amount of time to wait between status\n\t\/\/ updates. Because the monitor is poll-based, we use this\n\t\/\/ delay to avoid overwhelming the API server.\n\tupdateWait = time.Second\n)\n\n\/\/ evalState is used to store the current \"state of the world\"\n\/\/ in the context of monitoring an evaluation.\ntype evalState struct {\n\tstatus string\n\tdesc string\n\tnodeID string\n\tallocs map[string]*allocState\n\twait time.Duration\n\tindex uint64\n}\n\n\/\/ allocState is used to track the state of an allocation\ntype allocState struct {\n\tid string\n\tgroup string\n\tnode string\n\tdesired string\n\tdesiredDesc string\n\tclient string\n\tindex uint64\n}\n\n\/\/ monitor wraps an evaluation monitor and holds metadata and\n\/\/ state information.\ntype monitor struct {\n\tui cli.Ui\n\tclient *api.Client\n\tstate *evalState\n\n\tsync.Mutex\n}\n\n\/\/ newMonitor returns a new monitor. The returned monitor will\n\/\/ write output information to the provided ui.\nfunc newMonitor(ui cli.Ui, client *api.Client) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t}\n\tmon.init()\n\treturn mon\n}\n\n\/\/ init allocates substructures\nfunc (m *monitor) init() {\n\tm.state = &evalState{\n\t\tallocs: make(map[string]*allocState),\n\t}\n}\n\n\/\/ update is used to update our monitor with new state. 
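\n\/\/ A client status transition, for example, is reported with a line like\n\/\/ this (illustrative values):\n\/\/\n\/\/\tAllocation \"af81bc\" status changed: \"pending\" -> \"running\"\n\/\/\n\/\/ 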
It can be\n\/\/ called whether the passed information is new or not, and will\n\/\/ only dump update messages when state changes.\nfunc (m *monitor) update(eval *api.Evaluation, allocs []*api.AllocationListStub) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\texisting := m.state\n\n\t\/\/ Create the new state\n\tupdate := &evalState{\n\t\tstatus: eval.Status,\n\t\tdesc: eval.StatusDescription,\n\t\tnodeID: eval.NodeID,\n\t\tallocs: make(map[string]*allocState),\n\t\twait: eval.Wait,\n\t\tindex: eval.CreateIndex,\n\t}\n\tfor _, alloc := range allocs {\n\t\tupdate.allocs[alloc.ID] = &allocState{\n\t\t\tid: alloc.ID,\n\t\t\tgroup: alloc.TaskGroup,\n\t\t\tnode: alloc.NodeID,\n\t\t\tdesired: alloc.DesiredStatus,\n\t\t\tdesiredDesc: alloc.DesiredDescription,\n\t\t\tclient: alloc.ClientStatus,\n\t\t\tindex: alloc.CreateIndex,\n\t\t}\n\t}\n\tdefer func() { m.state = update }()\n\n\t\/\/ Check the allocations\n\tfor allocID, alloc := range update.allocs {\n\t\tif existing, ok := existing.allocs[allocID]; !ok {\n\t\t\tswitch {\n\t\t\tcase alloc.desired == structs.AllocDesiredStatusFailed:\n\t\t\t\t\/\/ New allocs with desired state failed indicate\n\t\t\t\t\/\/ scheduling failure.\n\t\t\t\tm.ui.Output(fmt.Sprintf(\"Scheduling error for group %q (%s)\",\n\t\t\t\t\talloc.group, alloc.desiredDesc))\n\n\t\t\tcase alloc.index < update.index:\n\t\t\t\t\/\/ New alloc with create index lower than the eval\n\t\t\t\t\/\/ create index indicates modification\n\t\t\t\tm.ui.Output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q modified: node %q, group %q\",\n\t\t\t\t\talloc.id, alloc.node, alloc.group))\n\n\t\t\tcase alloc.desired == structs.AllocDesiredStatusRun:\n\t\t\t\t\/\/ New allocation with desired status running\n\t\t\t\tm.ui.Output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q created: node %q, group %q\",\n\t\t\t\t\talloc.id, alloc.node, alloc.group))\n\t\t\t}\n\t\t} else {\n\t\t\tswitch {\n\t\t\tcase existing.client != alloc.client:\n\t\t\t\t\/\/ Allocation status has changed\n\t\t\t\tm.ui.Output(fmt.Sprintf(\n\t\t\t\t\t\"Allocation %q status changed: %q -> %q\",\n\t\t\t\t\talloc.id, existing.client, alloc.client))\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Check if the status changed\n\tif existing.status != update.status {\n\t\tm.ui.Output(fmt.Sprintf(\"Evaluation status changed: %q -> %q\",\n\t\t\texisting.status, eval.Status))\n\t}\n\n\t\/\/ Check if the wait time is different\n\tif existing.wait == 0 && update.wait != 0 {\n\t\tm.ui.Output(fmt.Sprintf(\"Waiting %s before running eval\",\n\t\t\teval.Wait))\n\t}\n\n\t\/\/ Check if the nodeID changed\n\tif existing.nodeID == \"\" && update.nodeID != \"\" {\n\t\tm.ui.Output(fmt.Sprintf(\"Evaluation was assigned node ID %q\",\n\t\t\teval.NodeID))\n\t}\n}\n\n\/\/ monitor is used to start monitoring the given evaluation ID. It\n\/\/ writes output directly to the monitor's ui, and returns the\n\/\/ exit code for the command. The return code indicates monitoring\n\/\/ success or failure ONLY. 
It is no indication of the outcome of\n\/\/ the evaluation, since conflating these values obscures things.\nfunc (m *monitor) monitor(evalID string) int {\n\t\/\/ Check if the eval has already completed and fast-path it.\n\teval, _, err := m.client.Evaluations().Info(evalID, nil)\n\tif err != nil {\n\t\tm.ui.Error(fmt.Sprintf(\"Error reading evaluation: %s\", err))\n\t\treturn 1\n\t}\n\tswitch eval.Status {\n\tcase structs.EvalStatusComplete, structs.EvalStatusFailed:\n\t\tm.ui.Info(fmt.Sprintf(\"Evaluation %q finished with status %q\",\n\t\t\tevalID, eval.Status))\n\t\treturn 0\n\t}\n\n\tm.ui.Info(fmt.Sprintf(\"Monitoring evaluation %q\", evalID))\n\tfor {\n\t\t\/\/ Query the evaluation\n\t\teval, _, err := m.client.Evaluations().Info(evalID, nil)\n\t\tif err != nil {\n\t\t\tm.ui.Error(fmt.Sprintf(\"Error reading evaluation: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Query the allocations associated with the evaluation\n\t\tallocs, _, err := m.client.Evaluations().Allocations(evalID, nil)\n\t\tif err != nil {\n\t\t\tm.ui.Error(fmt.Sprintf(\"Error reading allocations: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\t\/\/ Update the state\n\t\tm.update(eval, allocs)\n\n\t\tswitch eval.Status {\n\t\tcase structs.EvalStatusComplete, structs.EvalStatusFailed:\n\t\t\tm.ui.Info(fmt.Sprintf(\"Evaluation %q finished with status %q\",\n\t\t\t\teval.ID, eval.Status))\n\t\tdefault:\n\t\t\t\/\/ Wait for the next update\n\t\t\ttime.Sleep(updateWait)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Monitor the next eval, if it exists.\n\t\tif eval.NextEval != \"\" {\n\t\t\tm.init()\n\t\t\treturn m.monitor(eval.NextEval)\n\t\t}\n\t\tbreak\n\t}\n\n\treturn 0\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/bndw\/pick\/utils\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"Export decrypted credentials in JSON format\",\n\t\tLong: \"The export command is used to export decrypted credentials in JSON format.\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\trunCommand(Export, cmd, args)\n\t\t},\n\t})\n\n}\n\nfunc Export(args []string, flags *pflag.FlagSet) error {\n\tsafe, err := newSafeLoader().Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taccounts := safe.List()\n\tif len(accounts) < 1 {\n\t\treturn errors.New(\"No accounts to export\")\n\t}\n\n\tutils.PrettyPrint(accounts)\n\treturn nil\n}\nAdd confirmation prompt before exportingpackage commands\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/bndw\/pick\/utils\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n)\n\nfunc init() {\n\trootCmd.AddCommand(&cobra.Command{\n\t\tUse: \"export\",\n\t\tShort: \"Export decrypted credentials in JSON format\",\n\t\tLong: \"The export command is used to export decrypted credentials in JSON format.\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\trunCommand(Export, cmd, args)\n\t\t},\n\t})\n\n}\n\nfunc Export(args []string, flags *pflag.FlagSet) error {\n\tif !utils.Confirm(\"Do you really want to dump your whole pick safe?\", false) {\n\t\treturn errors.New(\"Aborted as requested\")\n\t}\n\n\tsafe, err := newSafeLoader().Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taccounts := safe.List()\n\tif len(accounts) < 1 {\n\t\treturn errors.New(\"No accounts to export\")\n\t}\n\n\tutils.PrettyPrint(accounts)\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free 
of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage common\n\nimport (\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/uber\/cherami-thrift\/.generated\/go\/metadata\"\n)\n\ntype (\n\t\/\/ UUIDResolver maps UUIDs to IP addrs and vice-versa\n\tUUIDResolver interface {\n\t\t\/\/ Lookup returns the host addr corresponding to the uuid\n\t\tLookup(uuid string) (string, error)\n\t\t\/\/ Reverse lookup returns the uuid corresponding to the host addr\n\t\tReverseLookup(addr string) (string, error)\n\t\t\/\/ Clears the in-memory cache\n\t\tClearCache()\n\t}\n\n\t\/\/ resolverImpl is an implementation of UUIDResolver that uses\n\t\/\/ cassandra as the underlying mapping store.\n\tresolverImpl struct {\n\t\trwLock sync.RWMutex\n\t\tcache map[string]string\n\t\tmClient metadata.TChanMetadataService\n\t}\n)\n\n\/\/ NewUUIDResolver returns an instance of UUIDResolver\n\/\/ that can be used to resolve host uuids to ip:port addresses\n\/\/ and vice-versa. The returned resolver uses Cassandra as the backend\n\/\/ store for persisting the mapping. The resolver also\n\/\/ maintains an in-memory cache for fast-lookups. 
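\n\/\/ A typical forward lookup reads as in this sketch (mClient being any\n\/\/ metadata.TChanMetadataService):\n\/\/\n\/\/\tr := NewUUIDResolver(mClient)\n\/\/\taddr, err := r.Lookup(hostUUID)\n\/\/\n\/\/ 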
Thread safe.\nfunc NewUUIDResolver(mClient metadata.TChanMetadataService) UUIDResolver {\n\tinstance := &resolverImpl{\n\t\tmClient: mClient,\n\t\tcache: make(map[string]string),\n\t}\n\treturn instance\n}\n\n\/\/ Resolve resolves the given uuid to a hostid\n\/\/ On success, returns the host:port for the uuid\n\/\/ On failure, error is returned\nfunc (r *resolverImpl) Lookup(uuid string) (string, error) {\n\n\tif addr, ok := r.cacheGet(uuid); ok {\n\t\treturn addr, nil\n\t}\n\n\taddr, err := r.mClient.UUIDToHostAddr(nil, uuid)\n\tif err == nil && len(addr) > 0 {\n\t\tr.cachePut(uuid, addr)\n\t\tr.cachePut(addr, uuid)\n\t\treturn addr, nil\n\t}\n\n\treturn \"\", err\n}\n\n\/\/ Resolve resolves the given addr to a uuid\n\/\/ On success, returns the uuid for the addr\n\/\/ On failure, error is returned\nfunc (r *resolverImpl) ReverseLookup(addr string) (string, error) {\n\n\tif uuid, ok := r.cacheGet(addr); ok {\n\t\treturn uuid, nil\n\t}\n\n\tuuid, err := r.mClient.HostAddrToUUID(nil, addr)\n\tif err == nil && len(uuid) > 0 {\n\t\tr.cachePut(uuid, addr)\n\t\tr.cachePut(addr, uuid)\n\t\treturn uuid, nil\n\t}\n\n\treturn \"\", err\n}\n\n\/\/ Clear caches clears the in-memory resolver cache\nfunc (r *resolverImpl) ClearCache() {\n\tr.rwLock.Lock()\n\tdefer r.rwLock.Unlock()\n\tr.cache = make(map[string]string)\n}\n\nfunc (r *resolverImpl) cacheGet(key string) (string, bool) {\n\tr.rwLock.RLock()\n\tdefer r.rwLock.RUnlock()\n\tv, ok := r.cache[key]\n\treturn v, ok\n}\n\nfunc (r *resolverImpl) cachePut(key string, value string) {\n\tr.rwLock.Lock()\n\tdefer r.rwLock.Unlock()\n\tr.cache[key] = value\n}\n\n\/\/ Paths and consumer groups are of the form \"\/foo.bar\/bax\". Although we don't\n\/\/ currently support \"folders\", relative paths, or other filesystem-like\n\/\/ operations, it is best to enforce this style of naming up front in case we would\n\/\/ like to in the future. We don't allow our clients to encroach directly on the\n\/\/ root, so that destinations and consumer groups are at least grouped under a team\n\/\/ or project name. We also require these names to have one letter character at least,\n\/\/ so names like \/.\/. aren't valid\n\n\/\/ PathRegex regex for destination path\nvar PathRegex = regexp.MustCompile(`^\/[\\w.]*[a-zA-Z][\\w.]*\/[\\w.]*[a-zA-Z][\\w.]*$`)\n\n\/\/ PathDLQRegex regex for dlq destination path\nvar PathDLQRegex = regexp.MustCompile(`^\/[\\w.]*[a-zA-Z][\\w.]*\/[\\w.]*[a-zA-Z][\\w.]*.dlq$`)\n\n\/\/ PathRegexAllowUUID For special destinations (e.g. 
Dead letter queues) we allow a string UUID as path\nvar PathRegexAllowUUID, _ = regexp.Compile(`^(\/[\w.]*[a-zA-Z][\w.]*\/[\w.]*[a-zA-Z][\w.]*|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$`)\n\n\/\/ ConsumerGroupRegex regex for consumer group path\nvar ConsumerGroupRegex = PathRegex\n\n\/\/ UUIDRegex regex for uuid\nvar UUIDRegex, _ = regexp.Compile(`^[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}$`)\nFix path-regex to allow for number only strings (#216)\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage common\n\nimport (\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/uber\/cherami-thrift\/.generated\/go\/metadata\"\n)\n\ntype (\n\t\/\/ UUIDResolver maps UUIDs to IP addrs and vice-versa\n\tUUIDResolver interface {\n\t\t\/\/ Lookup returns the host addr corresponding to the uuid\n\t\tLookup(uuid string) (string, error)\n\t\t\/\/ Reverse lookup returns the uuid corresponding to the host addr\n\t\tReverseLookup(addr string) (string, error)\n\t\t\/\/ Clears the in-memory cache\n\t\tClearCache()\n\t}\n\n\t\/\/ resolverImpl is an implementation of UUIDResolver that uses\n\t\/\/ cassandra as the underlying mapping store.\n\tresolverImpl struct {\n\t\trwLock sync.RWMutex\n\t\tcache map[string]string\n\t\tmClient metadata.TChanMetadataService\n\t}\n)\n\n\/\/ NewUUIDResolver returns an instance of UUIDResolver\n\/\/ that can be used to resolve host uuids to ip:port addresses\n\/\/ and vice-versa. The returned resolver uses Cassandra as the backend\n\/\/ store for persisting the mapping. The resolver also\n\/\/ maintains an in-memory cache for fast-lookups. 
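\n\/\/ Given a resolver r, the reverse direction works the same way (sketch;\n\/\/ addresses are ip:port strings):\n\/\/\n\/\/\tuuid, err := r.ReverseLookup(\"10.6.1.5:4922\")\n\/\/\n\/\/ 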
Thread safe.\nfunc NewUUIDResolver(mClient metadata.TChanMetadataService) UUIDResolver {\n\tinstance := &resolverImpl{\n\t\tmClient: mClient,\n\t\tcache: make(map[string]string),\n\t}\n\treturn instance\n}\n\n\/\/ Resolve resolves the given uuid to a hostid\n\/\/ On success, returns the host:port for the uuid\n\/\/ On failure, error is returned\nfunc (r *resolverImpl) Lookup(uuid string) (string, error) {\n\n\tif addr, ok := r.cacheGet(uuid); ok {\n\t\treturn addr, nil\n\t}\n\n\taddr, err := r.mClient.UUIDToHostAddr(nil, uuid)\n\tif err == nil && len(addr) > 0 {\n\t\tr.cachePut(uuid, addr)\n\t\tr.cachePut(addr, uuid)\n\t\treturn addr, nil\n\t}\n\n\treturn \"\", err\n}\n\n\/\/ Resolve resolves the given addr to a uuid\n\/\/ On success, returns the uuid for the addr\n\/\/ On failure, error is returned\nfunc (r *resolverImpl) ReverseLookup(addr string) (string, error) {\n\n\tif uuid, ok := r.cacheGet(addr); ok {\n\t\treturn uuid, nil\n\t}\n\n\tuuid, err := r.mClient.HostAddrToUUID(nil, addr)\n\tif err == nil && len(uuid) > 0 {\n\t\tr.cachePut(uuid, addr)\n\t\tr.cachePut(addr, uuid)\n\t\treturn uuid, nil\n\t}\n\n\treturn \"\", err\n}\n\n\/\/ Clear caches clears the in-memory resolver cache\nfunc (r *resolverImpl) ClearCache() {\n\tr.rwLock.Lock()\n\tdefer r.rwLock.Unlock()\n\tr.cache = make(map[string]string)\n}\n\nfunc (r *resolverImpl) cacheGet(key string) (string, bool) {\n\tr.rwLock.RLock()\n\tdefer r.rwLock.RUnlock()\n\tv, ok := r.cache[key]\n\treturn v, ok\n}\n\nfunc (r *resolverImpl) cachePut(key string, value string) {\n\tr.rwLock.Lock()\n\tdefer r.rwLock.Unlock()\n\tr.cache[key] = value\n}\n\n\/\/ Paths and consumer groups are of the form \"\/foo.bar\/bax\". Although we don't\n\/\/ currently support \"folders\", relative paths, or other filesystem-like\n\/\/ operations, it is best to enforce this style of naming up front in case we would\n\/\/ like to in the future. We don't allow our clients to encroach directly on the\n\/\/ root, so that destinations and consumer groups are at least grouped under a team\n\/\/ or project name. We also require these names to have one letter character at least,\n\/\/ so names like \/.\/. aren't valid\n\n\/\/ PathRegex regex for destination path\nvar PathRegex = regexp.MustCompile(`^\/[\\w.]*[[:alnum:]][\\w.]*\/[\\w.]*[[:alnum:]][\\w.]*$`)\n\n\/\/ PathDLQRegex regex for dlq destination path\nvar PathDLQRegex = regexp.MustCompile(`^\/[\\w.]*[[:alnum:]][\\w.]*\/[\\w.]*[[:alnum:]][\\w.]*.dlq$`)\n\n\/\/ PathRegexAllowUUID For special destinations (e.g. 
Dead letter queues) we allow a string UUID as path\nvar PathRegexAllowUUID, _ = regexp.Compile(`^(\/[\\w.]*[[:alnum:]][\\w.]*\/[\\w.]*[[:alnum:]][\\w.]*|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$`)\n\n\/\/ ConsumerGroupRegex regex for consumer group path\nvar ConsumerGroupRegex = PathRegex\n\n\/\/ UUIDRegex regex for uuid\nvar UUIDRegex, _ = regexp.Compile(`^[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}$`)\n<|endoftext|>"} {"text":"\/\/ +build linux\n\npackage daemon\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/containerd\/containerd\/pkg\/apparmor\"\n\tcontainertypes \"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/container\"\n\t\"github.com\/docker\/docker\/daemon\/exec\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"gotest.tools\/v3\/assert\"\n)\n\nfunc TestExecSetPlatformOpt(t *testing.T) {\n\tif !apparmor.HostSupports() {\n\t\tt.Skip(\"requires AppArmor to be enabled\")\n\t}\n\td := &Daemon{}\n\tc := &container.Container{AppArmorProfile: \"my-custom-profile\"}\n\tec := &exec.Config{}\n\tp := &specs.Process{}\n\n\terr := d.execSetPlatformOpt(c, ec, p)\n\tassert.NilError(t, err)\n\tassert.Equal(t, \"my-custom-profile\", p.ApparmorProfile)\n}\n\n\/\/ TestExecSetPlatformOptPrivileged verifies that `docker exec --privileged`\n\/\/ does not disable AppArmor profiles. Exec currently inherits the `Privileged`\n\/\/ configuration of the container. See https:\/\/github.com\/moby\/moby\/pull\/31773#discussion_r105586900\n\/\/\n\/\/ This behavior may change in future, but test for the behavior to prevent it\n\/\/ from being changed accidentally.\nfunc TestExecSetPlatformOptPrivileged(t *testing.T) {\n\tif !apparmor.HostSupports() {\n\t\tt.Skip(\"requires AppArmor to be enabled\")\n\t}\n\td := &Daemon{}\n\tc := &container.Container{AppArmorProfile: \"my-custom-profile\"}\n\tec := &exec.Config{Privileged: true}\n\tp := &specs.Process{}\n\n\terr := d.execSetPlatformOpt(c, ec, p)\n\tassert.NilError(t, err)\n\tassert.Equal(t, \"my-custom-profile\", p.ApparmorProfile)\n\n\tc.HostConfig = &containertypes.HostConfig{Privileged: true}\n\terr = d.execSetPlatformOpt(c, ec, p)\n\tassert.NilError(t, err)\n\tassert.Equal(t, unconfinedAppArmorProfile, p.ApparmorProfile)\n}\nFix panic in TestExecSetPlatformOpt, TestExecSetPlatformOptPrivileged\/\/ +build linux\n\npackage daemon\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/containerd\/containerd\/pkg\/apparmor\"\n\tcontainertypes \"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/container\"\n\t\"github.com\/docker\/docker\/daemon\/config\"\n\t\"github.com\/docker\/docker\/daemon\/exec\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"gotest.tools\/v3\/assert\"\n)\n\nfunc TestExecSetPlatformOptAppArmor(t *testing.T) {\n\tappArmorEnabled := apparmor.HostSupports()\n\n\ttests := []struct {\n\t\tdoc string\n\t\tprivileged bool\n\t\tappArmorProfile string\n\t\texpectedProfile string\n\t}{\n\t\t{\n\t\t\tdoc: \"default options\",\n\t\t\texpectedProfile: defaultAppArmorProfile,\n\t\t},\n\t\t{\n\t\t\tdoc: \"custom profile\",\n\t\t\tappArmorProfile: \"my-custom-profile\",\n\t\t\texpectedProfile: \"my-custom-profile\",\n\t\t},\n\t\t{\n\t\t\tdoc: \"privileged container\",\n\t\t\tprivileged: true,\n\t\t\texpectedProfile: unconfinedAppArmorProfile,\n\t\t},\n\t\t{\n\t\t\tdoc: \"privileged container, custom profile\",\n\t\t\tprivileged: true,\n\t\t\tappArmorProfile: 
\"my-custom-profile\",\n\t\t\texpectedProfile: \"my-custom-profile\",\n\t\t\t\/\/ FIXME: execSetPlatformOpts prefers custom profiles over \"privileged\",\n\t\t\t\/\/ which looks like a bug (--privileged on the container should\n\t\t\t\/\/ disable apparmor, seccomp, and selinux); see the code at:\n\t\t\t\/\/ https:\/\/github.com\/moby\/moby\/blob\/46cdcd206c56172b95ba5c77b827a722dab426c5\/daemon\/exec_linux.go#L32-L40\n\t\t\t\/\/ expectedProfile: unconfinedAppArmorProfile,\n\t\t},\n\t}\n\n\td := &Daemon{configStore: &config.Config{}}\n\n\t\/\/ Currently, `docker exec --privileged` inherits the Privileged configuration\n\t\/\/ of the container, and does not disable AppArmor.\n\t\/\/ See https:\/\/github.com\/moby\/moby\/pull\/31773#discussion_r105586900\n\t\/\/\n\t\/\/ This behavior may change in future, but to verify the current behavior,\n\t\/\/ we run the test both with \"exec\" and \"exec --privileged\", which should\n\t\/\/ both give the same result.\n\tfor _, execPrivileged := range []bool{false, true} {\n\t\tfor _, tc := range tests {\n\t\t\ttc := tc\n\t\t\tdoc := tc.doc\n\t\t\tif !appArmorEnabled {\n\t\t\t\t\/\/ no profile should be set if the host does not support AppArmor\n\t\t\t\tdoc += \" (apparmor disabled)\"\n\t\t\t\ttc.expectedProfile = \"\"\n\t\t\t}\n\t\t\tif execPrivileged {\n\t\t\t\tdoc += \" (exec privileged)\"\n\t\t\t}\n\t\t\tt.Run(doc, func(t *testing.T) {\n\t\t\t\tc := &container.Container{\n\t\t\t\t\tAppArmorProfile: tc.appArmorProfile,\n\t\t\t\t\tHostConfig: &containertypes.HostConfig{\n\t\t\t\t\t\tPrivileged: tc.privileged,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tec := &exec.Config{Privileged: execPrivileged}\n\t\t\t\tp := &specs.Process{}\n\n\t\t\t\terr := d.execSetPlatformOpt(c, ec, p)\n\t\t\t\tassert.NilError(t, err)\n\t\t\t\tassert.Equal(t, p.ApparmorProfile, tc.expectedProfile)\n\t\t\t})\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\texif \"github.com\/garyhouston\/exif44\"\n\tjseg \"github.com\/garyhouston\/jpegsegs\"\n\ttiff \"github.com\/garyhouston\/tiff66\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc processTIFF(infile io.Reader, outfile io.Writer) error {\n\tbuf, err := ioutil.ReadAll(infile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalidTIFF, order, ifdPos := tiff.GetHeader(buf)\n\tif !validTIFF {\n\t\treturn errors.New(\"processTIFF: invalid TIFF header\")\n\t}\n\troot, err := tiff.GetIFDTree(buf, order, ifdPos, tiff.TIFFSpace)\n\tif err != nil {\n\t\treturn err\n\t}\n\troot.Fix()\n\tfileSize := tiff.HeaderSize + root.TreeSize()\n\tout := make([]byte, fileSize)\n\ttiff.PutHeader(out, order, tiff.HeaderSize)\n\t_, err = root.PutIFDTree(out, tiff.HeaderSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = outfile.Write(out)\n\treturn err\n}\n\nfunc processJPEG(infile io.Reader, outfile io.Writer) error {\n\tscanner, err := jseg.NewScanner(infile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdumper, err := jseg.NewDumper(outfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tmarker, buf, err := scanner.Scan()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif marker == jseg.SOS {\n\t\t\t\/\/ Start of scan data, no more metadata expected.\n\t\t\tif err := dumper.Dump(marker, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr := dumper.Copy(scanner)\n\t\t\treturn err\n\t\t}\n\t\tif marker == jseg.APP0+1 {\n\t\t\tisExif, next := exif.GetHeader(buf)\n\t\t\tif isExif {\n\t\t\t\ttree, err := exif.GetExifTree(buf[next:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\ttree.Tree.Fix()\n\t\t\t\tapp1 := make([]byte, exif.HeaderSize+tree.TreeSize())\n\t\t\t\tnext := exif.PutHeader(app1)\n\t\t\t\t_, err = tree.Put(app1[next:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbuf = app1\n\t\t\t}\n\n\t\t}\n\t\tif err := dumper.Dump(marker, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nconst (\n\tTIFFFile = 1\n\tJPEGFile = 2\n)\n\n\/\/ Determine if file is TIFF, JPEG or neither (error)\nfunc fileType(file io.Reader) (int, error) {\n\tbuf := make([]byte, tiff.HeaderSize)\n\tif _, err := io.ReadFull(file, buf); err != nil {\n\t\treturn 0, err\n\t}\n\tif jseg.IsJPEGHeader(buf) {\n\t\treturn JPEGFile, nil\n\t}\n\tif validTIFF, _, _ := tiff.GetHeader(buf); validTIFF {\n\t\treturn TIFFFile, nil\n\t}\n\treturn 0, errors.New(\"File doesn't have a TIFF or JPEG header\")\n}\n\n\/\/ Decode a TIFF file, or the Exif segment in a JPEG file, then re-encode\n\/\/ it and write to a new file.\nfunc main() {\n\tif len(os.Args) != 3 {\n\t\tfmt.Printf(\"Usage: %s file outfile\\n\", os.Args[0])\n\t\treturn\n\t}\n\tinfile, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer infile.Close()\n\tfileType, err := fileType(infile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := infile.Seek(0, 0); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toutfile, err := os.Create(os.Args[2])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer outfile.Close()\n\tif fileType == TIFFFile {\n\t\terr = processTIFF(infile, outfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\terr = processJPEG(infile, outfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\nadjust for changes to libpackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\texif \"github.com\/garyhouston\/exif44\"\n\tjseg \"github.com\/garyhouston\/jpegsegs\"\n\ttiff \"github.com\/garyhouston\/tiff66\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc processTIFF(infile io.Reader, outfile io.Writer) error {\n\tbuf, err := ioutil.ReadAll(infile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalidTIFF, order, ifdPos := tiff.GetHeader(buf)\n\tif !validTIFF {\n\t\treturn errors.New(\"processTIFF: invalid TIFF header\")\n\t}\n\troot, err := tiff.GetIFDTree(buf, order, ifdPos, tiff.TIFFSpace)\n\tif err != nil {\n\t\treturn err\n\t}\n\troot.Fix()\n\tfileSize := tiff.HeaderSize + root.TreeSize()\n\tout := make([]byte, fileSize)\n\ttiff.PutHeader(out, order, tiff.HeaderSize)\n\t_, err = root.PutIFDTree(out, tiff.HeaderSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = outfile.Write(out)\n\treturn err\n}\n\nfunc processJPEG(infile io.Reader, outfile io.Writer) error {\n\tscanner, err := jseg.NewScanner(infile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdumper, err := jseg.NewDumper(outfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tmarker, buf, err := scanner.Scan()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif marker == jseg.SOS {\n\t\t\t\/\/ Start of scan data, no more metadata expected.\n\t\t\tif err := dumper.Dump(marker, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr := dumper.Copy(scanner)\n\t\t\treturn err\n\t\t}\n\t\tif marker == jseg.APP0+1 {\n\t\t\tisExif, next := exif.GetHeader(buf)\n\t\t\tif isExif {\n\t\t\t\ttree, err := exif.GetExifTree(buf[next:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ttree.TIFF.Fix()\n\t\t\t\tapp1 := make([]byte, exif.HeaderSize+tree.TreeSize())\n\t\t\t\tnext := exif.PutHeader(app1)\n\t\t\t\t_, err = tree.Put(app1[next:])\n\t\t\t\tif 
err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbuf = app1\n\t\t\t}\n\n\t\t}\n\t\tif err := dumper.Dump(marker, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nconst (\n\tTIFFFile = 1\n\tJPEGFile = 2\n)\n\n\/\/ Determine if file is TIFF, JPEG or neither (error)\nfunc fileType(file io.Reader) (int, error) {\n\tbuf := make([]byte, tiff.HeaderSize)\n\tif _, err := io.ReadFull(file, buf); err != nil {\n\t\treturn 0, err\n\t}\n\tif jseg.IsJPEGHeader(buf) {\n\t\treturn JPEGFile, nil\n\t}\n\tif validTIFF, _, _ := tiff.GetHeader(buf); validTIFF {\n\t\treturn TIFFFile, nil\n\t}\n\treturn 0, errors.New(\"File doesn't have a TIFF or JPEG header\")\n}\n\n\/\/ Decode a TIFF file, or the Exif segment in a JPEG file, then re-encode\n\/\/ it and write to a new file.\nfunc main() {\n\tif len(os.Args) != 3 {\n\t\tfmt.Printf(\"Usage: %s file outfile\\n\", os.Args[0])\n\t\treturn\n\t}\n\tinfile, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer infile.Close()\n\tfileType, err := fileType(infile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := infile.Seek(0, 0); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toutfile, err := os.Create(os.Args[2])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer outfile.Close()\n\tif fileType == TIFFFile {\n\t\terr = processTIFF(infile, outfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\terr = processJPEG(infile, outfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package service\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/slok\/ragnarok\/master\/model\"\n)\n\n\/\/ FailureRepository is the way the master keeps track of the failures.\ntype FailureRepository interface {\n\t\/\/ Store adds a failure to the registry.\n\tStore(failure *model.Failure) error\n\n\t\/\/ Delete deletes a failure from the registry.\n\tDelete(id string)\n\n\t\/\/ Get gets a failure from the registry.\n\tGet(id string) (*model.Failure, bool)\n\n\t\/\/ GetAll gets all the failures from the registry.\n\tGetAll() map[string]*model.Failure\n\n\t\/\/ GetAllByNode gets all the failures of a node from the registry.\n\tGetAllByNode(nodeID string) map[string]*model.Failure\n}\n\n\/\/ MemFailureRepository is a representation of the failure registry using a memory map.\ntype MemFailureRepository struct {\n\treg map[string]*model.Failure\n\tregByNode map[string]map[string]*model.Failure\n\tsync.Mutex\n}\n\n\/\/ NewMemFailureRepository returns a new MemFailureRepository\nfunc NewMemFailureRepository() *MemFailureRepository {\n\treturn &MemFailureRepository{\n\t\treg: map[string]*model.Failure{},\n\t\tregByNode: map[string]map[string]*model.Failure{},\n\t}\n}\n\n\/\/ Store satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Store(failure *model.Failure) error {\n\tm.Lock()\n\tdefer m.Unlock()\n\tm.reg[failure.ID] = failure\n\tif _, ok := m.regByNode[failure.NodeID]; !ok {\n\t\tm.regByNode[failure.NodeID] = map[string]*model.Failure{}\n\t}\n\tm.regByNode[failure.NodeID][failure.ID] = failure\n\n\treturn nil\n}\n\n\/\/ Delete satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Delete(id string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tf, ok := m.reg[id]\n\tif !ok {\n\t\treturn\n\t}\n\n\tdelete(m.reg, id)\n\tdelete(m.regByNode[f.NodeID], id)\n}\n\n\/\/ Get satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Get(id string) (*model.Failure, bool) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tf, ok := m.reg[id]\n\n\treturn f, ok\n}\n\n\/\/ GetAll satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) GetAll() map[string]*model.Failure {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\treturn m.reg\n}\n\n\/\/ GetAllByNode satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) GetAllByNode(nodeID string) map[string]*model.Failure {\n\tm.Lock()\n\tdefer m.Unlock()\n\treg, ok := m.regByNode[nodeID]\n\tif !ok {\n\t\treg = make(map[string]*model.Failure)\n\t}\n\treturn reg\n}\nReturn slice instead of map on failure list getterspackage service\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/slok\/ragnarok\/master\/model\"\n)\n\n\/\/ FailureRepository is the way the master keeps track of the failures.\ntype FailureRepository interface {\n\t\/\/ Store adds a failure to the registry.\n\tStore(failure *model.Failure) error\n\n\t\/\/ Delete deletes a failure from the registry.\n\tDelete(id string)\n\n\t\/\/ Get gets a failure from the registry.\n\tGet(id string) (*model.Failure, bool)\n\n\t\/\/ GetAll gets all the failures from the registry.\n\tGetAll() []*model.Failure\n\n\t\/\/ GetAllByNode gets all the failures of a node from the registry.\n\tGetAllByNode(nodeID string) []*model.Failure\n}\n\n\/\/ MemFailureRepository is a representation of the failure registry using a memory map.\ntype MemFailureRepository struct {\n\treg map[string]*model.Failure\n\tregByNode map[string]map[string]*model.Failure\n\tsync.Mutex\n}\n\n\/\/ NewMemFailureRepository returns a new MemFailureRepository\nfunc NewMemFailureRepository() *MemFailureRepository {\n\treturn &MemFailureRepository{\n\t\treg: map[string]*model.Failure{},\n\t\tregByNode: map[string]map[string]*model.Failure{},\n\t}\n}\n\n\/\/ Store satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Store(failure *model.Failure) error {\n\tm.Lock()\n\tdefer m.Unlock()\n\tm.reg[failure.ID] = failure\n\tif _, ok := m.regByNode[failure.NodeID]; !ok {\n\t\tm.regByNode[failure.NodeID] = map[string]*model.Failure{}\n\t}\n\tm.regByNode[failure.NodeID][failure.ID] = failure\n\n\treturn nil\n}\n\n\/\/ Delete satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Delete(id string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tf, ok := m.reg[id]\n\tif !ok {\n\t\treturn\n\t}\n\n\tdelete(m.reg, id)\n\tdelete(m.regByNode[f.NodeID], id)\n}\n\n\/\/ Get satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) Get(id string) (*model.Failure, bool) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tf, ok := m.reg[id]\n\n\treturn f, ok\n}\n\n\/\/ GetAll satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) GetAll() []*model.Failure {\n\tm.Lock()\n\tdefer m.Unlock()\n\tres := []*model.Failure{}\n\tfor _, f := range m.reg {\n\t\tres = append(res, f)\n\t}\n\treturn res\n}\n\n\/\/ GetAllByNode satisfies FailureRepository interface.\nfunc (m *MemFailureRepository) GetAllByNode(nodeID string) []*model.Failure {\n\tm.Lock()\n\tdefer m.Unlock()\n\tres := []*model.Failure{}\n\ttmpReg, ok := m.regByNode[nodeID]\n\tif ok {\n\t\tfor _, f := range tmpReg {\n\t\t\tres = append(res, f)\n\t\t}\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"package molecule\n\nimport (\n\t\"sync\"\n\n\tcmn \"github.com\/RxnWeaver\/rxnweaver\/common\"\n)\n\n\/\/ nextMolIdHolder is a synchronised struct used to assign a\n\/\/ globally-unique ID to each molecule.\ntype nextMolIdHolder struct {\n\tmu sync.Mutex\n\tnextId uint32\n}\n\nvar nextMolId nextMolIdHolder\n\nfunc nextMoleculeId() uint32 {\n\tnextMolId.mu.Lock()\n\tdefer nextMolId.mu.Unlock()\n\n\tnextMolId.nextId++\n\treturn nextMolId.nextId\n}\n\n\/\/ Attribute represents a (key, value) pair that annotates this\n\/\/ molecule.\n\/\/\n\/\/ A given molecule can have zero or more such attributes.\ntype Attribute struct {\n\tname string\n\tvalue string\n}\n\n\/\/ Molecule represents a chemical molecule.\n\/\/\n\/\/ It holds information concerning its atoms, bonds, rings, etc. Note\n\/\/ that a molecule is expected to be a single connected component.\ntype Molecule struct {\n\tid uint32 \/\/ The globally-unique ID of this molecule.\n\n\tatoms []*_Atom \/\/ List of atoms in this molecule.\n\tbonds []*_Bond \/\/ List of bonds in this molecule.\n\trings []*_Ring \/\/ List of rings in this molecule.\n\tringSystems []*_RingSystem \/\/ List of ring systems in this molecule.\n\n\tnextAtomIid uint16 \/\/ Running number for atom input IDs.\n\tnextBondId uint16 \/\/ Running number for bond IDs.\n\tnextRingId uint8 \/\/ Running number for ring IDs.\n\tnextRingSystemId uint8 \/\/ Running number for ring system IDs.\n\n\tvendor string \/\/ Optional string identifying the supplier.\n\tvendorMoleculeId string \/\/ Optional supplier-specified ID.\n\n\tattributes []Attribute \/\/ Optional list of annotations.\n\n\tdists [][]int \/\/ Matrix of pair-wise distances between atoms.\n\tpaths [][]int \/\/ Lists of pair-wise paths between atoms.\n}\n\n\/\/ New creates and initialises a molecule.\nfunc New() *Molecule {\n\tmol := new(Molecule)\n\tmol.id = nextMoleculeId()\n\n\tmol.atoms = make([]*_Atom, 0, cmn.ListSizeLarge)\n\tmol.bonds = make([]*_Bond, 0, cmn.ListSizeLarge)\n\tmol.rings = make([]*_Ring, 0, cmn.ListSizeSmall)\n\tmol.ringSystems = make([]*_RingSystem, 0, cmn.ListSizeSmall)\n\n\tmol.nextAtomIid = 1\n\tmol.nextBondId = 1\n\tmol.nextRingId = 1\n\tmol.nextRingSystemId = 1\n\n\tmol.attributes = make([]Attribute, 0, cmn.ListSizeTiny)\n\n\treturn mol\n}\n\n\/\/ NewAtomBuilder answers a new atom builder.\nfunc (m *Molecule) NewAtomBuilder() *AtomBuilder {\n\treturn &AtomBuilder{m, nil}\n}\n\n\/\/ Id answers the globally-unique ID of this molecule.\nfunc (m *Molecule) Id() uint32 {\n\treturn m.id\n}\n\n\/\/ atomWithIid answers the atom for the given input ID, if found.\n\/\/ Answers `nil` otherwise.\nfunc (m *Molecule) atomWithIid(id uint16) *_Atom {\n\tfor _, a := range m.atoms {\n\t\tif a.iId == id {\n\t\t\treturn a\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ atomWithNid answers the atom for the given normalised ID, if found.\n\/\/ Answers `nil` otherwise.\nfunc (m *Molecule) atomWithNid(id uint16) *_Atom {\n\tfor _, a := range m.atoms {\n\t\tif a.nId == id {\n\t\t\treturn a\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondWithId answers the bond for the given ID, if found. Answers\n\/\/ `nil` otherwise.\nfunc (m *Molecule) bondWithId(id uint16) *_Bond {\n\tfor _, b := range m.bonds {\n\t\tif b.id == id {\n\t\t\treturn b\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ ringWithId answers the ring for the given ID, if found. Answers\n\/\/ `nil` otherwise.\nfunc (m *Molecule) ringWithId(id uint8) *_Ring {\n\tfor _, r := range m.rings {\n\t\tif r.id == id {\n\t\t\treturn r\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondBetween answers the bond between the two given atoms, if one\n\/\/ such exists. 
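\n\/\/ For example (sketch, using input IDs):\n\/\/\n\/\/\tif b := m.bondBetween(1, 2); b != nil {\n\/\/\t\t\/\/ atoms with input IDs 1 and 2 share a bond\n\/\/\t}\n\/\/\n\/\/ 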
Answers `nil` otherwise.\n\/\/\n\/\/ Note that the two given atoms are represented by their input IDs,\n\/\/ NOT normalised IDs.\nfunc (m *Molecule) bondBetween(a1id, a2id uint16) *_Bond {\n\tfor _, b := range m.bonds {\n\t\tif (b.a1 == a1id && b.a2 == a2id) || (b.a2 == a1id && b.a1 == a2id) {\n\t\t\treturn b\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondCount answers the total number of bonds of the given type in\n\/\/ this molecule.\nfunc (m *Molecule) bondCount(typ cmn.BondType) int {\n\tc := 0\n\tfor _, b := range m.bonds {\n\t\tif b.bType == typ {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ singleBondCount answers the total number of single bonds in this\n\/\/ molecule.\nfunc (m *Molecule) singleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeSingle)\n}\n\n\/\/ doubleBondCount answers the total number of double bonds in this\n\/\/ molecule.\nfunc (m *Molecule) doubleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeDouble)\n}\n\n\/\/ tripleBondCount answers the total number of triple bonds in this\n\/\/ molecule.\nfunc (m *Molecule) tripleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeTriple)\n}\n\n\/\/ aromaticRingCount answers the number of aromatic rings in this\n\/\/ molecule.\nfunc (m *Molecule) aromaticRingCount() int {\n\tc := 0\n\tfor _, r := range m.rings {\n\t\tif r.isAro {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ aromaticRingSystemCount answers the number of aromatic ring systems\n\/\/ in this molecule.\nfunc (m *Molecule) aromaticRingSystemCount() int {\n\tc := 0\n\tfor _, rs := range m.ringSystems {\n\t\tif rs.isAro {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\nAdd a method to `Molecule` to create a new bond builderpackage molecule\n\nimport (\n\t\"sync\"\n\n\tcmn \"github.com\/RxnWeaver\/rxnweaver\/common\"\n)\n\n\/\/ nextMolIdHolder is a synchronised struct used to assign a\n\/\/ globally-unique ID to each molecule.\ntype nextMolIdHolder struct {\n\tmu sync.Mutex\n\tnextId uint32\n}\n\nvar nextMolId nextMolIdHolder\n\nfunc nextMoleculeId() uint32 {\n\tnextMolId.mu.Lock()\n\tdefer nextMolId.mu.Unlock()\n\n\tnextMolId.nextId++\n\treturn nextMolId.nextId\n}\n\n\/\/ Attribute represents a (key, value) pair that annotates this\n\/\/ molecule.\n\/\/\n\/\/ A given molecule can have zero or more such attributes.\ntype Attribute struct {\n\tname string\n\tvalue string\n}\n\n\/\/ Molecule represents a chemical molecule.\n\/\/\n\/\/ It holds information concerning its atoms, bonds, rings, etc. 
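\n\/\/ A molecule is typically assembled through its builders, e.g. (sketch):\n\/\/\n\/\/\tmol := New()\n\/\/\tab := mol.NewAtomBuilder()\n\/\/\tbb := mol.NewBondBuilder()\n\/\/\n\/\/ 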
Note\n\/\/ that a molecule is expected to be a single connected component.\ntype Molecule struct {\n\tid uint32 \/\/ The globally-unique ID of this molecule.\n\n\tatoms []*_Atom \/\/ List of atoms in this molecule.\n\tbonds []*_Bond \/\/ List of bonds in this molecule.\n\trings []*_Ring \/\/ List of rings in this molecule.\n\tringSystems []*_RingSystem \/\/ List of ring systems in this molecule.\n\n\tnextAtomIid uint16 \/\/ Running number for atom input IDs.\n\tnextBondId uint16 \/\/ Running number for bond IDs.\n\tnextRingId uint8 \/\/ Running number for ring IDs.\n\tnextRingSystemId uint8 \/\/ Running number for ring system IDs.\n\n\tvendor string \/\/ Optional string identifying the supplier.\n\tvendorMoleculeId string \/\/ Optional supplier-specified ID.\n\n\tattributes []Attribute \/\/ Optional list of annotations.\n\n\tdists [][]int \/\/ Matrix of pair-wise distances between atoms.\n\tpaths [][]int \/\/ Lists of pair-wise paths between atoms.\n}\n\n\/\/ New creates and initialises a molecule.\nfunc New() *Molecule {\n\tmol := new(Molecule)\n\tmol.id = nextMoleculeId()\n\n\tmol.atoms = make([]*_Atom, 0, cmn.ListSizeLarge)\n\tmol.bonds = make([]*_Bond, 0, cmn.ListSizeLarge)\n\tmol.rings = make([]*_Ring, 0, cmn.ListSizeSmall)\n\tmol.ringSystems = make([]*_RingSystem, 0, cmn.ListSizeSmall)\n\n\tmol.nextAtomIid = 1\n\tmol.nextBondId = 1\n\tmol.nextRingId = 1\n\tmol.nextRingSystemId = 1\n\n\tmol.attributes = make([]Attribute, 0, cmn.ListSizeTiny)\n\n\treturn mol\n}\n\n\/\/ NewAtomBuilder answers a new atom builder.\nfunc (m *Molecule) NewAtomBuilder() *AtomBuilder {\n\treturn &AtomBuilder{m, nil}\n}\n\n\/\/ NewBondBuilder answers a new bond builder.\nfunc (m *Molecule) NewBondBuilder() *BondBuilder {\n\treturn &BondBuilder{m, nil}\n}\n\n\/\/ Id answers the globally-unique ID of this molecule.\nfunc (m *Molecule) Id() uint32 {\n\treturn m.id\n}\n\n\/\/ atomWithIid answers the atom for the given input ID, if found.\n\/\/ Answers `nil` otherwise.\nfunc (m *Molecule) atomWithIid(id uint16) *_Atom {\n\tfor _, a := range m.atoms {\n\t\tif a.iId == id {\n\t\t\treturn a\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ atomWithNid answers the atom for the given normalised ID, if found.\n\/\/ Answers `nil` otherwise.\nfunc (m *Molecule) atomWithNid(id uint16) *_Atom {\n\tfor _, a := range m.atoms {\n\t\tif a.nId == id {\n\t\t\treturn a\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondWithId answers the bond for the given ID, if found. Answers\n\/\/ `nil` otherwise.\nfunc (m *Molecule) bondWithId(id uint16) *_Bond {\n\tfor _, b := range m.bonds {\n\t\tif b.id == id {\n\t\t\treturn b\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ ringWithId answers the ring for the given ID, if found. Answers\n\/\/ `nil` otherwise.\nfunc (m *Molecule) ringWithId(id uint8) *_Ring {\n\tfor _, r := range m.rings {\n\t\tif r.id == id {\n\t\t\treturn r\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondBetween answers the bond between the two given atoms, if one\n\/\/ such exists. 
Answers `nil` otherwise.\n\/\/\n\/\/ Note that the two given atoms are represented by their input IDs,\n\/\/ NOT normalised IDs.\nfunc (m *Molecule) bondBetween(a1id, a2id uint16) *_Bond {\n\tfor _, b := range m.bonds {\n\t\tif (b.a1 == a1id && b.a2 == a2id) || (b.a2 == a1id && b.a1 == a2id) {\n\t\t\treturn b\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ bondCount answers the total number of bonds of the given type in\n\/\/ this molecule.\nfunc (m *Molecule) bondCount(typ cmn.BondType) int {\n\tc := 0\n\tfor _, b := range m.bonds {\n\t\tif b.bType == typ {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ singleBondCount answers the total number of single bonds in this\n\/\/ molecule.\nfunc (m *Molecule) singleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeSingle)\n}\n\n\/\/ doubleBondCount answers the total number of double bonds in this\n\/\/ molecule.\nfunc (m *Molecule) doubleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeDouble)\n}\n\n\/\/ tripleBondCount answers the total number of triple bonds in this\n\/\/ molecule.\nfunc (m *Molecule) tripleBondCount() int {\n\treturn m.bondCount(cmn.BondTypeTriple)\n}\n\n\/\/ aromaticRingCount answers the number of aromatic rings in this\n\/\/ molecule.\nfunc (m *Molecule) aromaticRingCount() int {\n\tc := 0\n\tfor _, r := range m.rings {\n\t\tif r.isAro {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ aromaticRingSystemCount answers the number of aromatic ring systems\n\/\/ in this molecule.\nfunc (m *Molecule) aromaticRingSystemCount() int {\n\tc := 0\n\tfor _, rs := range m.ringSystems {\n\t\tif rs.isAro {\n\t\t\tc++\n\t\t}\n\t}\n\n\treturn c\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage featuretests\n\nimport (\n\t\"flag\"\n\t\"testing\"\n\n\tgc \"gopkg.in\/check.v1\"\n\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\nvar runFeatureTests = flag.Bool(\"featuretests\", true, \"Run long-running feature tests.\")\n\nfunc init() {\n\n\tflag.Parse()\n\n\tif *runFeatureTests == false {\n\t\treturn\n\t}\n\t\/\/ Initialize all suites here.\n\tgc.Suite(&cmdJujuSuite{})\n\tgc.Suite(&annotationsSuite{})\n\tgc.Suite(&apiEnvironmentSuite{})\n\tgc.Suite(&blockSuite{})\n\tgc.Suite(&apiCharmsSuite{})\n\tgc.Suite(&cmdEnvironmentSuite{})\n\tgc.Suite(&cmdStorageSuite{})\n\tgc.Suite(&cmdSystemSuite{})\n\tgc.Suite(&dblogSuite{})\n\tgc.Suite(&cloudImageMetadataSuite{})\n\tgc.Suite(&cmdSpaceSuite{})\n\tgc.Suite(&cmdSubnetSuite{})\n\tgc.Suite(&dumpLogsCommandSuite{})\n}\n\nfunc Test(t *testing.T) {\n\tcoretesting.MgoTestPackage(t)\n}\nfeaturetests: disable test under -race\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage featuretests\n\nimport (\n\t\"flag\"\n\tstdtesting \"testing\"\n\n\t\"github.com\/juju\/testing\"\n\tgc \"gopkg.in\/check.v1\"\n\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\nvar runFeatureTests = flag.Bool(\"featuretests\", true, \"Run long-running feature tests.\")\n\nfunc init() {\n\n\tflag.Parse()\n\n\tif *runFeatureTests == false {\n\t\treturn\n\t}\n\t\/\/ Initialize all suites 
here.\n\tgc.Suite(&cmdJujuSuite{})\n\tgc.Suite(&annotationsSuite{})\n\tgc.Suite(&apiEnvironmentSuite{})\n\tgc.Suite(&blockSuite{})\n\tgc.Suite(&apiCharmsSuite{})\n\tgc.Suite(&cmdEnvironmentSuite{})\n\tgc.Suite(&cmdStorageSuite{})\n\tgc.Suite(&cmdSystemSuite{})\n\tgc.Suite(&dblogSuite{})\n\tgc.Suite(&cloudImageMetadataSuite{})\n\tgc.Suite(&cmdSpaceSuite{})\n\tgc.Suite(&cmdSubnetSuite{})\n\tgc.Suite(&dumpLogsCommandSuite{})\n}\n\nfunc TestPackage(t *stdtesting.T) {\n\tif testing.RaceEnabled {\n\t\tt.Skip(\"skipping package under -race, see LP 1519183\")\n\t}\n\tcoretesting.MgoTestPackage(t)\n}\n<|endoftext|>"} {"text":"\/*\n * Copyright (c) 2016, 1&1 Internet SE\n * Written by Jörg Pernfuß \n * All rights reserved.\n *\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\n\t\"gopkg.in\/resty.v0\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\ntype NotifyMessage struct {\n\tUuid string `json:\"uuid\" valid:\"uuidv4\"`\n\tPath string `json:\"path\" valid:\"abspath\"`\n}\n\nfunc FetchConfigurationItems(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar (\n\t\tdec *json.Decoder\n\t\tmsg NotifyMessage\n\t\terr error\n\t\tsoma *url.URL\n\t\tclient *resty.Client\n\t\tresp *resty.Response\n\t\tres proto.Result\n\t)\n\tdec = json.NewDecoder(r.Body)\n\tif err = dec.Decode(msg); err != nil {\n\t\tdispatchBadRequest(&w, err.Error())\n\t\treturn\n\t}\n\tgovalidator.SetFieldsRequiredByDefault(true)\n\tgovalidator.TagMap[\"abspath\"] = govalidator.Validator(func(str string) bool {\n\t\treturn filepath.IsAbs(str)\n\t})\n\tif ok, err := govalidator.ValidateStruct(msg); !ok {\n\t\tdispatchBadRequest(&w, err.Error())\n\t\treturn\n\t}\n\n\tsoma, _ = url.Parse(Eye.Soma.url.String())\n\tsoma.Path = fmt.Sprintf(\"%s\/%s\", msg.Path, msg.Uuid)\n\tclient = resty.New().SetTimeout(500 * time.Millisecond)\n\tif resp, err = client.R().Get(soma.String()); err != nil || resp.StatusCode() > 299 {\n\t\tif err == nil {\n\t\t\terr = fmt.Errorf(resp.Status())\n\t\t}\n\t\tdispatchPrecondition(&w, err.Error())\n\t\treturn\n\t}\n\tif err = json.Unmarshal(resp.Body(), res); err != nil {\n\t\tdispatchUnprocessable(&w, err.Error())\n\t\treturn\n\t}\n\tif res.StatusCode != 200 {\n\t\tdispatchGone(&w, err.Error())\n\t\treturn\n\t}\n\tif len(*res.Deployments) != 1 {\n\t\tdispatchPrecondition(&w, err.Error())\n\t\treturn\n\t}\n\tif err = CheckUpdateOrInsertOrDelete(&(*res.Deployments)[0]); err != nil {\n\t\tdispatchInternalServerError(&w, err.Error())\n\t\treturn\n\t}\n\tdispatchNoContent(&w)\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\nFIX: json.Decode target must be pointer value\/*\n * Copyright (c) 2016, 1&1 Internet SE\n * Written by Jörg Pernfuß \n * All rights reserved.\n *\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\n\t\"gopkg.in\/resty.v0\"\n\n\t\"github.com\/asaskevich\/govalidator\"\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\ntype NotifyMessage struct {\n\tUuid string `json:\"uuid\" valid:\"uuidv4\"`\n\tPath string `json:\"path\" valid:\"abspath\"`\n}\n\nfunc FetchConfigurationItems(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar (\n\t\tdec *json.Decoder\n\t\tmsg NotifyMessage\n\t\terr error\n\t\tsoma *url.URL\n\t\tclient *resty.Client\n\t\tresp *resty.Response\n\t\tres proto.Result\n\t)\n\tdec = json.NewDecoder(r.Body)\n\tif err = dec.Decode(&msg); 
err != nil {\n\t\tdispatchBadRequest(&w, err.Error())\n\t\treturn\n\t}\n\tgovalidator.SetFieldsRequiredByDefault(true)\n\tgovalidator.TagMap[\"abspath\"] = govalidator.Validator(func(str string) bool {\n\t\treturn filepath.IsAbs(str)\n\t})\n\tif ok, err := govalidator.ValidateStruct(msg); !ok {\n\t\tdispatchBadRequest(&w, err.Error())\n\t\treturn\n\t}\n\n\tsoma, _ = url.Parse(Eye.Soma.url.String())\n\tsoma.Path = fmt.Sprintf(\"%s\/%s\", msg.Path, msg.Uuid)\n\tclient = resty.New().SetTimeout(500 * time.Millisecond)\n\tif resp, err = client.R().Get(soma.String()); err != nil || resp.StatusCode() > 299 {\n\t\tif err == nil {\n\t\t\terr = fmt.Errorf(resp.Status())\n\t\t}\n\t\tdispatchPrecondition(&w, err.Error())\n\t\treturn\n\t}\n\tif err = json.Unmarshal(resp.Body(), &res); err != nil {\n\t\tdispatchUnprocessable(&w, err.Error())\n\t\treturn\n\t}\n\tif res.StatusCode != 200 {\n\t\tdispatchGone(&w, err.Error())\n\t\treturn\n\t}\n\tif len(*res.Deployments) != 1 {\n\t\tdispatchPrecondition(&w, err.Error())\n\t\treturn\n\t}\n\tif err = CheckUpdateOrInsertOrDelete(&(*res.Deployments)[0]); err != nil {\n\t\tdispatchInternalServerError(&w, err.Error())\n\t\treturn\n\t}\n\tdispatchNoContent(&w)\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"package column\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\n\/\/ Index is used to efficiently find the value for a database column\n\/\/ in the associated field within a structure.\n\/\/ In most cases an index is a single integer, which\n\/\/ represents the index of the relevant field in the structure. In the\n\/\/ case of fields in embedded structs, a field index consists of more than\n\/\/ one integer.\ntype Index []int\n\n\/\/ NewIndex returns an index with the specified values.\nfunc NewIndex(vals ...int) Index {\n\treturn Index(vals)\n}\n\n\/\/ Append a number to an existing index to create\n\/\/ a new index. 
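\n\/\/ For example (sketch):\n\/\/\n\/\/\tix := NewIndex(0, 2)\n\/\/\tix2 := ix.Append(5) \/\/ ix2 is Index{0, 2, 5}\n\/\/\n\/\/ 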
The original index ix is unchanged.\n\/\/\n\/\/ If ix is nil, then Append returns an index\n\/\/ with a single index value.\nfunc (ix Index) Append(index int) Index {\n\tclone := ix.Clone()\n\treturn append(clone, index)\n}\n\n\/\/ Clone creates a deep copy of ix.\nfunc (ix Index) Clone() Index {\n\t\/\/ Because the main purpose of cloning is to append\n\t\/\/ another index, create the cloned field index to be\n\t\/\/ the same length, but with capacity for an additional index.\n\tclone := make(Index, len(ix), len(ix)+1)\n\tcopy(clone, ix)\n\treturn clone\n}\n\n\/\/ Equal returns true if ix is equal to v.\nfunc (ix Index) Equal(v Index) bool {\n\tif len(ix) != len(v) {\n\t\treturn false\n\t}\n\tfor i := range ix {\n\t\tif ix[i] != v[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ ValueRW returns the value of the field from the structure v.\n\/\/ If any referenced field in v contains a nil pointer, then an\n\/\/ empty value is created.\nfunc (ix Index) ValueRW(v reflect.Value) reflect.Value {\n\tfor _, i := range ix {\n\t\tv = reflect.Indirect(v).Field(i)\n\t\t\/\/ Create empty value for nil pointers, maps and slices.\n\t\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\t\ta := reflect.New(v.Type().Elem())\n\t\t\tfmt.Printf(\"new a: %v\\n\", a.Type())\n\t\t\tv.Set(a)\n\t\t} else if v.Kind() == reflect.Map && v.IsNil() {\n\t\t\tv.Set(reflect.MakeMap(v.Type()))\n\t\t} else if v.Kind() == reflect.Slice && v.IsNil() {\n\t\t\tv.Set(reflect.MakeSlice(v.Type(), 0, 0))\n\t\t}\n\t}\n\treturn v\n}\n\n\/\/ ValueRO returns a value from the structure v without\n\/\/ checking for nil pointers.\nfunc (ix Index) ValueRO(v reflect.Value) reflect.Value {\n\tfor _, i := range ix {\n\t\tv = reflect.Indirect(v).Field(i)\n\t}\n\treturn v\n}\nRemove printf used for debugging.package column\n\nimport (\n\t\"reflect\"\n)\n\n\/\/ Index is used to efficiently find the value for a database column\n\/\/ in the associated field within a structure.\n\/\/ In most cases an index is a single integer, which\n\/\/ represents the index of the relevant field in the structure. In the\n\/\/ case of fields in embedded structs, a field index consists of more than\n\/\/ one integer.\ntype Index []int\n\n\/\/ NewIndex returns an index with the specified values.\nfunc NewIndex(vals ...int) Index {\n\treturn Index(vals)\n}\n\n\/\/ Append a number to an existing index to create\n\/\/ a new index. 
The original index ix is unchanged.\n\/\/\n\/\/ If ix is nil, then Append returns an index\n\/\/ with a single index value.\nfunc (ix Index) Append(index int) Index {\n\tclone := ix.Clone()\n\treturn append(clone, index)\n}\n\n\/\/ Clone creates a deep copy of ix.\nfunc (ix Index) Clone() Index {\n\t\/\/ Because the main purpose of cloning is to append\n\t\/\/ another index, create the cloned field index to be\n\t\/\/ the same length, but with capacity for an additional index.\n\tclone := make(Index, len(ix), len(ix)+1)\n\tcopy(clone, ix)\n\treturn clone\n}\n\n\/\/ Equal returns true if ix is equal to v.\nfunc (ix Index) Equal(v Index) bool {\n\tif len(ix) != len(v) {\n\t\treturn false\n\t}\n\tfor i := range ix {\n\t\tif ix[i] != v[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ ValueRW returns the value of the field from the structure v.\n\/\/ If any referenced field in v contains a nil pointer, then an\n\/\/ empty value is created.\nfunc (ix Index) ValueRW(v reflect.Value) reflect.Value {\n\tfor _, i := range ix {\n\t\tv = reflect.Indirect(v).Field(i)\n\t\t\/\/ Create empty value for nil pointers, maps and slices.\n\t\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\t\ta := reflect.New(v.Type().Elem())\n\t\t\tv.Set(a)\n\t\t} else if v.Kind() == reflect.Map && v.IsNil() {\n\t\t\tv.Set(reflect.MakeMap(v.Type()))\n\t\t} else if v.Kind() == reflect.Slice && v.IsNil() {\n\t\t\tv.Set(reflect.MakeSlice(v.Type(), 0, 0))\n\t\t}\n\t}\n\treturn v\n}\n\n\/\/ ValueRO returns a value from the structure v without\n\/\/ checking for nil pointers.\nfunc (ix Index) ValueRO(v reflect.Value) reflect.Value {\n\tfor _, i := range ix {\n\t\tv = reflect.Indirect(v).Field(i)\n\t}\n\treturn v\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2015-2017 Hilko Bengen \n\/\/ All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the license that can be\n\/\/ found in the LICENSE file.\n\n\/\/+build !yara3.3,!yara3.4,!yara3.5,!yara3.6\n\npackage yara\n\n\/*\n#include <yara.h>\n#include <stdlib.h>\n\nchar* includeCallback(char*, char*, char*, void*);\nvoid freeCallback(char*, void*);\n*\/\nimport \"C\"\nimport (\n\t\"unsafe\"\n)\n\n\/\/ CompilerIncludeFunc is the type of the function that can be\n\/\/ registered through SetIncludeCallback. It is called for every\n\/\/ include statement encountered by the compiler. The argument \"name\"\n\/\/ specifies the rule file to be included, \"filename\" specifies the\n\/\/ name of the rule file where the include statement has been\n\/\/ encountered, and \"namespace\" specifies the rule namespace. The sole\n\/\/ return value is a byte slice containing the contents of the\n\/\/ included file. 
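For example, a hypothetical\n\/\/ callback that resolves includes from an in-memory map:\n\/\/\n\/\/\trules := map[string][]byte{\"common.yar\": []byte(\"rule c { condition: true }\")}\n\/\/\tcb := CompilerIncludeFunc(func(name, filename, namespace string) []byte {\n\/\/\t\treturn rules[name] \/\/ nil when the include is unknown\n\/\/\t})\n\/\/\n\/\/ 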
A return value of nil signals an error to the YARA\n\/\/ compiler.\n\/\/\n\/\/ See yr_compiler_set_include_callback\ntype CompilerIncludeFunc func(name, filename, namespace string) []byte\n\n\/\/ DisableIncludes disables all include statements in the compiler.\n\/\/ See yr_compiler_set_include_callback.\nfunc (c *Compiler) DisableIncludes() {\n\tC.yr_compiler_set_include_callback(c.compiler.cptr, nil, nil, nil)\n\tkeepAlive(c)\n\treturn\n}\n\n\/\/export includeCallback\nfunc includeCallback(name, filename, namespace *C.char, user_data unsafe.Pointer) *C.char {\n\tid := *((*uintptr)(user_data))\n\tcallbackFunc := callbackData.Get(id).(CompilerIncludeFunc)\n\tif buf := callbackFunc(\n\t\tC.GoString(name), C.GoString(filename), C.GoString(namespace),\n\t); buf != nil {\n\t\tbuf = append(buf, 0)\n\t\treturn (*C.char)(C.CBytes(buf))\n\t}\n\treturn nil\n}\n\n\/\/export freeCallback\nfunc freeCallback(callback_result_ptr *C.char, user_data unsafe.Pointer) {\n\tif callback_result_ptr != nil {\n\t\tC.free(unsafe.Pointer(callback_result_ptr))\n\t}\n\treturn\n}\n\n\/\/ SetIncludeCallback sets up cb as an include callback that is called\n\/\/ (through Go glue code) by the YARA compiler for every include\n\/\/ statement.\nfunc (c *Compiler) SetIncludeCallback(cb CompilerIncludeFunc) {\n\tif cb == nil {\n\t\tc.DisableIncludes()\n\t\treturn\n\t}\n\tid := callbackData.Put(cb)\n\tC.yr_compiler_set_include_callback(\n\t\tc.compiler.cptr,\n\t\tC.YR_COMPILER_INCLUDE_CALLBACK_FUNC(C.includeCallback),\n\t\tC.YR_COMPILER_INCLUDE_FREE_FUNC(C.freeCallback),\n\t\tunsafe.Pointer(&id),\n\t)\n\tkeepAlive(c)\n\treturn\n}\nMake include callback functionality compatible with Go 1.6\/\/ Copyright © 2015-2017 Hilko Bengen \n\/\/ All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the license that can be\n\/\/ found in the LICENSE file.\n\n\/\/+build !yara3.3,!yara3.4,!yara3.5,!yara3.6\n\npackage yara\n\n\/*\n#include <yara.h>\n#include <stdlib.h>\n#include <string.h>\n\nchar* includeCallback(char*, char*, char*, void*);\nvoid freeCallback(char*, void*);\n*\/\nimport \"C\"\nimport (\n\t\"unsafe\"\n)\n\n\/\/ CompilerIncludeFunc is the type of the function that can be\n\/\/ registered through SetIncludeCallback. It is called for every\n\/\/ include statement encountered by the compiler. The argument \"name\"\n\/\/ specifies the rule file to be included, \"filename\" specifies the\n\/\/ name of the rule file where the include statement has been\n\/\/ encountered, and \"namespace\" specifies the rule namespace. The sole\n\/\/ return value is a byte slice containing the contents of the\n\/\/ included file. 
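For example, a compiler can be\n\/\/ wired to read includes from disk (an illustrative sketch; any read\n\/\/ error is reported by returning nil):\n\/\/\n\/\/\tc.SetIncludeCallback(func(name, filename, namespace string) []byte {\n\/\/\t\tbuf, err := ioutil.ReadFile(name)\n\/\/\t\tif err != nil {\n\/\/\t\t\treturn nil\n\/\/\t\t}\n\/\/\t\treturn buf\n\/\/\t})\n\/\/\n\/\/ 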
A return value of nil signals an error to the YARA\n\/\/ compiler.\n\/\/\n\/\/ See yr_compiler_set_include_callback\ntype CompilerIncludeFunc func(name, filename, namespace string) []byte\n\n\/\/ DisableIncludes disables all include statements in the compiler.\n\/\/ See yr_compiler_set_include_callback.\nfunc (c *Compiler) DisableIncludes() {\n\tC.yr_compiler_set_include_callback(c.compiler.cptr, nil, nil, nil)\n\tkeepAlive(c)\n\treturn\n}\n\n\/\/export includeCallback\nfunc includeCallback(name, filename, namespace *C.char, user_data unsafe.Pointer) *C.char {\n\tid := *((*uintptr)(user_data))\n\tcallbackFunc := callbackData.Get(id).(CompilerIncludeFunc)\n\tif buf := callbackFunc(\n\t\tC.GoString(name), C.GoString(filename), C.GoString(namespace),\n\t); buf != nil {\n\t\toutbuf := C.calloc(1, C.size_t(len(buf)+1))\n\t\t\/\/ buf may be non-nil but empty (an empty include file); indexing\n\t\t\/\/ buf[0] would panic in that case.\n\t\tif len(buf) > 0 {\n\t\t\tC.memcpy(outbuf, unsafe.Pointer(&buf[0]), C.size_t(len(buf)))\n\t\t}\n\t\treturn (*C.char)(outbuf)\n\t}\n\treturn nil\n}\n\n\/\/export freeCallback\nfunc freeCallback(callback_result_ptr *C.char, user_data unsafe.Pointer) {\n\tif callback_result_ptr != nil {\n\t\tC.free(unsafe.Pointer(callback_result_ptr))\n\t}\n\treturn\n}\n\n\/\/ SetIncludeCallback sets up cb as an include callback that is called\n\/\/ (through Go glue code) by the YARA compiler for every include\n\/\/ statement.\nfunc (c *Compiler) SetIncludeCallback(cb CompilerIncludeFunc) {\n\tif cb == nil {\n\t\tc.DisableIncludes()\n\t\treturn\n\t}\n\tid := callbackData.Put(cb)\n\tC.yr_compiler_set_include_callback(\n\t\tc.compiler.cptr,\n\t\tC.YR_COMPILER_INCLUDE_CALLBACK_FUNC(C.includeCallback),\n\t\tC.YR_COMPILER_INCLUDE_FREE_FUNC(C.freeCallback),\n\t\tunsafe.Pointer(&id),\n\t)\n\tkeepAlive(c)\n\treturn\n}\n<|endoftext|>"} {"text":"package boardgame\n\n\/\/TODO: consider making ComponentChest be an interface again (in some cases it\n\/\/might be nice to be able to cast the Deck directly to its underlying type to\n\/\/minimize later casts)\n\n\/\/Each game has one ComponentChest, which is an immutable set of all\n\/\/components in this game, configured into 0 or more Decks. A chest has two\n\/\/phases: construction and serving. During construction, decks may be added but\n\/\/none may be retrieved. After construction decks may be retrieved but not\n\/\/added. 
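A typical lifecycle (an\n\/\/illustrative sketch, where deck is a previously populated *Deck) is:\n\/\/\n\/\/\tchest := NewComponentChest(nil)\n\/\/\tchest.AddDeck(\"cards\", deck) \/\/construction phase\n\/\/\tchest.Finish()\n\/\/\tcards := chest.Deck(\"cards\") \/\/serving phase\n\/\/\n\/\/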
This helps ensure that Decks always give a consistent view of the\n\/\/world.\ntype ComponentChest struct {\n\tinitialized bool\n\tdeckNames []string\n\tdecks map[string]*Deck\n\tenums *EnumSet\n\n\tmanager *GameManager\n}\n\nfunc NewComponentChest(enums *EnumSet) *ComponentChest {\n\tif enums == nil {\n\t\tenums = NewEnumSet()\n\t}\n\tenums.Finish()\n\treturn &ComponentChest{\n\t\tenums: enums,\n\t}\n}\n\nfunc (c *ComponentChest) Enums() *EnumSet {\n\treturn c.enums\n}\n\nfunc (c *ComponentChest) Manager() *GameManager {\n\treturn c.manager\n}\n\n\/\/DeckNames returns all of the valid deck names, if the chest has finished initialization.\nfunc (c *ComponentChest) DeckNames() []string {\n\t\/\/If it's not finished being initialized then no decks are valid.\n\tif !c.initialized {\n\t\treturn nil\n\t}\n\treturn c.deckNames\n}\n\n\/\/Deck returns the deck with a given name, if the chest has finished initialization.\nfunc (c *ComponentChest) Deck(name string) *Deck {\n\tif !c.initialized {\n\t\treturn nil\n\t}\n\treturn c.decks[name]\n}\n\n\/\/AddDeck adds a deck with a given name, but only if Finish() has not yet been called.\nfunc (c *ComponentChest) AddDeck(name string, deck *Deck) {\n\t\/\/Only add the deck if we haven't finished initializing\n\tif c.initialized {\n\t\treturn\n\t}\n\tif c.decks == nil {\n\t\tc.decks = make(map[string]*Deck)\n\t}\n\n\tif name == \"\" {\n\t\tname = \"NONAMEPROVIDED\"\n\t}\n\n\t\/\/Tell the deck that no more items will be added to it.\n\tdeck.finish(c, name)\n\n\tc.decks[name] = deck\n\n}\n\n\/\/Finish switches the chest from constructing to serving. Before Finish() is\n\/\/called, decks may be added but not retrieved. After it is called, decks may\n\/\/be retrieved but not added. Finish() is called automatically when a Chest is\n\/\/added to a game via SetChest(), but you can call it before then if you'd\n\/\/like.\nfunc (c *ComponentChest) Finish() {\n\n\t\/\/Check if Finish() has already been called\n\tif c.initialized {\n\t\treturn\n\t}\n\n\tc.initialized = true\n\n\t\/\/Now that no more decks are coming, we can create deckNames once and be\n\t\/\/done with it.\n\tc.deckNames = make([]string, len(c.decks))\n\n\ti := 0\n\n\tfor name := range c.decks {\n\t\tc.deckNames[i] = name\n\t\ti++\n\t}\n}\nUpdated doc for NewComponentChest. Part of #457.package boardgame\n\n\/\/TODO: consider making ComponentChest be an interface again (in some cases it\n\/\/might be nice to be able to cast the Deck directly to its underlying type to\n\/\/minimize later casts)\n\n\/\/Each game has one ComponentChest, which is an immutable set of all\n\/\/components in this game, configured into 0 or more Decks. A chest has two\n\/\/phases: construction and serving. During construction, decks may be added but\n\/\/none may be retrieved. After construction decks may be retrieved but not\n\/\/added. This helps ensure that Decks always give a consistent view of the\n\/\/world.\ntype ComponentChest struct {\n\tinitialized bool\n\tdeckNames []string\n\tdecks map[string]*Deck\n\tenums *EnumSet\n\n\tmanager *GameManager\n}\n\n\/\/NewComponentChest returns a new ComponentChest with the given enumset. If no\n\/\/enumset is provided, an empty one will be created. 
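For example\n\/\/(illustrative):\n\/\/\n\/\/\tenums := NewEnumSet()\n\/\/\t\/\/...define enums here before creating the chest...\n\/\/\tchest := NewComponentChest(enums)\n\/\/\n\/\/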
Calls Finish() on the\n\/\/enumset to verify that it cannot be modified.\nfunc NewComponentChest(enums *EnumSet) *ComponentChest {\n\tif enums == nil {\n\t\tenums = NewEnumSet()\n\t}\n\tenums.Finish()\n\treturn &ComponentChest{\n\t\tenums: enums,\n\t}\n}\n\nfunc (c *ComponentChest) Enums() *EnumSet {\n\treturn c.enums\n}\n\nfunc (c *ComponentChest) Manager() *GameManager {\n\treturn c.manager\n}\n\n\/\/DeckNames returns all of the valid deck names, if the chest has finished initialization.\nfunc (c *ComponentChest) DeckNames() []string {\n\t\/\/If it's not finished being initialized then no decks are valid.\n\tif !c.initialized {\n\t\treturn nil\n\t}\n\treturn c.deckNames\n}\n\n\/\/Deck returns the deck with a given name, if the chest has finished initialization.\nfunc (c *ComponentChest) Deck(name string) *Deck {\n\tif !c.initialized {\n\t\treturn nil\n\t}\n\treturn c.decks[name]\n}\n\n\/\/AddDeck adds a deck with a given name, but only if Finish() has not yet been called.\nfunc (c *ComponentChest) AddDeck(name string, deck *Deck) {\n\t\/\/Only add the deck if we haven't finished initializing\n\tif c.initialized {\n\t\treturn\n\t}\n\tif c.decks == nil {\n\t\tc.decks = make(map[string]*Deck)\n\t}\n\n\tif name == \"\" {\n\t\tname = \"NONAMEPROVIDED\"\n\t}\n\n\t\/\/Tell the deck that no more items will be added to it.\n\tdeck.finish(c, name)\n\n\tc.decks[name] = deck\n\n}\n\n\/\/Finish switches the chest from constructing to serving. Before Finish() is\n\/\/called, decks may be added but not retrieved. After it is called, decks may\n\/\/be retrieved but not added. Finish() is called automatically when a Chest is\n\/\/added to a game via SetChest(), but you can call it before then if you'd\n\/\/like.\nfunc (c *ComponentChest) Finish() {\n\n\t\/\/Check if Finish() has already been called\n\tif c.initialized {\n\t\treturn\n\t}\n\n\tc.initialized = true\n\n\t\/\/Now that no more decks are coming, we can create deckNames once and be\n\t\/\/done with it.\n\tc.deckNames = make([]string, len(c.decks))\n\n\ti := 0\n\n\tfor name := range c.decks {\n\t\tc.deckNames[i] = name\n\t\ti++\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc startTail(file string, ch chan string) error {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileInfo, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileSize := fileInfo.Size()\n\tvar bufSizeMax int64 = 1024\n\tvar bufSize int64\n\tif fileSize > bufSizeMax {\n\t\tbufSize = bufSizeMax\n\t} else {\n\t\tbufSize = fileSize\n\t}\n\tgo func() {\n\t\tfmt.Println(\"tail start\")\n\t\tch <- file\n\t\tbuf := make([]byte, bufSize)\n\t\t{\n\t\t\t\/\/ Send the last bufSize bytes of the file first so the client\n\t\t\t\/\/ immediately sees the most recent output.\n\t\t\tn, err := f.ReadAt(buf, fileSize-bufSize)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tpanic(\"f.ReadAt(): \" + err.Error())\n\t\t\t}\n\t\t\tline := string(buf[0:n])\n\t\t\tfmt.Printf(\"read[%v:%v]\\n\", n, line)\n\t\t\tch <- line\n\t\t\t\/\/ ReadAt does not advance the file offset, so seek to the end\n\t\t\t\/\/ before entering the follow loop.\n\t\t\tif _, err := f.Seek(fileSize, 0); err != nil {\n\t\t\t\tpanic(\"f.Seek(): \" + err.Error())\n\t\t\t}\n\t\t}\n\t\tfor {\n\t\t\tn, err := f.Read(buf)\n\t\t\tif err == io.EOF && n == 0 {\n\t\t\t\t\/\/ Nothing new yet; avoid a busy loop.\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tpanic(\"f.Read(): \" + err.Error())\n\t\t\t}\n\t\t\tline := string(buf[0:n])\n\t\t\tfmt.Printf(\"read[%v:%v]\\n\", n, line)\n\t\t\tch <- line\n\t\t}\n\t\tfmt.Println(\"tail end\")\n\t}()\n\treturn nil\n}\n\nfunc makeWebsocketHandlerWithChannel(ch chan string, f func(chan string, *websocket.Conn)) func(*websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\t\tf(ch, ws)\n\t}\n}\n\ntype 
Data struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\n\nfunc websocketTailHandler(ch chan string, ws *websocket.Conn) {\n\tfmt.Printf(\"tailHandler %v\\n\", ws)\n\t\/\/ send first line as file name\n\tfileName := <-ch\n\tif err := websocket.JSON.Send(ws, Data{\"filename\", fileName}); err != nil {\n\t\tfmt.Println(\"ERR:websocket.JSON.Send(): \" + err.Error())\n\t}\n\tfor {\n\t\tline := <-ch\n\t\tif err := websocket.JSON.Send(ws, Data{\"msg\", line}); err != nil {\n\t\t\tfmt.Println(\"ERR:websocket.JSON.Send(): \" + err.Error())\n\t\t}\n\t\tfmt.Printf(\"tailHandler write[%v]\\n\", line)\n\t}\n\tfmt.Println(\"tailHandler finished\")\n}\n\n\/\/ for debug\nfunc pseudoSubscriber(ch chan string) {\n\tfor {\n\t\tline := <-ch\n\t\tfmt.Println(\"[sub]: \" + line)\n\t}\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tpanic(\"usage: wstail FILE\")\n\t}\n\n\tch := make(chan string)\n\thttp.Handle(\"\/tail\", websocket.Handler(makeWebsocketHandlerWithChannel(ch, websocketTailHandler)))\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\"..\/view\")))\n\n\tif err := startTail(os.Args[1], ch); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"start wstail...\")\n\terr := http.ListenAndServe(\":23456\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n\t\/\/pseudoSubscriber(ch)\n}\nadd view-dir flagpackage main\n\nimport (\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"flag\"\n\t\/\/\"fmt\"\n\t\/\/\"html\/template\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nvar (\n\tviewDir = flag.String(\"view-dir\", \"\", \"path to view directory\")\n)\n\n\/*\nvar templates *template.Template\n\nfunc loadTemplate() error {\n\tvar err error\n\tt := template.New(\"wstail\")\n\ttemplates, err = t.ParseGlob(fmt.Sprintf(\"%s\/*.html\", *viewDir))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n*\/\nfunc startTail(file string, ch chan string) error {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileInfo, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileSize := fileInfo.Size()\n\tvar bufSizeMax int64 = 1024\n\tvar bufSize int64\n\tif fileSize > bufSizeMax {\n\t\tbufSize = bufSizeMax\n\t} else {\n\t\tbufSize = fileSize\n\t}\n\tgo func() {\n\t\tlog.Println(\"tail start\")\n\t\tch <- file\n\t\tbuf := make([]byte, bufSize)\n\t\t{\n\t\t\t\/\/ Send the last bufSize bytes of the file first so the client\n\t\t\t\/\/ immediately sees the most recent output.\n\t\t\tn, err := f.ReadAt(buf, fileSize-bufSize)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tpanic(\"f.ReadAt(): \" + err.Error())\n\t\t\t}\n\t\t\tline := string(buf[0:n])\n\t\t\tlog.Printf(\"read[%v:%v]\\n\", n, line)\n\t\t\tch <- line\n\t\t\t\/\/ ReadAt does not advance the file offset, so seek to the end\n\t\t\t\/\/ before entering the follow loop.\n\t\t\tif _, err := f.Seek(fileSize, 0); err != nil {\n\t\t\t\tpanic(\"f.Seek(): \" + err.Error())\n\t\t\t}\n\t\t}\n\t\tfor {\n\t\t\tn, err := f.Read(buf)\n\t\t\tif err == io.EOF && n == 0 {\n\t\t\t\t\/\/ Nothing new yet; avoid a busy loop.\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tpanic(\"f.Read(): \" + err.Error())\n\t\t\t}\n\t\t\tline := string(buf[0:n])\n\t\t\tlog.Printf(\"read[%v:%v]\\n\", n, line)\n\t\t\tch <- line\n\t\t}\n\t\tlog.Println(\"tail end\")\n\t}()\n\treturn nil\n}\n\nfunc makeWebsocketHandlerWithChannel(ch chan string, f func(chan string, *websocket.Conn)) func(*websocket.Conn) {\n\treturn func(ws *websocket.Conn) {\n\t\tf(ch, ws)\n\t}\n}\n\ntype Data struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\n\nfunc websocketTailHandler(ch chan string, ws *websocket.Conn) {\n\tlog.Printf(\"tailHandler %v\\n\", ws)\n\t\/\/ send first line as file name\n\tfileName := <-ch\n\tif err := websocket.JSON.Send(ws, Data{\"filename\", fileName}); err != nil {\n\t\tlog.Println(\"ERR:websocket.JSON.Send(): \" + err.Error())\n\t}\n\tfor {\n\t\tline := <-ch\n\t\tif err := 
websocket.JSON.Send(ws, Data{\"msg\", line}); err != nil {\n\t\t\tlog.Println(\"ERR:websocket.JSON.Send(): \" + err.Error())\n\t\t}\n\t\tlog.Printf(\"tailHandler write[%v]\\n\", line)\n\t}\n\tlog.Println(\"tailHandler finished\")\n}\n\n\/\/ for debug\nfunc pseudoSubscriber(ch chan string) {\n\tfor {\n\t\tline := <-ch\n\t\tlog.Println(\"[sub]: \" + line)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\tif *viewDir == \"\" {\n\t\tfor _, defaultPath := range []string{\"..\/view\", \"view\", \"\/usr\/local\/share\/wstail\/view\"} {\n\t\t\tif info, err := os.Stat(defaultPath); err == nil && info.IsDir() {\n\t\t\t\t*viewDir = defaultPath\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif *viewDir == \"\" {\n\t\tlog.Fatalf(\"view dir not found\")\n\t}\n\t\/\/loadTemplate()\n\n\tch := make(chan string)\n\thttp.Handle(\"\/tail\", websocket.Handler(makeWebsocketHandlerWithChannel(ch, websocketTailHandler)))\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(*viewDir)))\n\n\tif flag.NArg() < 1 {\n\t\tlog.Fatalf(\"usage: %s [-view-dir DIR] FILE\", os.Args[0])\n\t}\n\tfile := flag.Arg(0)\n\tif err := startTail(file, ch); err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Println(\"start wstail...\")\n\terr := http.ListenAndServe(\":23456\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n\t\/\/pseudoSubscriber(ch)\n}\n<|endoftext|>"} {"text":"package storage\n\nimport (\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/backup\"\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/migration\"\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\t\"github.com\/lxc\/lxd\/lxd\/storage\/drivers\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/instancewriter\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\ntype mockBackend struct {\n\tname string\n\tstate *state.State\n\tlogger logger.Logger\n\tdriver drivers.Driver\n}\n\nfunc (b *mockBackend) ID() int64 {\n\treturn 1 \/\/ The tests expect the storage pool ID to be 1.\n}\n\nfunc (b *mockBackend) Name() string {\n\treturn b.name\n}\n\nfunc (b *mockBackend) Description() string {\n\treturn \"\"\n}\n\nfunc (b *mockBackend) ValidateName(value string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Validate(config map[string]string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Status() string {\n\treturn api.NetworkStatusUnknown\n}\n\nfunc (b *mockBackend) LocalStatus() string {\n\treturn api.NetworkStatusUnknown\n}\n\nfunc (b *mockBackend) ToAPI() api.StoragePool {\n\treturn api.StoragePool{}\n}\n\nfunc (b *mockBackend) Driver() drivers.Driver {\n\treturn b.driver\n}\n\nfunc (b *mockBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {\n\treturn []migration.Type{\n\t\t{\n\t\t\tFSType: FallbackMigrationType(contentType),\n\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t},\n\t}\n}\n\nfunc (b *mockBackend) GetResources() (*api.ResourcesStoragePool, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) IsUsed() (bool, error) {\n\treturn false, nil\n}\n\nfunc (b *mockBackend) Delete(clientType request.ClientType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Update(clientType request.ClientType, newDescription string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Create(clientType request.ClientType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Mount() (bool, error) 
{\n\treturn true, nil\n}\n\nfunc (b *mockBackend) Unmount() (bool, error) {\n\treturn true, nil\n}\n\nfunc (b *mockBackend) ApplyPatch(name string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetVolume(volType drivers.VolumeType, contentType drivers.ContentType, volName string, volConfig map[string]string) drivers.Volume {\n\treturn drivers.Volume{}\n}\n\nfunc (b *mockBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {\n\treturn nil, nil, nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GenerateInstanceBackupConfig(inst instance.Instance, snapshots bool, op *operations.Operation) (*backup.Config, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) ImportInstance(inst instance.Instance, poolVol *backup.Config, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RefreshCustomVolume(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {\n\treturn 0, nil\n}\n\nfunc (b *mockBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {\n\treturn 
&MountInfo{}, nil\n}\n\nfunc (b *mockBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceSnapshot(i instance.Instance, src instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {\n\treturn &MountInfo{}, nil\n}\n\nfunc (b *mockBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) EnsureImage(fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteImage(fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName string, srcVolName string, srcVolOnly bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameCustomVolume(projectName string, volName string, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn drivers.ErrNotImplemented\n}\n\nfunc (b *mockBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetCustomVolumeDisk(projectName string, volName string) (string, error) {\n\treturn \"\", nil\n}\n\nfunc (b *mockBackend) GetCustomVolumeUsage(projectName string, volName string) (int64, error) {\n\treturn 0, nil\n}\n\nfunc (b *mockBackend) MountCustomVolume(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error) {\n\treturn true, nil\n}\n\nfunc (b *mockBackend) ImportCustomVolume(projectName string, poolVol *backup.Config, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName 
string, expiryDate time.Time, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameCustomVolumeSnapshot(projectName string, volName string, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, expiryDate time.Time, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {\n\treturn nil\n}\nlxd\/storage\/backend\/mock: Adds GenerateCustomVolumeBackupConfig functionpackage storage\n\nimport (\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/backup\"\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/migration\"\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\t\"github.com\/lxc\/lxd\/lxd\/storage\/drivers\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/instancewriter\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\ntype mockBackend struct {\n\tname string\n\tstate *state.State\n\tlogger logger.Logger\n\tdriver drivers.Driver\n}\n\nfunc (b *mockBackend) ID() int64 {\n\treturn 1 \/\/ The tests expect the storage pool ID to be 1.\n}\n\nfunc (b *mockBackend) Name() string {\n\treturn b.name\n}\n\nfunc (b *mockBackend) Description() string {\n\treturn \"\"\n}\n\nfunc (b *mockBackend) ValidateName(value string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Validate(config map[string]string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Status() string {\n\treturn api.NetworkStatusUnknown\n}\n\nfunc (b *mockBackend) LocalStatus() string {\n\treturn api.NetworkStatusUnknown\n}\n\nfunc (b *mockBackend) ToAPI() api.StoragePool {\n\treturn api.StoragePool{}\n}\n\nfunc (b *mockBackend) Driver() drivers.Driver {\n\treturn b.driver\n}\n\nfunc (b *mockBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {\n\treturn []migration.Type{\n\t\t{\n\t\t\tFSType: FallbackMigrationType(contentType),\n\t\t\tFeatures: []string{\"xattrs\", \"delete\", \"compress\", \"bidirectional\"},\n\t\t},\n\t}\n}\n\nfunc (b *mockBackend) GetResources() (*api.ResourcesStoragePool, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) IsUsed() (bool, error) {\n\treturn false, nil\n}\n\nfunc (b *mockBackend) Delete(clientType request.ClientType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Update(clientType request.ClientType, newDescription string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Create(clientType request.ClientType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) Mount() (bool, error) {\n\treturn true, nil\n}\n\nfunc (b *mockBackend) Unmount() (bool, error) {\n\treturn true, 
nil\n}\n\nfunc (b *mockBackend) ApplyPatch(name string) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetVolume(volType drivers.VolumeType, contentType drivers.ContentType, volName string, volConfig map[string]string) drivers.Volume {\n\treturn drivers.Volume{}\n}\n\nfunc (b *mockBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {\n\treturn nil, nil, nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GenerateCustomVolumeBackupConfig(projectName string, volName string, snapshots bool, op *operations.Operation) (*backup.Config, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) GenerateInstanceBackupConfig(inst instance.Instance, snapshots bool, op *operations.Operation) (*backup.Config, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {\n\treturn nil, nil\n}\n\nfunc (b *mockBackend) ImportInstance(inst instance.Instance, poolVol *backup.Config, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RefreshCustomVolume(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {\n\treturn 0, nil\n}\n\nfunc (b *mockBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) 
MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {\n\treturn &MountInfo{}, nil\n}\n\nfunc (b *mockBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateInstanceSnapshot(i instance.Instance, src instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {\n\treturn &MountInfo{}, nil\n}\n\nfunc (b *mockBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) EnsureImage(fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteImage(fingerprint string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName string, srcVolName string, srcVolOnly bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameCustomVolume(projectName string, volName string, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {\n\treturn drivers.ErrNotImplemented\n}\n\nfunc (b *mockBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) GetCustomVolumeDisk(projectName string, volName string) (string, error) {\n\treturn \"\", nil\n}\n\nfunc (b *mockBackend) GetCustomVolumeUsage(projectName string, volName string) (int64, error) {\n\treturn 0, nil\n}\n\nfunc (b *mockBackend) MountCustomVolume(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UnmountCustomVolume(projectName string, volName string, op *operations.Operation) (bool, error) {\n\treturn true, nil\n}\n\nfunc (b *mockBackend) ImportCustomVolume(projectName string, poolVol *backup.Config, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b 
*mockBackend) CreateCustomVolumeSnapshot(projectName string, volName string, newSnapshotName string, expiryDate time.Time, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RenameCustomVolumeSnapshot(projectName string, volName string, newName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) DeleteCustomVolumeSnapshot(projectName string, volName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, expiryDate time.Time, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) RestoreCustomVolume(projectName string, volName string, snapshotName string, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {\n\treturn nil\n}\n\nfunc (b *mockBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Package editor enables users to create edit views from their content\n\/\/ structs so that admins can manage content\npackage editor\n\nimport (\n\t\"bytes\"\n)\n\n\/\/ Editable ensures data is editable\ntype Editable interface {\n\tSetContentID(id int)\n\tContentID() int\n\tContentName() string\n\tSetSlug(slug string)\n\tEditor() *Editor\n\tMarshalEditor() ([]byte, error)\n}\n\n\/\/ Editor is a view containing fields to manage content\ntype Editor struct {\n\tViewBuf *bytes.Buffer\n}\n\n\/\/ Field is used to create the editable view for a field\n\/\/ within a particular content struct\ntype Field struct {\n\tView []byte\n}\n\n\/\/ Form takes editable content and any number of Field funcs to describe the edit\n\/\/ page for any content struct added by a user\nfunc Form(post Editable, fields ...Field) ([]byte, error) {\n\teditor := post.Editor()\n\n\teditor.ViewBuf = &bytes.Buffer{}\n\teditor.ViewBuf.Write([]byte(`
`))\n\n\tfor _, f := range fields {\n\t\taddFieldToEditorView(editor, f)\n\t}\n\n\teditor.ViewBuf.Write([]byte(`<\/td><\/tr>`))\n\n\t\/\/ content items with Item embedded have some default fields we need to render\n\teditor.ViewBuf.Write([]byte(`
`))\n\taddPostDefaultFieldsToEditorView(post, editor)\n\n\tsubmit := `\n
\n\t