{"text":"\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage generator\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"k8s.io\/kube-state-metrics\/pkg\/allow\"\n\t\"k8s.io\/kube-state-metrics\/pkg\/metric\"\n)\n\nfunc TestFilterMetricFamiliesLabels(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tallowLabels allow.Labels\n\t\tfamilyGenerators []FamilyGenerator\n\t\tresults []FamilyGenerator\n\t}{\n\t\t{\n\t\t\tname: \"Returns all the metric's keys and values if not annotation\/label metric by default\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns no labels if it's an annotation metric and no allowed labels specified\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_annotations\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_annotations\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns no labels if 
it's an label metric and no allowed labels specified\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns allowed labels for metric and label and value pairs are correct\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{\n\t\t\t\t\"node_info\": {\n\t\t\t\t\t\"two\",\n\t\t\t\t\t\"one\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"two\", \"one\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-two\", \"value-one\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns allowed labels for metric\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{\n\t\t\t\t\"node_labels\": {\n\t\t\t\t\t\"one\",\n\t\t\t\t\t\"two\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn 
&metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresults := FilterMetricFamiliesLabels(test.allowLabels, test.familyGenerators)\n\t\t\tif len(results) != len(test.results) {\n\t\t\t\tt.Fatalf(\"expected %v, got %v\", len(test.results), len(results))\n\t\t\t}\n\n\t\t\tfor i := range results {\n\t\t\t\tresult := results[i].GenerateFunc(nil)\n\t\t\t\texpected := test.results[i].GenerateFunc(nil)\n\t\t\t\tif !reflect.DeepEqual(result, expected) {\n\t\t\t\t\tt.Fatalf(\"Families don't equal, got %v, expected %v\", result, expected)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\nFix test as sometimes metrics keys and values are generated in different order\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage generator\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"k8s.io\/kube-state-metrics\/pkg\/allow\"\n\t\"k8s.io\/kube-state-metrics\/pkg\/metric\"\n)\n\nfunc TestFilterMetricFamiliesLabels(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tallowLabels allow.Labels\n\t\tfamilyGenerators []FamilyGenerator\n\t\tresults []FamilyGenerator\n\t}{\n\t\t{\n\t\t\tname: \"Returns all the metric's keys and values if not annotation\/label metric by default\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns no labels if it's an annotation metric and no allowed labels specified\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: 
[]FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_annotations\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_annotations\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns no labels if it's an label metric and no allowed labels specified\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns allowed labels for metric and label and value pairs are correct\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{\n\t\t\t\t\"node_info\": {\n\t\t\t\t\t\"two\",\n\t\t\t\t\t\"one\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_info\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"two\", \"one\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-two\", \"value-one\"},\n\t\t\t\t\t\t\t\t\tValue: 
1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Returns allowed labels for metric\",\n\t\t\tallowLabels: allow.Labels(map[string][]string{\n\t\t\t\t\"node_labels\": {\n\t\t\t\t\t\"one\",\n\t\t\t\t\t\"two\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tfamilyGenerators: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\", \"three\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\", \"value-three\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresults: []FamilyGenerator{\n\t\t\t\t{\n\t\t\t\t\tName: \"node_labels\",\n\t\t\t\t\tHelp: \"some help\",\n\t\t\t\t\tType: metric.Gauge,\n\t\t\t\t\tGenerateFunc: func(obj interface{}) *metric.Family {\n\t\t\t\t\t\treturn &metric.Family{\n\t\t\t\t\t\t\tMetrics: []*metric.Metric{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelKeys: []string{\"one\", \"two\"},\n\t\t\t\t\t\t\t\t\tLabelValues: []string{\"value-one\", \"value-two\"},\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresults := FilterMetricFamiliesLabels(test.allowLabels, test.familyGenerators)\n\t\t\tif len(results) != len(test.results) {\n\t\t\t\tt.Fatalf(\"expected %v, got %v\", len(test.results), len(results))\n\t\t\t}\n\n\t\t\tfor i := range results {\n\t\t\t\tresult := results[i].GenerateFunc(nil)\n\t\t\t\texpected := test.results[i].GenerateFunc(nil)\n\t\t\t\tfor _, resultMetric := range result.Metrics {\n\t\t\t\t\tfor _, expectedMetric := range expected.Metrics {\n\t\t\t\t\t\tassertEqualSlices(t, expectedMetric.LabelKeys, resultMetric.LabelKeys, \"keys\")\n\t\t\t\t\t\tassertEqualSlices(t, expectedMetric.LabelValues, resultMetric.LabelValues, \"values\")\n\n\t\t\t\t\t\tif expectedMetric.Value != resultMetric.Value {\n\t\t\t\t\t\t\tt.Fatalf(\"value - expected %v, got %v\", expectedMetric.Value, resultMetric.Value)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc assertEqualSlices(t *testing.T, expected, actual []string, kind string) {\n\tsort.Strings(expected)\n\tsort.Strings(actual)\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"%s - expected %v, got %v\", kind, expected, actual)\n\t}\n}\n<|endoftext|>"} {"text":"package camli\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"time\"\n\n\t\"camlistore.org\/pkg\/blob\"\n\t\"camlistore.org\/pkg\/client\"\n\t\"camlistore.org\/pkg\/schema\"\n\t\"camlistore.org\/pkg\/search\"\n)\n\n\/\/ Repo is our Camlistore scheme to model the state of a\n\/\/ particular repo at a particular point in time.\ntype Repo struct {\n\tName string\n\tParent string\n\tRetrieved time.Time\n\tRefs map[string]string\n\tPackfiles []string\n}\n\n\/\/ PutRepo stores a Repo in Camlistore.\nfunc (u *Uploader) PutRepo(r *Repo) error {\n\tbb := schema.NewBuilder()\n\tbb.SetType(\"git-repo\")\n\tbb.SetRawStringField(\"parent\", r.Parent)\n\tbb.SetRawStringField(\"retrieved\", schema.RFC3339FromTime(r.Retrieved))\n\tif refs, err := json.Marshal(r.Refs); err == nil {\n\t\t\/\/ TODO The builder 
just escapes this. We need the actual map as a\n\t\t\/\/ json object.\n\t\tbb.SetRawStringField(\"refs\", string(refs))\n\t} else {\n\t\treturn err\n\t}\n\tif packfiles, err := json.Marshal(r.Packfiles); err == nil {\n\t\t\/\/ TODO The builder just escapes this. We need the actual map as a\n\t\t\/\/ json object.\n\t\tbb.SetRawStringField(\"packfiles\", string(packfiles))\n\t} else {\n\t\treturn err\n\t}\n\n\tj := bb.Blob().JSON()\n\treporef := blob.SHA1FromString(j)\n\t_, err := uploadString(u.c, reporef, j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"stored repo: %s on %s\", r.Name, reporef)\n\n\t\/\/ Update or create its permanode.\n\tpn, _, err := u.findRepo(r.Name)\n\tif err != nil {\n\t\t\/\/ Create a new one.\n\t\tres, err := u.c.UploadNewPermanode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpn = res.BlobRef\n\t\tlog.Printf(\"created permanode: %s\", pn)\n\n\t\ttitleattr := schema.NewSetAttributeClaim(pn, \"title\", r.Name)\n\t\tclaimTime := time.Now()\n\t\ttitleattr.SetClaimDate(claimTime)\n\t\tsigner, err := u.c.Signer()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsigned, err := titleattr.SignAt(signer, claimTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't sign title claim\")\n\t\t}\n\t\t_, err = u.c.Upload(client.NewUploadHandleFromString(signed))\n\t}\n\tcontentattr := schema.NewSetAttributeClaim(pn, \"camliContent\", reporef.String())\n\tclaimTime := time.Now()\n\tcontentattr.SetClaimDate(claimTime)\n\tsigner, err := u.c.Signer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigned, err := contentattr.SignAt(signer, claimTime)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't sign content claim\")\n\t}\n\t_, err = u.c.Upload(client.NewUploadHandleFromString(signed))\n\treturn err\n}\n\nvar repoNotFoundErr = errors.New(\"repo not found\")\n\nfunc (u *Uploader) findRepo(name string) (blob.Ref, search.MetaMap, error) {\n\tres, err := u.c.Query(&search.SearchQuery{\n\t\tLimit: 1,\n\t\tConstraint: &search.Constraint{\n\t\t\tPermanode: &search.PermanodeConstraint{\n\t\t\t\tAttr: \"title\", Value: name,\n\t\t\t},\n\t\t},\n\t\tDescribe: &search.DescribeRequest{},\n\t})\n\tif err != nil {\n\t\treturn blob.Ref{}, nil, err\n\t}\n\tif len(res.Blobs) < 1 {\n\t\treturn blob.Ref{}, nil, repoNotFoundErr\n\t}\n\treturn res.Blobs[0].Blob, res.Describe.Meta, nil\n}\n\n\/\/ GetRepo queries for a repo permanode with name, and returns its\n\/\/ Repo object.\nfunc (u *Uploader) GetRepo(name string) (*Repo, error) {\n\tpn, meta, err := u.findRepo(name)\n\tif err == repoNotFoundErr {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tref, ok := meta[pn.String()].ContentRef()\n\tif !ok {\n\t\treturn nil, errors.New(\"couldn't find repo data (but there's a permanode)\")\n\t}\n\tr, _, err := u.c.Fetch(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar repo Repo\n\terr = json.Unmarshal(body, &repo)\n\treturn &repo, err\n}\ncamli: use bb.SetRawStringFieldpackage camli\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"time\"\n\n\t\"camlistore.org\/pkg\/blob\"\n\t\"camlistore.org\/pkg\/client\"\n\t\"camlistore.org\/pkg\/schema\"\n\t\"camlistore.org\/pkg\/search\"\n)\n\n\/\/ Repo is our Camlistore scheme to model the state of a\n\/\/ particular repo at a particular point in time.\ntype Repo struct {\n\tName string\n\tParent string\n\tRetrieved time.Time\n\tRefs map[string]string\n\tPackfiles []string\n}\n\n\/\/ PutRepo stores a Repo in 
Camlistore.\nfunc (u *Uploader) PutRepo(r *Repo) error {\n\tbb := schema.NewBuilder()\n\tbb.SetType(\"git-repo\")\n\tbb.SetRawStringField(\"parent\", r.Parent)\n\tbb.SetRawStringField(\"retrieved\", schema.RFC3339FromTime(r.Retrieved))\n\tif refs, err := schema.NewJSONObject(r.Refs); err == nil {\n\t\tbb.SetRawField(\"refs\", refs)\n\t} else {\n\t\treturn err\n\t}\n\tif packfiles, err := schema.NewJSONObject(r.Packfiles); err == nil {\n\t\tbb.SetRawField(\"packfiles\", packfiles)\n\t} else {\n\t\treturn err\n\t}\n\n\tj := bb.Blob().JSON()\n\treporef := blob.SHA1FromString(j)\n\t_, err := uploadString(u.c, reporef, j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"stored repo: %s on %s\", r.Name, reporef)\n\n\t\/\/ Update or create its permanode.\n\tpn, _, err := u.findRepo(r.Name)\n\tif err != nil {\n\t\t\/\/ Create a new one.\n\t\tres, err := u.c.UploadNewPermanode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpn = res.BlobRef\n\t\tlog.Printf(\"created permanode: %s\", pn)\n\n\t\ttitleattr := schema.NewSetAttributeClaim(pn, \"title\", r.Name)\n\t\tclaimTime := time.Now()\n\t\ttitleattr.SetClaimDate(claimTime)\n\t\tsigner, err := u.c.Signer()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsigned, err := titleattr.SignAt(signer, claimTime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't sign title claim\")\n\t\t}\n\t\t_, err = u.c.Upload(client.NewUploadHandleFromString(signed))\n\t}\n\tcontentattr := schema.NewSetAttributeClaim(pn, \"camliContent\", reporef.String())\n\tclaimTime := time.Now()\n\tcontentattr.SetClaimDate(claimTime)\n\tsigner, err := u.c.Signer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigned, err := contentattr.SignAt(signer, claimTime)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't sign content claim\")\n\t}\n\t_, err = u.c.Upload(client.NewUploadHandleFromString(signed))\n\treturn err\n}\n\nvar repoNotFoundErr = errors.New(\"repo not found\")\n\nfunc (u *Uploader) findRepo(name string) (blob.Ref, search.MetaMap, error) {\n\tres, err := u.c.Query(&search.SearchQuery{\n\t\tLimit: 1,\n\t\tConstraint: &search.Constraint{\n\t\t\tPermanode: &search.PermanodeConstraint{\n\t\t\t\tAttr: \"title\", Value: name,\n\t\t\t},\n\t\t},\n\t\tDescribe: &search.DescribeRequest{},\n\t})\n\tif err != nil {\n\t\treturn blob.Ref{}, nil, err\n\t}\n\tif len(res.Blobs) < 1 {\n\t\treturn blob.Ref{}, nil, repoNotFoundErr\n\t}\n\treturn res.Blobs[0].Blob, res.Describe.Meta, nil\n}\n\n\/\/ GetRepo queries for a repo permanode with name, and returns its\n\/\/ Repo object.\nfunc (u *Uploader) GetRepo(name string) (*Repo, error) {\n\tpn, meta, err := u.findRepo(name)\n\tif err == repoNotFoundErr {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tref, ok := meta[pn.String()].ContentRef()\n\tif !ok {\n\t\treturn nil, errors.New(\"couldn't find repo data (but there's a permanode)\")\n\t}\n\tr, _, err := u.c.Fetch(ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar repo Repo\n\terr = json.Unmarshal(body, &repo)\n\treturn &repo, err\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2021 Steve Francia .\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" 
BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tpl\n\nfunc MainTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage main\n\nimport \"{{ .PkgName }}\/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n`)\n}\n\nfunc RootTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage cmd\n\nimport (\n{{- if .Viper }}\n\t\"fmt\"\n\t\"os\"\n{{ end }}\n\t\"github.com\/spf13\/cobra\"\n{{- if .Viper }}\n\t\"github.com\/spf13\/viper\"{{ end }}\n)\n\n{{ if .Viper -}}\nvar cfgFile string\n{{- end }}\n\n\/\/ rootCmd represents the base command when called without any subcommands\nvar rootCmd = &cobra.Command{\n\tUse: \"{{ .AppName }}\",\n\tShort: \"A brief description of your application\",\n\tLong: ` + \"`\" + `A longer description that spans multiple lines and likely contains\nexamples and usage of using your application. For example:\n\nCobra is a CLI library for Go that empowers applications.\nThis application is a tool to generate the needed files\nto quickly create a Cobra application.` + \"`\" + `,\n\t\/\/ Uncomment the following line if your bare application\n\t\/\/ has an action associated with it:\n\t\/\/ Run: func(cmd *cobra.Command, args []string) { },\n}\n\n\/\/ Execute adds all child commands to the root command and sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tcobra.CheckErr(rootCmd.Execute())\n}\n\nfunc init() {\n{{- if .Viper }}\n\tcobra.OnInitialize(initConfig)\n{{ end }}\n\t\/\/ Here you will define your flags and configuration settings.\n\t\/\/ Cobra supports persistent flags, which, if defined here,\n\t\/\/ will be global for your application.\n{{ if .Viper }}\n\trootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.{{ .AppName }}.yaml)\")\n{{ else }}\n\t\/\/ rootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.{{ .AppName }}.yaml)\")\n{{ end }}\n\t\/\/ Cobra also supports local flags, which will only run\n\t\/\/ when this action is called directly.\n\trootCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n}\n\n{{ if .Viper -}}\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif cfgFile != \"\" {\n\t\t\/\/ Use config file from the flag.\n\t\tviper.SetConfigFile(cfgFile)\n\t} else {\n\t\t\/\/ Find home directory.\n\t\thome, err := os.UserHomeDir()\n\t\tcobra.CheckErr(err)\n\n\t\t\/\/ Search config in home directory with name \".{{ .AppName }}\" (without extension).\n\t\tviper.AddConfigPath(home)\n\t\tviper.SetConfigType(\"yaml\")\n\t\tviper.SetConfigName(\".{{ .AppName }}\")\n\t}\n\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err == nil {\n\t\tfmt.Fprintln(os.Stderr, \"Using config file:\", viper.ConfigFileUsed())\n\t}\n}\n{{- end }}\n`)\n}\n\nfunc AddCommandTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Project.Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ {{ .CmdName }}Cmd represents the {{ .CmdName }} command\nvar {{ .CmdName }}Cmd = 
&cobra.Command{\n\tUse: \"{{ .CmdName }}\",\n\tShort: \"A brief description of your command\",\n\tLong: ` + \"`\" + `A longer description that spans multiple lines and likely contains examples\nand usage of using your command. For example:\n\nCobra is a CLI library for Go that empowers applications.\nThis application is a tool to generate the needed files\nto quickly create a Cobra application.` + \"`\" + `,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tfmt.Println(\"{{ .CmdName }} called\")\n\t},\n}\n\nfunc init() {\n\t{{ .CmdParent }}.AddCommand({{ .CmdName }}Cmd)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\n\t\/\/ Cobra supports Persistent Flags which will work for this command\n\t\/\/ and all subcommands, e.g.:\n\t\/\/ {{ .CmdName }}Cmd.PersistentFlags().String(\"foo\", \"\", \"A help for foo\")\n\n\t\/\/ Cobra supports local flags which will only run when this command\n\t\/\/ is called directly, e.g.:\n\t\/\/ {{ .CmdName }}Cmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n}\n`)\n}\nfix: Duplicate error message from cobra init boilerplates (#1463)\/\/ Copyright © 2021 Steve Francia .\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tpl\n\nfunc MainTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage main\n\nimport \"{{ .PkgName }}\/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n`)\n}\n\nfunc RootTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage cmd\n\nimport (\n\t\"os\"\n{{- if .Viper }}\n\t\"fmt\"\n{{ end }}\n\t\"github.com\/spf13\/cobra\"\n{{- if .Viper }}\n\t\"github.com\/spf13\/viper\"{{ end }}\n)\n\n{{ if .Viper -}}\nvar cfgFile string\n{{- end }}\n\n\/\/ rootCmd represents the base command when called without any subcommands\nvar rootCmd = &cobra.Command{\n\tUse: \"{{ .AppName }}\",\n\tShort: \"A brief description of your application\",\n\tLong: ` + \"`\" + `A longer description that spans multiple lines and likely contains\nexamples and usage of using your application. For example:\n\nCobra is a CLI library for Go that empowers applications.\nThis application is a tool to generate the needed files\nto quickly create a Cobra application.` + \"`\" + `,\n\t\/\/ Uncomment the following line if your bare application\n\t\/\/ has an action associated with it:\n\t\/\/ Run: func(cmd *cobra.Command, args []string) { },\n}\n\n\/\/ Execute adds all child commands to the root command and sets flags appropriately.\n\/\/ This is called by main.main(). 
It only needs to happen once to the rootCmd.\nfunc Execute() {\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc init() {\n{{- if .Viper }}\n\tcobra.OnInitialize(initConfig)\n{{ end }}\n\t\/\/ Here you will define your flags and configuration settings.\n\t\/\/ Cobra supports persistent flags, which, if defined here,\n\t\/\/ will be global for your application.\n{{ if .Viper }}\n\trootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.{{ .AppName }}.yaml)\")\n{{ else }}\n\t\/\/ rootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.{{ .AppName }}.yaml)\")\n{{ end }}\n\t\/\/ Cobra also supports local flags, which will only run\n\t\/\/ when this action is called directly.\n\trootCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n}\n\n{{ if .Viper -}}\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif cfgFile != \"\" {\n\t\t\/\/ Use config file from the flag.\n\t\tviper.SetConfigFile(cfgFile)\n\t} else {\n\t\t\/\/ Find home directory.\n\t\thome, err := os.UserHomeDir()\n\t\tcobra.CheckErr(err)\n\n\t\t\/\/ Search config in home directory with name \".{{ .AppName }}\" (without extension).\n\t\tviper.AddConfigPath(home)\n\t\tviper.SetConfigType(\"yaml\")\n\t\tviper.SetConfigName(\".{{ .AppName }}\")\n\t}\n\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err == nil {\n\t\tfmt.Fprintln(os.Stderr, \"Using config file:\", viper.ConfigFileUsed())\n\t}\n}\n{{- end }}\n`)\n}\n\nfunc AddCommandTemplate() []byte {\n\treturn []byte(`\/*\n{{ .Project.Copyright }}\n{{ if .Legal.Header }}{{ .Legal.Header }}{{ end }}\n*\/\npackage cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ {{ .CmdName }}Cmd represents the {{ .CmdName }} command\nvar {{ .CmdName }}Cmd = &cobra.Command{\n\tUse: \"{{ .CmdName }}\",\n\tShort: \"A brief description of your command\",\n\tLong: ` + \"`\" + `A longer description that spans multiple lines and likely contains examples\nand usage of using your command. For example:\n\nCobra is a CLI library for Go that empowers applications.\nThis application is a tool to generate the needed files\nto quickly create a Cobra application.` + \"`\" + `,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tfmt.Println(\"{{ .CmdName }} called\")\n\t},\n}\n\nfunc init() {\n\t{{ .CmdParent }}.AddCommand({{ .CmdName }}Cmd)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\n\t\/\/ Cobra supports Persistent Flags which will work for this command\n\t\/\/ and all subcommands, e.g.:\n\t\/\/ {{ .CmdName }}Cmd.PersistentFlags().String(\"foo\", \"\", \"A help for foo\")\n\n\t\/\/ Cobra supports local flags which will only run when this command\n\t\/\/ is called directly, e.g.:\n\t\/\/ {{ .CmdName }}Cmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n}\n`)\n}\n<|endoftext|>"} {"text":"package a\n\nimport (\n\t. 
\"github.com\/alecthomas\/chroma\" \/\/ nolint\n\t\"github.com\/alecthomas\/chroma\/lexers\/internal\"\n)\n\n\/\/ Al lexer.\nvar Al = internal.Register(MustNewLazyLexer(\n\t&Config{\n\t\tName: \"AL\",\n\t\tAliases: []string{\"al\"},\n\t\tFilenames: []string{\"*.al\", \"*.dal\"},\n\t\tMimeTypes: []string{\"text\/x-al\"},\n\t\tDotAll: true,\n\t\tCaseInsensitive: true,\n\t},\n\talRules,\n))\n\n\/\/ https:\/\/github.com\/microsoft\/AL\/blob\/master\/grammar\/alsyntax.tmlanguage\nfunc alRules() Rules {\n\treturn Rules{\n\t\t\"root\": {\n\t\t\t{`\\s+`, TextWhitespace, nil},\n\t\t\t{`(?s)\\\/\\*.*?\\\\*\\*\\\/`, CommentMultiline, nil},\n\t\t\t{`(?s)\/\/.*?\\n`, CommentSingle, nil},\n\t\t\t{`\\\"([^\\\"])*\\\"`, Text, nil},\n\t\t\t{`'([^'])*'`, LiteralString, nil},\n\t\t\t{`\\b(?i:(ARRAY|ASSERTERROR|BEGIN|BREAK|CASE|DO|DOWNTO|ELSE|END|EVENT|EXIT|FOR|FOREACH|FUNCTION|IF|IMPLEMENTS|IN|INDATASET|INTERFACE|INTERNAL|LOCAL|OF|PROCEDURE|PROGRAM|PROTECTED|REPEAT|RUNONCLIENT|SECURITYFILTERING|SUPPRESSDISPOSE|TEMPORARY|THEN|TO|TRIGGER|UNTIL|VAR|WHILE|WITH|WITHEVENTS))\\b`, Keyword, nil},\n\t\t\t{`\\b(?i:(AND|DIV|MOD|NOT|OR|XOR))\\b`, OperatorWord, nil},\n\t\t\t{`\\b(?i:(AVERAGE|CONST|COUNT|EXIST|FIELD|FILTER|LOOKUP|MAX|MIN|ORDER|SORTING|SUM|TABLEDATA|UPPERLIMIT|WHERE|ASCENDING|DESCENDING))\\b`, Keyword, nil},\n\t\t\t\/\/ Added new objects types of BC 2021 wave 1 (REPORTEXTENSION|Entitlement|PermissionSet|PermissionSetExtension)\n\t\t\t{`\\b(?i:(CODEUNIT|PAGE|PAGEEXTENSION|PAGECUSTOMIZATION|DOTNET|ENUM|ENUMEXTENSION|VALUE|QUERY|REPORT|TABLE|TABLEEXTENSION|XMLPORT|PROFILE|CONTROLADDIN|REPORTEXTENSION|Entitlement|PermissionSet|PermissionSetExtension))\\b`, Keyword, nil},\n\t\t\t{`\\b(?i:(Action|Array|Automation|BigInteger|BigText|Blob|Boolean|Byte|Char|ClientType|Code|Codeunit|CompletionTriggerErrorLevel|ConnectionType|Database|DataClassification|DataScope|Date|DateFormula|DateTime|Decimal|DefaultLayout|Dialog|Dictionary|DotNet|DotNetAssembly|DotNetTypeDeclaration|Duration|Enum|ErrorInfo|ErrorType|ExecutionContext|ExecutionMode|FieldClass|FieldRef|FieldType|File|FilterPageBuilder|Guid|InStream|Integer|Joker|KeyRef|List|ModuleDependencyInfo|ModuleInfo|None|Notification|NotificationScope|ObjectType|Option|OutStream|Page|PageResult|Query|Record|RecordId|RecordRef|Report|ReportFormat|SecurityFilter|SecurityFiltering|Table|TableConnectionType|TableFilter|TestAction|TestField|TestFilterField|TestPage|TestPermissions|TestRequestPage|Text|TextBuilder|TextConst|TextEncoding|Time|TransactionModel|TransactionType|Variant|Verbosity|Version|XmlPort|HttpContent|HttpHeaders|HttpClient|HttpRequestMessage|HttpResponseMessage|JsonToken|JsonValue|JsonArray|JsonObject|View|Views|XmlAttribute|XmlAttributeCollection|XmlComment|XmlCData|XmlDeclaration|XmlDocument|XmlDocumentType|XmlElement|XmlNamespaceManager|XmlNameTable|XmlNode|XmlNodeList|XmlProcessingInstruction|XmlReadOptions|XmlText|XmlWriteOptions|WebServiceActionContext|WebServiceActionResultCode|SessionSettings))\\b`, Keyword, nil},\n\t\t\t{`\\b([<>]=|<>|<|>)\\b?`, Operator, nil},\n\t\t\t{`\\b(\\-|\\+|\\\/|\\*)\\b`, Operator, nil},\n\t\t\t{`\\s*(\\:=|\\+=|-=|\\\/=|\\*=)\\s*?`, Operator, 
nil},\n\t\t\t{`\\b(?i:(ADDFIRST|ADDLAST|ADDAFTER|ADDBEFORE|ACTION|ACTIONS|AREA|ASSEMBLY|CHARTPART|CUEGROUP|CUSTOMIZES|COLUMN|DATAITEM|DATASET|ELEMENTS|EXTENDS|FIELD|FIELDGROUP|FIELDATTRIBUTE|FIELDELEMENT|FIELDGROUPS|FIELDS|FILTER|FIXED|GRID|GROUP|MOVEAFTER|MOVEBEFORE|KEY|KEYS|LABEL|LABELS|LAYOUT|MODIFY|MOVEFIRST|MOVELAST|MOVEBEFORE|MOVEAFTER|PART|REPEATER|USERCONTROL|REQUESTPAGE|SCHEMA|SEPARATOR|SYSTEMPART|TABLEELEMENT|TEXTATTRIBUTE|TEXTELEMENT|TYPE))\\b`, Keyword, nil},\n\t\t\t{`\\s*[(\\.\\.)&\\|]\\s*`, Operator, nil},\n\t\t\t{`\\b((0(x|X)[0-9a-fA-F]*)|(([0-9]+\\.?[0-9]*)|(\\.[0-9]+))((e|E)(\\+|-)?[0-9]+)?)(L|l|UL|ul|u|U|F|f|ll|LL|ull|ULL)?\\b`, LiteralNumber, nil},\n\t\t\t{`[;:,]`, Punctuation, nil},\n\t\t\t{`#[ \\t]*(if|else|elif|endif|define|undef|region|endregion|pragma)\\b.*?\\n`, CommentPreproc, nil},\n\t\t\t{`\\w+`, Text, nil},\n\t\t\t{`.`, Text, nil},\n\t\t},\n\t}\n}\nUpdate to the last version of microsoft grammar filepackage a\n\nimport (\n\t. \"github.com\/alecthomas\/chroma\" \/\/ nolint\n\t\"github.com\/alecthomas\/chroma\/lexers\/internal\"\n)\n\n\/\/ Al lexer.\nvar Al = internal.Register(MustNewLazyLexer(\n\t&Config{\n\t\tName: \"AL\",\n\t\tAliases: []string{\"al\"},\n\t\tFilenames: []string{\"*.al\", \"*.dal\"},\n\t\tMimeTypes: []string{\"text\/x-al\"},\n\t\tDotAll: true,\n\t\tCaseInsensitive: true,\n\t},\n\talRules,\n))\n\n\/\/ https:\/\/github.com\/microsoft\/AL\/blob\/master\/grammar\/alsyntax.tmlanguage\nfunc alRules() Rules {\n\treturn Rules{\n\t\t\"root\": {\n\t\t\t{`\\s+`, TextWhitespace, nil},\n\t\t\t{`(?s)\\\/\\*.*?\\\\*\\*\\\/`, CommentMultiline, nil},\n\t\t\t{`(?s)\/\/.*?\\n`, CommentSingle, nil},\n\t\t\t{`\\\"([^\\\"])*\\\"`, Text, nil},\n\t\t\t{`'([^'])*'`, LiteralString, nil},\n\t\t\t{`\\b(?i:(ARRAY|ASSERTERROR|BEGIN|BREAK|CASE|DO|DOWNTO|ELSE|END|EVENT|EXIT|FOR|FOREACH|FUNCTION|IF|IMPLEMENTS|IN|INDATASET|INTERFACE|INTERNAL|LOCAL|OF|PROCEDURE|PROGRAM|PROTECTED|REPEAT|RUNONCLIENT|SECURITYFILTERING|SUPPRESSDISPOSE|TEMPORARY|THEN|TO|TRIGGER|UNTIL|VAR|WHILE|WITH|WITHEVENTS))\\b`, Keyword, nil},\n\t\t\t{`\\b(?i:(AND|DIV|MOD|NOT|OR|XOR))\\b`, OperatorWord, nil},\n\t\t\t{`\\b(?i:(AVERAGE|CONST|COUNT|EXIST|FIELD|FILTER|LOOKUP|MAX|MIN|ORDER|SORTING|SUM|TABLEDATA|UPPERLIMIT|WHERE|ASCENDING|DESCENDING))\\b`, Keyword, nil},\n\t\t\t{`\\b(?i:(CODEUNIT|PAGE|PAGEEXTENSION|PAGECUSTOMIZATION|DOTNET|ENUM|ENUMEXTENSION|VALUE|QUERY|REPORT|TABLE|TABLEEXTENSION|XMLPORT|PROFILE|CONTROLADDIN|REPORTEXTENSION|INTERFACE|PERMISSIONSET|PERMISSIONSETEXTENSION|ENTITLEMENT))\\b`, Keyword, 
nil},\n\t\t\t{`\\b(?i:(Action|Array|Automation|BigInteger|BigText|Blob|Boolean|Byte|Char|ClientType|Code|Codeunit|CompletionTriggerErrorLevel|ConnectionType|Database|DataClassification|DataScope|Date|DateFormula|DateTime|Decimal|DefaultLayout|Dialog|Dictionary|DotNet|DotNetAssembly|DotNetTypeDeclaration|Duration|Enum|ErrorInfo|ErrorType|ExecutionContext|ExecutionMode|FieldClass|FieldRef|FieldType|File|FilterPageBuilder|Guid|InStream|Integer|Joker|KeyRef|List|ModuleDependencyInfo|ModuleInfo|None|Notification|NotificationScope|ObjectType|Option|OutStream|Page|PageResult|Query|Record|RecordId|RecordRef|Report|ReportFormat|SecurityFilter|SecurityFiltering|Table|TableConnectionType|TableFilter|TestAction|TestField|TestFilterField|TestPage|TestPermissions|TestRequestPage|Text|TextBuilder|TextConst|TextEncoding|Time|TransactionModel|TransactionType|Variant|Verbosity|Version|XmlPort|HttpContent|HttpHeaders|HttpClient|HttpRequestMessage|HttpResponseMessage|JsonToken|JsonValue|JsonArray|JsonObject|View|Views|XmlAttribute|XmlAttributeCollection|XmlComment|XmlCData|XmlDeclaration|XmlDocument|XmlDocumentType|XmlElement|XmlNamespaceManager|XmlNameTable|XmlNode|XmlNodeList|XmlProcessingInstruction|XmlReadOptions|XmlText|XmlWriteOptions|WebServiceActionContext|WebServiceActionResultCode|SessionSettings))\\b`, Keyword, nil},\n\t\t\t{`\\b([<>]=|<>|<|>)\\b?`, Operator, nil},\n\t\t\t{`\\b(\\-|\\+|\\\/|\\*)\\b`, Operator, nil},\n\t\t\t{`\\s*(\\:=|\\+=|-=|\\\/=|\\*=)\\s*?`, Operator, nil},\n\t\t\t{`\\b(?i:(ADD|ADDFIRST|ADDLAST|ADDAFTER|ADDBEFORE|ACTION|ACTIONS|AREA|ASSEMBLY|CHARTPART|CUEGROUP|CUSTOMIZES|COLUMN|DATAITEM|DATASET|ELEMENTS|EXTENDS|FIELD|FIELDGROUP|FIELDATTRIBUTE|FIELDELEMENT|FIELDGROUPS|FIELDS|FILTER|FIXED|GRID|GROUP|MOVEAFTER|MOVEBEFORE|KEY|KEYS|LABEL|LABELS|LAYOUT|MODIFY|MOVEFIRST|MOVELAST|MOVEBEFORE|MOVEAFTER|PART|REPEATER|USERCONTROL|REQUESTPAGE|SCHEMA|SEPARATOR|SYSTEMPART|TABLEELEMENT|TEXTATTRIBUTE|TEXTELEMENT|TYPE))\\b`, Keyword, nil},\n\t\t\t{`\\s*[(\\.\\.)&\\|]\\s*`, Operator, nil},\n\t\t\t{`\\b((0(x|X)[0-9a-fA-F]*)|(([0-9]+\\.?[0-9]*)|(\\.[0-9]+))((e|E)(\\+|-)?[0-9]+)?)(L|l|UL|ul|u|U|F|f|ll|LL|ull|ULL)?\\b`, LiteralNumber, nil},\n\t\t\t{`[;:,]`, Punctuation, nil},\n\t\t\t{`#[ \\t]*(if|else|elif|endif|define|undef|region|endregion|pragma)\\b.*?\\n`, CommentPreproc, nil},\n\t\t\t{`\\w+`, Text, nil},\n\t\t\t{`.`, Text, nil},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"package api\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ TestIntegrationHosting tests that the host correctly receives payment for\n\/\/ hosting files.\nfunc TestIntegrationHosting(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\tst, err := createServerTester(\"TestIntegrationHosting\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ announce the host\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + string(st.host.Address()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ we need to announce twice, or the renter will complain about not having enough hosts\n\tloopAddr := \"127.0.0.1:\" + st.host.Address().Port()\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + loopAddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ wait for announcement to register\n\tst.miner.AddBlock()\n\tvar hosts ActiveHosts\n\tst.getAPI(\"\/hostdb\/hosts\/active\", &hosts)\n\tif len(hosts.Hosts) == 0 {\n\t\tt.Fatal(\"host announcement not 
seen\")\n\t}\n\n\t\/\/ create a file\n\tpath := filepath.Join(build.SiaTestingDir, \"api\", \"TestIntegrationHosting\", \"test.dat\")\n\tdata, err := crypto.RandBytes(1024)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ioutil.WriteFile(path, data, 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ upload to host\n\terr = st.stdGetAPI(\"\/renter\/files\/upload?nickname=test&duration=10&source=\" + path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar fi []FileInfo\n\tvar loops int\n\tfor len(fi) != 1 || fi[0].UploadProgress != 100 {\n\t\tst.getAPI(\"\/renter\/files\/list\", &fi)\n\t\ttime.Sleep(3 * time.Second)\n\t\tloops++\n\t}\n\n\t\/\/ mine blocks until storage proof is complete\n\tfor i := 0; i < 50+int(types.MaturityDelay); i++ {\n\t\tst.miner.AddBlock()\n\t}\n\n\t\/\/ check balance\n\tvar wi WalletGET\n\tst.getAPI(\"\/wallet\", &wi)\n\texpBal := \"16499494999617870000000002429474\"\n\tif wi.ConfirmedSiacoinBalance.String() != expBal {\n\t\tt.Fatalf(\"host's balance was not affected: expected %v, got %v\", expBal, wi.ConfirmedSiacoinBalance)\n\t}\n}\nuse host.Profit in host test instead of walletpackage api\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\n\/\/ TestIntegrationHosting tests that the host correctly receives payment for\n\/\/ hosting files.\nfunc TestIntegrationHosting(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\tst, err := createServerTester(\"TestIntegrationHosting\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ announce the host\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + string(st.host.Address()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ we need to announce twice, or the renter will complain about not having enough hosts\n\tloopAddr := \"127.0.0.1:\" + st.host.Address().Port()\n\terr = st.stdGetAPI(\"\/host\/announce?address=\" + loopAddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ wait for announcement to register\n\tst.miner.AddBlock()\n\tvar hosts ActiveHosts\n\tst.getAPI(\"\/hostdb\/hosts\/active\", &hosts)\n\tif len(hosts.Hosts) == 0 {\n\t\tt.Fatal(\"host announcement not seen\")\n\t}\n\n\t\/\/ create a file\n\tpath := filepath.Join(build.SiaTestingDir, \"api\", \"TestIntegrationHosting\", \"test.dat\")\n\tdata, err := crypto.RandBytes(1024)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ioutil.WriteFile(path, data, 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ upload to host\n\terr = st.stdGetAPI(\"\/renter\/files\/upload?nickname=test&duration=10&source=\" + path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar fi []FileInfo\n\tfor len(fi) != 1 || fi[0].UploadProgress != 100 {\n\t\tst.getAPI(\"\/renter\/files\/list\", &fi)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n\n\t\/\/ mine blocks until storage proof is complete\n\tfor i := 0; i < 20+int(types.MaturityDelay); i++ {\n\t\tst.miner.AddBlock()\n\t}\n\n\t\/\/ check profit\n\tvar hi modules.HostInfo\n\tst.getAPI(\"\/host\/status\", &hi)\n\texpProfit := \"382129999999997570526\"\n\tif hi.Profit.String() != expProfit {\n\t\tt.Log(hi)\n\t\tt.Fatalf(\"host's profit was not affected: expected %v, got %v\", expProfit, hi.Profit)\n\t}\n}\n<|endoftext|>"} {"text":"package manager\n\nimport (\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/sirupsen\/logrus\"\n\tkerrors 
\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc (m *Manager) Sync(key string, obj *v3.Catalog) error {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\tif obj.DeletionTimestamp != nil {\n\t\ttemplates, err := m.getTemplateMap(obj.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttvToDelete := map[string]struct{}{}\n\t\tfor _, t := range templates {\n\t\t\ttvs, err := m.getTemplateVersion(t.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k := range tvs {\n\t\t\t\ttvToDelete[k] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tfor k := range templates {\n\t\t\t\t\tif err := m.templateClient.Delete(k, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {\n\t\t\t\t\t\tlogrus.Warnf(\"Deleting template %v doesn't succeed. Continue loop\", k)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor k := range tvToDelete {\n\t\t\t\t\tif err := m.templateVersionClient.Delete(k, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {\n\t\t\t\t\t\tlogrus.Warnf(\"Deleting templateVersion %v doesn't succeed. Continue loop\", k)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}()\n\t\treturn nil\n\t}\n\n\t\/\/ always get a refresh catalog from etcd\n\tcatalog, err := m.catalogClient.Get(key, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trepoPath, commit, err := m.prepareRepoPath(*catalog)\n\tif err != nil {\n\t\tv3.CatalogConditionRefreshed.False(catalog)\n\t\tv3.CatalogConditionRefreshed.ReasonAndMessageFromError(catalog, err)\n\t\tm.catalogClient.Update(catalog)\n\t\treturn err\n\t}\n\n\tif commit == catalog.Status.Commit {\n\t\tlogrus.Debugf(\"Catalog %s is already up to date\", catalog.Name)\n\t\tif v3.CatalogConditionRefreshed.IsUnknown(catalog) {\n\t\t\tv3.CatalogConditionRefreshed.True(catalog)\n\t\t\tv3.CatalogConditionRefreshed.Reason(catalog, \"\")\n\t\t\tm.catalogClient.Update(catalog)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlogrus.Infof(\"Updating catalog %s\", catalog.Name)\n\treturn m.traverseAndUpdate(repoPath, commit, catalog)\n}\ncheck nil when deleting catalogpackage manager\n\nimport (\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/sirupsen\/logrus\"\n\tkerrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc (m *Manager) Sync(key string, obj *v3.Catalog) error {\n\tif obj == nil {\n\t\ttemplates, err := m.getTemplateMap(key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttvToDelete := map[string]struct{}{}\n\t\tfor _, t := range templates {\n\t\t\ttvs, err := m.getTemplateVersion(t.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k := range tvs {\n\t\t\t\ttvToDelete[k] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tfor k := range templates {\n\t\t\t\t\tif err := m.templateClient.Delete(k, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {\n\t\t\t\t\t\tlogrus.Warnf(\"Deleting template %v doesn't succeed. Continue loop\", k)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor k := range tvToDelete {\n\t\t\t\t\tif err := m.templateVersionClient.Delete(k, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {\n\t\t\t\t\t\tlogrus.Warnf(\"Deleting templateVersion %v doesn't succeed. 
Continue loop\", k)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}()\n\t\treturn nil\n\t}\n\n\t\/\/ always get a refresh catalog from etcd\n\tcatalog, err := m.catalogClient.Get(key, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trepoPath, commit, err := m.prepareRepoPath(*catalog)\n\tif err != nil {\n\t\tv3.CatalogConditionRefreshed.False(catalog)\n\t\tv3.CatalogConditionRefreshed.ReasonAndMessageFromError(catalog, err)\n\t\tm.catalogClient.Update(catalog)\n\t\treturn err\n\t}\n\n\tif commit == catalog.Status.Commit {\n\t\tlogrus.Debugf(\"Catalog %s is already up to date\", catalog.Name)\n\t\tif v3.CatalogConditionRefreshed.IsUnknown(catalog) {\n\t\t\tv3.CatalogConditionRefreshed.True(catalog)\n\t\t\tv3.CatalogConditionRefreshed.Reason(catalog, \"\")\n\t\t\tm.catalogClient.Update(catalog)\n\t\t}\n\t\treturn nil\n\t}\n\n\tlogrus.Infof(\"Updating catalog %s\", catalog.Name)\n\treturn m.traverseAndUpdate(repoPath, commit, catalog)\n}\n<|endoftext|>"} {"text":"package fsutil\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"io\"\n)\n\nfunc newChecksumReader(reader io.Reader) *ChecksumReader {\n\tr := new(ChecksumReader)\n\tr.checksummer = sha512.New()\n\tif _, ok := reader.(io.ByteReader); !ok {\n\t\tr.reader = bufio.NewReader(reader)\n\t} else {\n\t\tr.reader = reader\n\t}\n\treturn r\n}\n\nfunc (r *ChecksumReader) read(p []byte) (int, error) {\n\tnRead, err := r.reader.Read(p)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\t_, err = r.checksummer.Write(p[:nRead])\n\treturn nRead, err\n}\n\nfunc (r *ChecksumReader) readByte() (byte, error) {\n\tbuf := make([]byte, 1)\n\t_, err := r.read(buf)\n\treturn buf[0], err\n}\n\nfunc (r *ChecksumReader) verifyChecksum() error {\n\tbuf := make([]byte, r.checksummer.Size())\n\tnRead, err := r.reader.Read(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif nRead != r.checksummer.Size() {\n\t\treturn fmt.Errorf(\n\t\t\t\"ChecksumReader.Checksum(): expected: %d got: %d bytes\",\n\t\t\tr.checksummer.Size(), nRead)\n\t}\n\tif !bytes.Equal(buf, r.checksummer.Sum(nil)) {\n\t\treturn ErrorChecksumMismatch\n\t}\n\treturn nil\n}\n\nfunc newChecksumWriter(writer io.Writer) *ChecksumWriter {\n\tw := new(ChecksumWriter)\n\tw.checksummer = sha512.New()\n\tw.writer = writer\n\treturn w\n}\n\nfunc (w *ChecksumWriter) write(p []byte) (int, error) {\n\tif _, err := w.checksummer.Write(p); err != nil {\n\t\treturn 0, err\n\t}\n\treturn w.writer.Write(p)\n}\n\nfunc (w *ChecksumWriter) writeChecksum() error {\n\t_, err := w.writer.Write(w.checksummer.Sum(nil))\n\treturn err\n}\nFix short read bug in lib\/fsutil.ChecksumReader.VerifyChecksum().package fsutil\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"io\"\n)\n\nfunc newChecksumReader(reader io.Reader) *ChecksumReader {\n\tr := new(ChecksumReader)\n\tr.checksummer = sha512.New()\n\tif _, ok := reader.(io.ByteReader); !ok {\n\t\tr.reader = bufio.NewReader(reader)\n\t} else {\n\t\tr.reader = reader\n\t}\n\treturn r\n}\n\nfunc (r *ChecksumReader) read(p []byte) (int, error) {\n\tnRead, err := r.reader.Read(p)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\t_, err = r.checksummer.Write(p[:nRead])\n\treturn nRead, err\n}\n\nfunc (r *ChecksumReader) readByte() (byte, error) {\n\tbuf := make([]byte, 1)\n\t_, err := r.read(buf)\n\treturn buf[0], err\n}\n\nfunc (r *ChecksumReader) verifyChecksum() error {\n\tbuf := make([]byte, r.checksummer.Size())\n\tnRead, err := io.ReadAtLeast(r.reader, buf, len(buf))\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tif nRead != r.checksummer.Size() {\n\t\treturn fmt.Errorf(\n\t\t\t\"ChecksumReader.Checksum(): expected: %d got: %d bytes\",\n\t\t\tr.checksummer.Size(), nRead)\n\t}\n\tif !bytes.Equal(buf, r.checksummer.Sum(nil)) {\n\t\treturn ErrorChecksumMismatch\n\t}\n\treturn nil\n}\n\nfunc newChecksumWriter(writer io.Writer) *ChecksumWriter {\n\tw := new(ChecksumWriter)\n\tw.checksummer = sha512.New()\n\tw.writer = writer\n\treturn w\n}\n\nfunc (w *ChecksumWriter) write(p []byte) (int, error) {\n\tif _, err := w.checksummer.Write(p); err != nil {\n\t\treturn 0, err\n\t}\n\treturn w.writer.Write(p)\n}\n\nfunc (w *ChecksumWriter) writeChecksum() error {\n\t_, err := w.writer.Write(w.checksummer.Sum(nil))\n\treturn err\n}\n<|endoftext|>"} {"text":"\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage timestampvm\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/ava-labs\/gecko\/ids\"\n\n\t\"github.com\/ava-labs\/gecko\/utils\/formatting\"\n)\n\nvar (\n\terrDBError = errors.New(\"error getting data from database\")\n\terrBadData = errors.New(\"data must be base 58 repr. of 32 bytes\")\n\terrNoSuchBlock = errors.New(\"couldn't get block from database. Does it exist?\")\n)\n\n\/\/ Service is the API service for this VM\ntype Service struct{ vm *VM }\n\n\/\/ ProposeBlockArgs are the arguments to function ProposeValue\ntype ProposeBlockArgs struct {\n\t\/\/ Data in the block. Must be base 58 encoding of 32 bytes.\n\tData string `json:\"data\"`\n}\n\n\/\/ ProposeBlockReply is the reply from function ProposeBlock\ntype ProposeBlockReply struct{ Success bool }\n\n\/\/ ProposeBlock is an API method to propose a new block whose data is [args].Data.\n\/\/ [args].Data must be a string repr. of a 32 byte array\nfunc (s *Service) ProposeBlock(_ *http.Request, args *ProposeBlockArgs, reply *ProposeBlockReply) error {\n\tbyteFormatter := formatting.CB58{}\n\tif err := byteFormatter.FromString(args.Data); err != nil {\n\t\treturn errBadData\n\t}\n\tdataSlice := byteFormatter.Bytes\n\tif len(dataSlice) != dataLen {\n\t\treturn errBadData\n\t}\n\tvar data [dataLen]byte \/\/ The data as an array of bytes\n\tcopy(data[:], dataSlice[:dataLen]) \/\/ Copy the bytes in dataSlice to data\n\ts.vm.proposeBlock(data)\n\treply.Success = true\n\treturn nil\n}\n\n\/\/ APIBlock is the API representation of a block\ntype APIBlock struct {\n\tTimestamp int64 `json:\"timestamp\"` \/\/ Timestamp of most recent block\n\tData string `json:\"data\"` \/\/ Data in the most recent block. Base 58 repr. of 5 bytes.\n\tID string `json:\"id\"` \/\/ String repr. of ID of the most recent block\n\tParentID string `json:\"parentID\"` \/\/ String repr. 
of ID of the most recent block's parent\n}\n\n\/\/ GetBlockArgs are the arguments to GetBlock\ntype GetBlockArgs struct {\n\t\/\/ ID of the block we're getting.\n\t\/\/ If left blank, gets the latest block\n\tID string\n}\n\n\/\/ GetBlockReply is the reply from GetBlock\ntype GetBlockReply struct {\n\tAPIBlock\n}\n\n\/\/ GetBlock gets the block whose ID is [args.ID]\n\/\/ If [args.ID] is empty, get the latest block\nfunc (s *Service) GetBlock(_ *http.Request, args *GetBlockArgs, reply *GetBlockReply) error {\n\tvar ID ids.ID\n\tvar err error\n\tif args.ID == \"\" {\n\t\tID = s.vm.LastAccepted()\n\t} else {\n\t\tID, err = ids.FromString(args.ID)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"problem parsing ID\")\n\t\t}\n\t}\n\n\tblockInterface, err := s.vm.GetBlock(ID)\n\tif err != nil {\n\t\treturn errDatabase\n\t}\n\n\tblock, ok := blockInterface.(*Block)\n\tif !ok {\n\t\treturn errBadData\n\t}\n\n\treply.APIBlock.ID = block.ID().String()\n\treply.APIBlock.Timestamp = block.Timestamp\n\treply.APIBlock.ParentID = block.ParentID().String()\n\tbyteFormatter := formatting.CB58{Bytes: block.Data[:]}\n\treply.Data = byteFormatter.String()\n\n\treturn nil\n}\ngetBlock returns timestamp with quotes\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage timestampvm\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/ava-labs\/gecko\/ids\"\n\t\"github.com\/ava-labs\/gecko\/utils\/json\"\n\n\t\"github.com\/ava-labs\/gecko\/utils\/formatting\"\n)\n\nvar (\n\terrDBError = errors.New(\"error getting data from database\")\n\terrBadData = errors.New(\"data must be base 58 repr. of 32 bytes\")\n\terrNoSuchBlock = errors.New(\"couldn't get block from database. Does it exist?\")\n)\n\n\/\/ Service is the API service for this VM\ntype Service struct{ vm *VM }\n\n\/\/ ProposeBlockArgs are the arguments to function ProposeValue\ntype ProposeBlockArgs struct {\n\t\/\/ Data in the block. Must be base 58 encoding of 32 bytes.\n\tData string `json:\"data\"`\n}\n\n\/\/ ProposeBlockReply is the reply from function ProposeBlock\ntype ProposeBlockReply struct{ Success bool }\n\n\/\/ ProposeBlock is an API method to propose a new block whose data is [args].Data.\n\/\/ [args].Data must be a string repr. of a 32 byte array\nfunc (s *Service) ProposeBlock(_ *http.Request, args *ProposeBlockArgs, reply *ProposeBlockReply) error {\n\tbyteFormatter := formatting.CB58{}\n\tif err := byteFormatter.FromString(args.Data); err != nil {\n\t\treturn errBadData\n\t}\n\tdataSlice := byteFormatter.Bytes\n\tif len(dataSlice) != dataLen {\n\t\treturn errBadData\n\t}\n\tvar data [dataLen]byte \/\/ The data as an array of bytes\n\tcopy(data[:], dataSlice[:dataLen]) \/\/ Copy the bytes in dataSlice to data\n\ts.vm.proposeBlock(data)\n\treply.Success = true\n\treturn nil\n}\n\n\/\/ APIBlock is the API representation of a block\ntype APIBlock struct {\n\tTimestamp json.Uint64 `json:\"timestamp\"` \/\/ Timestamp of most recent block\n\tData string `json:\"data\"` \/\/ Data in the most recent block. Base 58 repr. of 5 bytes.\n\tID string `json:\"id\"` \/\/ String repr. of ID of the most recent block\n\tParentID string `json:\"parentID\"` \/\/ String repr. 
of ID of the most recent block's parent\n}\n\n\/\/ GetBlockArgs are the arguments to GetBlock\ntype GetBlockArgs struct {\n\t\/\/ ID of the block we're getting.\n\t\/\/ If left blank, gets the latest block\n\tID string\n}\n\n\/\/ GetBlockReply is the reply from GetBlock\ntype GetBlockReply struct {\n\tAPIBlock\n}\n\n\/\/ GetBlock gets the block whose ID is [args.ID]\n\/\/ If [args.ID] is empty, get the latest block\nfunc (s *Service) GetBlock(_ *http.Request, args *GetBlockArgs, reply *GetBlockReply) error {\n\tvar ID ids.ID\n\tvar err error\n\tif args.ID == \"\" {\n\t\tID = s.vm.LastAccepted()\n\t} else {\n\t\tID, err = ids.FromString(args.ID)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"problem parsing ID\")\n\t\t}\n\t}\n\n\tblockInterface, err := s.vm.GetBlock(ID)\n\tif err != nil {\n\t\treturn errDatabase\n\t}\n\n\tblock, ok := blockInterface.(*Block)\n\tif !ok {\n\t\treturn errBadData\n\t}\n\n\treply.APIBlock.ID = block.ID().String()\n\treply.APIBlock.Timestamp = json.Uint64(block.Timestamp)\n\treply.APIBlock.ParentID = block.ParentID().String()\n\tbyteFormatter := formatting.CB58{Bytes: block.Data[:]}\n\treply.Data = byteFormatter.String()\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package sdees\n\nimport (\n\t\"math\/rand\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n)\n\n\/\/ timeTrack from https:\/\/coderwall.com\/p\/cp5fya\/measuring-execution-time-in-go\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tlogger.Debug(\"%s took %s\", name, elapsed)\n}\n\n\/\/ RandStringBytesMaskImprSrc generates a random string using an alphabet and seed\n\/\/ from SO\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nconst (\n\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n)\n\nfunc RandStringBytesMaskImprSrc(n int, seed int64) string {\n\tsrc := rand.NewSource(seed)\n\tb := make([]byte, n)\n\tfor i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t}\n\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIdxBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}\n\n\/\/ exists returns whether the given file or directory exists or not\n\/\/ from http:\/\/stackoverflow.com\/questions\/10510691\/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Shred writes random data to the file before erasing it\nfunc Shred(fileName string) error {\n\tf, err := os.OpenFile(fileName, os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileData, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb := make([]byte, fileData.Size())\n\t_, err = rand.Read(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.WriteAt(b, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\terr = os.Remove(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetWordsFromText(text string) []string {\n\twords := regexp.MustCompile(\"\\\\w+\")\n\treturn words.FindAllString(text, -1)\n}\nAdded GetRandomMD5Hashpackage sdees\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n)\n\n\/\/ timeTrack from https:\/\/coderwall.com\/p\/cp5fya\/measuring-execution-time-in-go\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tlogger.Debug(\"%s took %s\", name, elapsed)\n}\n\nfunc GetRandomMD5Hash() string {\n\thasher := 
md5.New()\n\thasher.Write([]byte(RandStringBytesMaskImprSrc(10, time.Now().UnixNano())))\n\treturn hex.EncodeToString(hasher.Sum(nil))[0:8]\n}\n\n\/\/ RandStringBytesMaskImprSrc generates a random string using an alphabet and seed\n\/\/ from SO\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nconst (\n\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n)\n\nfunc RandStringBytesMaskImprSrc(n int, seed int64) string {\n\tsrc := rand.NewSource(seed)\n\tb := make([]byte, n)\n\tfor i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t}\n\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIdxBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}\n\n\/\/ exists returns whether the given file or directory exists or not\n\/\/ from http:\/\/stackoverflow.com\/questions\/10510691\/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Shred writes random data to the file before erasing it\nfunc Shred(fileName string) error {\n\tf, err := os.OpenFile(fileName, os.O_RDWR|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileData, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb := make([]byte, fileData.Size())\n\t_, err = rand.Read(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.WriteAt(b, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Close()\n\terr = os.Remove(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetWordsFromText(text string) []string {\n\twords := regexp.MustCompile(\"\\\\w+\")\n\treturn words.FindAllString(text, -1)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/concurrent\"\n\t\"github.com\/Comcast\/webpa-common\/secure\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/handler\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/key\"\n\t\"github.com\/Comcast\/webpa-common\/server\"\n\t\"github.com\/Comcast\/webpa-common\/webhook\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n)\n\nconst (\n\tapplicationName = \"caduceus\"\n\tDEFAULT_KEY_ID = \"current\"\n)\n\n\/\/ getValidator returns validator for JWT tokens\nfunc getValidator(v *viper.Viper) (validator secure.Validator, err error) {\n\tdefault_validators := make(secure.Validators, 0, 0)\n\tvar jwtVals []JWTValidator\n\n\tv.UnmarshalKey(\"jwtValidators\", &jwtVals)\n\n\t\/\/ make sure there is at least one jwtValidator supplied\n\tif len(jwtVals) < 1 {\n\t\tvalidator = default_validators\n\t\treturn\n\t}\n\n\t\/\/ if a JWTKeys section was supplied, configure a JWS validator\n\t\/\/ and append it to the chain of validators\n\tvalidators := make(secure.Validators, 0, len(jwtVals))\n\n\tfor _, validatorDescriptor := range jwtVals {\n\t\tvar keyResolver key.Resolver\n\t\tkeyResolver, err = validatorDescriptor.Keys.NewResolver()\n\t\tif err != nil {\n\t\t\tvalidator = validators\n\t\t\treturn\n\t\t}\n\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.JWSValidator{\n\t\t\t\tDefaultKeyId: DEFAULT_KEY_ID,\n\t\t\t\tResolver: keyResolver,\n\t\t\t\tJWTValidators: 
[]*jwt.Validator{validatorDescriptor.Custom.New()},\n\t\t\t},\n\t\t)\n\t}\n\n\t\/\/ TODO: This should really be part of the unmarshalled validators somehow\n\tbasicAuth := v.GetStringSlice(\"authHeader\")\n\tfor _, authValue := range basicAuth {\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.ExactMatchValidator(authValue),\n\t\t)\n\t}\n\n\tvalidator = validators\n\n\treturn\n}\n\n\/\/ caduceus is the driver function for Caduceus. It performs everything main() would do,\n\/\/ except for obtaining the command-line arguments (which are passed to it).\nfunc caduceus(arguments []string) int {\n\ttotalTime := time.Now()\n\n\tvar (\n\t\tf = pflag.NewFlagSet(applicationName, pflag.ContinueOnError)\n\t\tv = viper.New()\n\n\t\tlogger, webPA, err = server.Initialize(applicationName, arguments, f, v)\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize Viper environment: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tlogger.Info(\"Using configuration file: %s\", v.ConfigFileUsed())\n\n\tcaduceusConfig := new(CaduceusConfig)\n\terr = v.Unmarshal(caduceusConfig)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to unmarshal configuration data into struct: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tworkerPool := WorkerPoolFactory{\n\t\tNumWorkers: caduceusConfig.NumWorkerThreads,\n\t\tQueueSize: caduceusConfig.JobQueueSize,\n\t}.New()\n\n\tmainCaduceusProfilerFactory := ServerProfilerFactory{\n\t\tFrequency: caduceusConfig.ProfilerFrequency,\n\t\tDuration: caduceusConfig.ProfilerDuration,\n\t\tQueueSize: caduceusConfig.ProfilerQueueSize,\n\t\tLogger: logger,\n\t}\n\n\t\/\/ here we create a profiler specifically for our main server handler\n\tcaduceusHandlerProfiler, err := mainCaduceusProfilerFactory.New(\"main\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to profiler for main caduceus handler: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tchildCaduceusProfilerFactory := mainCaduceusProfilerFactory\n\tchildCaduceusProfilerFactory.Parent = caduceusHandlerProfiler\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: caduceusConfig.SenderNumWorkersPerSender,\n\t\tResponseHeaderTimeout: 10 * time.Second, \/\/ TODO Make this configurable\n\t}\n\n\ttimeout := time.Duration(caduceusConfig.SenderClientTimeout) * time.Second\n\n\t\/\/ declare a new sender wrapper and pass it a profiler factory so that it can create\n\t\/\/ unique profilers on a per outboundSender basis\n\tcaduceusSenderWrapper, err := SenderWrapperFactory{\n\t\tNumWorkersPerSender: caduceusConfig.SenderNumWorkersPerSender,\n\t\tQueueSizePerSender: caduceusConfig.SenderQueueSizePerSender,\n\t\tCutOffPeriod: time.Duration(caduceusConfig.SenderCutOffPeriod) * time.Second,\n\t\tLinger: time.Duration(caduceusConfig.SenderLinger) * time.Second,\n\t\tProfilerFactory: childCaduceusProfilerFactory,\n\t\tLogger: logger,\n\t\tClient: &http.Client{Transport: tr, Timeout: timeout},\n\t}.New()\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize new caduceus sender wrapper: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tserverWrapper := &ServerHandler{\n\t\tLogger: logger,\n\t\tcaduceusHandler: &CaduceusHandler{\n\t\t\thandlerProfiler: caduceusHandlerProfiler,\n\t\t\tsenderWrapper: caduceusSenderWrapper,\n\t\t\tLogger: logger,\n\t\t},\n\t\tdoJob: workerPool.Send,\n\t}\n\n\tprofileWrapper := &ProfileHandler{\n\t\tprofilerData: caduceusHandlerProfiler,\n\t\tLogger: logger,\n\t}\n\n\tvalidator, err := getValidator(v)\n\tif err != nil 
{\n\t\tfmt.Fprintf(os.Stderr, \"Validator error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\tauthHandler := handler.AuthorizationHandler{\n\t\tHeaderName: \"Authorization\",\n\t\tForbiddenStatusCode: 403,\n\t\tValidator: validator,\n\t\tLogger: logger,\n\t}\n\n\tcaduceusHandler := alice.New(authHandler.Decorate)\n\n\tmux := mux.NewRouter()\n\tmux.Handle(\"\/api\/v1\/notify\", caduceusHandler.Then(serverWrapper))\n\tmux.Handle(\"\/api\/v1\/profile\", caduceusHandler.Then(profileWrapper))\n\n\t\/\/ Support the old endpoint too.\n\tmux.Handle(\"\/api\/v2\/notify\/{deviceid}\/event\/{eventtype:.*}\", caduceusHandler.Then(serverWrapper))\n\n\twebhookFactory, err := webhook.NewFactory(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating new webhook factory: %s\\n\", err)\n\t\treturn 1\n\t}\n\twebhookRegistry, webhookHandler := webhookFactory.NewRegistryAndHandler()\n\twebhookFactory.SetExternalUpdate(caduceusSenderWrapper.Update)\n\n\t\/\/ register webhook end points for api\n\tmux.Handle(\"\/hook\", caduceusHandler.ThenFunc(webhookRegistry.UpdateRegistry))\n\tmux.Handle(\"\/hooks\", caduceusHandler.ThenFunc(webhookRegistry.GetRegistry))\n\n\tselfURL := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: v.GetString(\"fqdn\") + v.GetString(\"primary.address\"),\n\t}\n\n\twebhookFactory.Initialize(mux, selfURL, webhookHandler, logger, nil)\n\n\tcaduceusHealth := &CaduceusHealth{}\n\tvar runnable concurrent.Runnable\n\n\tcaduceusHealth.Monitor, runnable = webPA.Prepare(logger, nil, mux)\n\tserverWrapper.caduceusHealth = caduceusHealth\n\n\twaitGroup, shutdown, err := concurrent.Execute(runnable)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to start device manager: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tlogger.Debug(\"calling webhookFactory.PrepareAndStart\")\n\tnow := time.Now()\n\twebhookFactory.PrepareAndStart()\n\tlogger.Debug(\"webhookFactory.PrepareAndStart done. elapsed time: %v\", time.Since(now))\n\n\t\/\/ Attempt to obtain the current listener list from current system without having to wait for listener reregistration.\n\tlogger.Debug(\"Attempting to obtain current listener list from %v\", v.GetString(\"start.apiPath\"))\n\tnow = time.Now()\n\tstartChan := make(chan webhook.Result, 1)\n\twebhookFactory.Start.GetCurrentSystemsHooks(startChan)\n\tvar webhookStartResults webhook.Result = <-startChan\n\tif webhookStartResults.Error != nil {\n\t\tlogger.Error(webhookStartResults.Error)\n\t} else {\n\t\t\/\/ todo: add message\n\t\twebhookFactory.SetList(webhook.NewList(webhookStartResults.Hooks))\n\t\tcaduceusSenderWrapper.Update(webhookStartResults.Hooks)\n\t}\n\tlogger.Debug(\"current listener retrieval, elapsed time: %v\", time.Since(now))\n\n\tlogger.Info(\"Caduceus is up and running! 
elapsed time: %v\", time.Since(totalTime))\n\n\tvar (\n\t\tsignals = make(chan os.Signal, 1)\n\t)\n\n\tsignal.Notify(signals)\n\t<-signals\n\tclose(shutdown)\n\twaitGroup.Wait()\n\n\t\/\/ shutdown the sender wrapper gently so that all queued messages get serviced\n\tcaduceusSenderWrapper.Shutdown(true)\n\n\treturn 0\n}\n\nfunc main() {\n\tos.Exit(caduceus(os.Args))\n}\nChange the new api to be v3 vs v1.package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/concurrent\"\n\t\"github.com\/Comcast\/webpa-common\/secure\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/handler\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/key\"\n\t\"github.com\/Comcast\/webpa-common\/server\"\n\t\"github.com\/Comcast\/webpa-common\/webhook\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n)\n\nconst (\n\tapplicationName = \"caduceus\"\n\tDEFAULT_KEY_ID = \"current\"\n)\n\n\/\/ getValidator returns validator for JWT tokens\nfunc getValidator(v *viper.Viper) (validator secure.Validator, err error) {\n\tdefault_validators := make(secure.Validators, 0, 0)\n\tvar jwtVals []JWTValidator\n\n\tv.UnmarshalKey(\"jwtValidators\", &jwtVals)\n\n\t\/\/ make sure there is at least one jwtValidator supplied\n\tif len(jwtVals) < 1 {\n\t\tvalidator = default_validators\n\t\treturn\n\t}\n\n\t\/\/ if a JWTKeys section was supplied, configure a JWS validator\n\t\/\/ and append it to the chain of validators\n\tvalidators := make(secure.Validators, 0, len(jwtVals))\n\n\tfor _, validatorDescriptor := range jwtVals {\n\t\tvar keyResolver key.Resolver\n\t\tkeyResolver, err = validatorDescriptor.Keys.NewResolver()\n\t\tif err != nil {\n\t\t\tvalidator = validators\n\t\t\treturn\n\t\t}\n\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.JWSValidator{\n\t\t\t\tDefaultKeyId: DEFAULT_KEY_ID,\n\t\t\t\tResolver: keyResolver,\n\t\t\t\tJWTValidators: []*jwt.Validator{validatorDescriptor.Custom.New()},\n\t\t\t},\n\t\t)\n\t}\n\n\t\/\/ TODO: This should really be part of the unmarshalled validators somehow\n\tbasicAuth := v.GetStringSlice(\"authHeader\")\n\tfor _, authValue := range basicAuth {\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.ExactMatchValidator(authValue),\n\t\t)\n\t}\n\n\tvalidator = validators\n\n\treturn\n}\n\n\/\/ caduceus is the driver function for Caduceus. 
It performs everything main() would do,\n\/\/ except for obtaining the command-line arguments (which are passed to it).\nfunc caduceus(arguments []string) int {\n\ttotalTime := time.Now()\n\n\tvar (\n\t\tf = pflag.NewFlagSet(applicationName, pflag.ContinueOnError)\n\t\tv = viper.New()\n\n\t\tlogger, webPA, err = server.Initialize(applicationName, arguments, f, v)\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize Viper environment: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tlogger.Info(\"Using configuration file: %s\", v.ConfigFileUsed())\n\n\tcaduceusConfig := new(CaduceusConfig)\n\terr = v.Unmarshal(caduceusConfig)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to unmarshal configuration data into struct: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tworkerPool := WorkerPoolFactory{\n\t\tNumWorkers: caduceusConfig.NumWorkerThreads,\n\t\tQueueSize: caduceusConfig.JobQueueSize,\n\t}.New()\n\n\tmainCaduceusProfilerFactory := ServerProfilerFactory{\n\t\tFrequency: caduceusConfig.ProfilerFrequency,\n\t\tDuration: caduceusConfig.ProfilerDuration,\n\t\tQueueSize: caduceusConfig.ProfilerQueueSize,\n\t\tLogger: logger,\n\t}\n\n\t\/\/ here we create a profiler specifically for our main server handler\n\tcaduceusHandlerProfiler, err := mainCaduceusProfilerFactory.New(\"main\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to profiler for main caduceus handler: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tchildCaduceusProfilerFactory := mainCaduceusProfilerFactory\n\tchildCaduceusProfilerFactory.Parent = caduceusHandlerProfiler\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: caduceusConfig.SenderNumWorkersPerSender,\n\t\tResponseHeaderTimeout: 10 * time.Second, \/\/ TODO Make this configurable\n\t}\n\n\ttimeout := time.Duration(caduceusConfig.SenderClientTimeout) * time.Second\n\n\t\/\/ declare a new sender wrapper and pass it a profiler factory so that it can create\n\t\/\/ unique profilers on a per outboundSender basis\n\tcaduceusSenderWrapper, err := SenderWrapperFactory{\n\t\tNumWorkersPerSender: caduceusConfig.SenderNumWorkersPerSender,\n\t\tQueueSizePerSender: caduceusConfig.SenderQueueSizePerSender,\n\t\tCutOffPeriod: time.Duration(caduceusConfig.SenderCutOffPeriod) * time.Second,\n\t\tLinger: time.Duration(caduceusConfig.SenderLinger) * time.Second,\n\t\tProfilerFactory: childCaduceusProfilerFactory,\n\t\tLogger: logger,\n\t\tClient: &http.Client{Transport: tr, Timeout: timeout},\n\t}.New()\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize new caduceus sender wrapper: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tserverWrapper := &ServerHandler{\n\t\tLogger: logger,\n\t\tcaduceusHandler: &CaduceusHandler{\n\t\t\thandlerProfiler: caduceusHandlerProfiler,\n\t\t\tsenderWrapper: caduceusSenderWrapper,\n\t\t\tLogger: logger,\n\t\t},\n\t\tdoJob: workerPool.Send,\n\t}\n\n\tprofileWrapper := &ProfileHandler{\n\t\tprofilerData: caduceusHandlerProfiler,\n\t\tLogger: logger,\n\t}\n\n\tvalidator, err := getValidator(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Validator error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\tauthHandler := handler.AuthorizationHandler{\n\t\tHeaderName: \"Authorization\",\n\t\tForbiddenStatusCode: 403,\n\t\tValidator: validator,\n\t\tLogger: logger,\n\t}\n\n\tcaduceusHandler := alice.New(authHandler.Decorate)\n\n\tmux := mux.NewRouter()\n\tmux.Handle(\"\/api\/v3\/notify\", caduceusHandler.Then(serverWrapper))\n\tmux.Handle(\"\/api\/v3\/profile\", 
caduceusHandler.Then(profileWrapper))\n\n\t\/\/ Support the old endpoint too.\n\tmux.Handle(\"\/api\/v2\/notify\/{deviceid}\/event\/{eventtype:.*}\", caduceusHandler.Then(serverWrapper))\n\n\twebhookFactory, err := webhook.NewFactory(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating new webhook factory: %s\\n\", err)\n\t\treturn 1\n\t}\n\twebhookRegistry, webhookHandler := webhookFactory.NewRegistryAndHandler()\n\twebhookFactory.SetExternalUpdate(caduceusSenderWrapper.Update)\n\n\t\/\/ register webhook end points for api\n\tmux.Handle(\"\/hook\", caduceusHandler.ThenFunc(webhookRegistry.UpdateRegistry))\n\tmux.Handle(\"\/hooks\", caduceusHandler.ThenFunc(webhookRegistry.GetRegistry))\n\n\tselfURL := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: v.GetString(\"fqdn\") + v.GetString(\"primary.address\"),\n\t}\n\n\twebhookFactory.Initialize(mux, selfURL, webhookHandler, logger, nil)\n\n\tcaduceusHealth := &CaduceusHealth{}\n\tvar runnable concurrent.Runnable\n\n\tcaduceusHealth.Monitor, runnable = webPA.Prepare(logger, nil, mux)\n\tserverWrapper.caduceusHealth = caduceusHealth\n\n\twaitGroup, shutdown, err := concurrent.Execute(runnable)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to start device manager: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tlogger.Debug(\"calling webhookFactory.PrepareAndStart\")\n\tnow := time.Now()\n\twebhookFactory.PrepareAndStart()\n\tlogger.Debug(\"webhookFactory.PrepareAndStart done. elapsed time: %v\", time.Since(now))\n\n\t\/\/ Attempt to obtain the current listener list from current system without having to wait for listener reregistration.\n\tlogger.Debug(\"Attempting to obtain current listener list from %v\", v.GetString(\"start.apiPath\"))\n\tnow = time.Now()\n\tstartChan := make(chan webhook.Result, 1)\n\twebhookFactory.Start.GetCurrentSystemsHooks(startChan)\n\tvar webhookStartResults webhook.Result = <-startChan\n\tif webhookStartResults.Error != nil {\n\t\tlogger.Error(webhookStartResults.Error)\n\t} else {\n\t\t\/\/ todo: add message\n\t\twebhookFactory.SetList(webhook.NewList(webhookStartResults.Hooks))\n\t\tcaduceusSenderWrapper.Update(webhookStartResults.Hooks)\n\t}\n\tlogger.Debug(\"current listener retrieval, elapsed time: %v\", time.Since(now))\n\n\tlogger.Info(\"Caduceus is up and running! 
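The routing change in this record is mechanical but worth isolating: new endpoints move to /api/v3 while the old /api/v2 event route stays mounted so existing clients keep working. A reduced sketch with gorilla/mux (the handler wiring and names are illustrative, not the actual caduceus setup):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// registerRoutes mirrors the version bump in the diff above: handlers
// are mounted under /api/v3, and the legacy v2 event route is kept for
// backward compatibility.
func registerRoutes(r *mux.Router, notify, profile http.Handler) {
	r.Handle("/api/v3/notify", notify)
	r.Handle("/api/v3/profile", profile)

	// Old clients still post events against the v2 path.
	r.Handle("/api/v2/notify/{deviceid}/event/{eventtype:.*}", notify)
}

func main() {
	ok := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	r := mux.NewRouter()
	registerRoutes(r, ok, ok)
	log.Fatal(http.ListenAndServe(":8080", r))
}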
elapsed time: %v\", time.Since(totalTime))\n\n\tvar (\n\t\tsignals = make(chan os.Signal, 1)\n\t)\n\n\tsignal.Notify(signals)\n\t<-signals\n\tclose(shutdown)\n\twaitGroup.Wait()\n\n\t\/\/ shutdown the sender wrapper gently so that all queued messages get serviced\n\tcaduceusSenderWrapper.Shutdown(true)\n\n\treturn 0\n}\n\nfunc main() {\n\tos.Exit(caduceus(os.Args))\n}\n<|endoftext|>"} {"text":"package stick\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestF(t *testing.T) {\n\terr := F(\"foo %d\", 42)\n\n\tstr := err.Error()\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo 42\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestWF(t *testing.T) {\n\terr := F(\"foo\")\n\terr = WF(err, \"bar %d\", 42)\n\n\tstr := err.Error()\n\tassert.Equal(t, `bar 42: foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `bar 42: foo`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `bar 42: foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestWF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t\t\"bar 42\",\n\t\t\"github.com\/256dpi\/fire\/stick.WF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestWF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestE(t *testing.T) {\n\terr := E(\"foo\")\n\tassert.True(t, IsSafe(err))\n\n\tstr := err.Error()\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.E\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" 
\/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n\n\t\/* wrapped *\/\n\n\terr = WF(err, \"bar\")\n\tassert.True(t, IsSafe(err))\n\n\tstr = err.Error()\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.E\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t\t\"bar\",\n\t\t\"github.com\/256dpi\/fire\/stick.WF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/error_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestSafeError(t *testing.T) {\n\terr1 := F(\"foo\")\n\tassert.False(t, IsSafe(err1))\n\tassert.Equal(t, \"foo\", err1.Error())\n\tassert.Nil(t, AsSafe(err1))\n\n\terr2 := Safe(err1)\n\tassert.True(t, IsSafe(err2))\n\tassert.Equal(t, \"foo\", err2.Error())\n\tassert.Equal(t, err2, AsSafe(err2))\n\n\terr3 := WF(err2, \"bar\")\n\tassert.True(t, IsSafe(err3))\n\tassert.Equal(t, \"bar: foo\", err3.Error())\n\tassert.Equal(t, err2, AsSafe(err3))\n}\n\nfunc splitTrace(str string) []string {\n\tstr = strings.ReplaceAll(str, \"\\t\", \" \")\n\tstr = regexp.MustCompile(\":\\\\d+\").ReplaceAllString(str, \":LN\")\n\treturn strings.Split(str, \"\\n\")\n}\nfix testpackage stick\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestF(t *testing.T) {\n\terr := F(\"foo %d\", 42)\n\n\tstr := err.Error()\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `foo 42`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo 42\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestWF(t *testing.T) {\n\terr := F(\"foo\")\n\terr = WF(err, \"bar %d\", 42)\n\n\tstr := err.Error()\n\tassert.Equal(t, `bar 42: foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `bar 42: foo`, 
str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `bar 42: foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestWF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t\t\"bar 42\",\n\t\t\"github.com\/256dpi\/fire\/stick.WF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestWF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestE(t *testing.T) {\n\terr := E(\"foo\")\n\tassert.True(t, IsSafe(err))\n\n\tstr := err.Error()\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.E\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n\n\t\/* wrapped *\/\n\n\terr = WF(err, \"bar\")\n\tassert.True(t, IsSafe(err))\n\n\tstr = err.Error()\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%s\", err)\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%v\", err)\n\tassert.Equal(t, `bar: foo`, str)\n\n\tstr = fmt.Sprintf(\"%+v\", err)\n\tassert.Equal(t, []string{\n\t\t\"foo\",\n\t\t\"github.com\/256dpi\/fire\/stick.F\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.E\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t\t\"bar\",\n\t\t\"github.com\/256dpi\/fire\/stick.WF\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors.go:LN\",\n\t\t\"github.com\/256dpi\/fire\/stick.TestE\",\n\t\t\" \/Users\/256dpi\/Development\/GitHub\/256dpi\/fire\/stick\/errors_test.go:LN\",\n\t\t\"testing.tRunner\",\n\t\t\" \/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/testing\/testing.go:LN\",\n\t\t\"runtime.goexit\",\n\t\t\" 
\/usr\/local\/Cellar\/go\/1.14.1\/libexec\/src\/runtime\/asm_amd64.s:LN\",\n\t}, splitTrace(str))\n}\n\nfunc TestSafeError(t *testing.T) {\n\terr1 := F(\"foo\")\n\tassert.False(t, IsSafe(err1))\n\tassert.Equal(t, \"foo\", err1.Error())\n\tassert.Nil(t, AsSafe(err1))\n\n\terr2 := Safe(err1)\n\tassert.True(t, IsSafe(err2))\n\tassert.Equal(t, \"foo\", err2.Error())\n\tassert.Equal(t, err2, AsSafe(err2))\n\n\terr3 := WF(err2, \"bar\")\n\tassert.True(t, IsSafe(err3))\n\tassert.Equal(t, \"bar: foo\", err3.Error())\n\tassert.Equal(t, err2, AsSafe(err3))\n}\n\nfunc splitTrace(str string) []string {\n\tstr = strings.ReplaceAll(str, \"\\t\", \" \")\n\tstr = regexp.MustCompile(\":\\\\d+\").ReplaceAllString(str, \":LN\")\n\treturn strings.Split(str, \"\\n\")\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2021 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage events\n\nimport (\n\t\"github.com\/gravitational\/teleport\/api\/types\/events\"\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\t\"github.com\/gravitational\/trace\"\n\n\t\"encoding\/json\"\n)\n\n\/\/ FromEventFields converts from the typed dynamic representation\n\/\/ to the new typed interface-style representation.\n\/\/\n\/\/ This is mainly used to convert from the backend format used by\n\/\/ our various event backends.\nfunc FromEventFields(fields EventFields) (AuditEvent, error) {\n\tdata, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\teventType := fields.GetString(EventType)\n\n\tswitch eventType {\n\tcase SessionPrintEvent:\n\t\tvar e events.SessionPrint\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionStartEvent:\n\t\tvar e events.SessionStart\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionEndEvent:\n\t\tvar e events.SessionEnd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionUploadEvent:\n\t\tvar e events.SessionUpload\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionJoinEvent:\n\t\tvar e events.SessionJoin\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionLeaveEvent:\n\t\tvar e events.SessionLeave\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionDataEvent:\n\t\tvar e events.SessionData\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ClientDisconnectEvent:\n\t\tvar e events.ClientDisconnect\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserLoginEvent:\n\t\tvar e events.UserLogin\n\t\tif err := 
utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserDeleteEvent:\n\t\tvar e events.UserDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserCreateEvent:\n\t\tvar e events.UserCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserUpdatedEvent:\n\t\t\/\/ note: user.update is a custom code applied on top of the same data as the user.create event\n\t\t\/\/ and they are thus functionally identical. There exists no direct gRPC version of user.update.\n\t\tvar e events.UserCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserPasswordChangeEvent:\n\t\tvar e events.UserPasswordChange\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AccessRequestCreateEvent:\n\t\tvar e events.AccessRequestCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AccessRequestUpdateEvent:\n\t\tvar e events.AccessRequestCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardCreateEvent:\n\t\tvar e events.BillingCardCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardUpdateEvent:\n\t\tvar e events.BillingCardCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardDeleteEvent:\n\t\tvar e events.BillingCardDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingInformationUpdateEvent:\n\t\tvar e events.BillingInformationUpdate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ResetPasswordTokenCreateEvent:\n\t\tvar e events.ResetPasswordTokenCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ExecEvent:\n\t\tvar e events.Exec\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SubsystemEvent:\n\t\tvar e events.Subsystem\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase X11ForwardEvent:\n\t\tvar e events.X11Forward\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase PortForwardEvent:\n\t\tvar e events.PortForward\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AuthAttemptEvent:\n\t\tvar e events.AuthAttempt\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SCPEvent:\n\t\tvar e events.SCP\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ResizeEvent:\n\t\tvar e events.Resize\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, 
trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionCommandEvent:\n\t\tvar e events.SessionCommand\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionDiskEvent:\n\t\tvar e events.SessionDisk\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionNetworkEvent:\n\t\tvar e events.SessionNetwork\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase RoleCreatedEvent:\n\t\tvar e events.RoleCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase RoleDeletedEvent:\n\t\tvar e events.RoleDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterCreateEvent:\n\t\tvar e events.TrustedClusterCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterDeleteEvent:\n\t\tvar e events.TrustedClusterDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterTokenCreateEvent:\n\t\tvar e events.TrustedClusterTokenCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase GithubConnectorCreatedEvent:\n\t\tvar e events.GithubConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase GithubConnectorDeletedEvent:\n\t\tvar e events.GithubConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase OIDCConnectorCreatedEvent:\n\t\tvar e events.OIDCConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase OIDCConnectorDeletedEvent:\n\t\tvar e events.OIDCConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SAMLConnectorCreatedEvent:\n\t\tvar e events.SAMLConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SAMLConnectorDeletedEvent:\n\t\tvar e events.SAMLConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionRejectedEvent:\n\t\tvar e events.SessionReject\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionStartEvent:\n\t\tvar e events.AppSessionStart\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionChunkEvent:\n\t\tvar e events.AppSessionChunk\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionRequestEvent:\n\t\tvar e events.AppSessionRequest\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionStartEvent:\n\t\tvar e events.DatabaseSessionStart\n\t\tif err := 
utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionEndEvent:\n\t\tvar e events.DatabaseSessionEnd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionQueryEvent:\n\t\tvar e events.DatabaseSessionQuery\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase KubeRequestEvent:\n\t\tvar e events.KubeRequest\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase MFADeviceAddEvent:\n\t\tvar e events.MFADeviceAdd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase MFADeviceDeleteEvent:\n\t\tvar e events.MFADeviceDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tdefault:\n\t\treturn nil, trace.BadParameter(\"unknown event type: %q\", eventType)\n\t}\n}\n\n\/\/ GetSessionID pulls the session ID from the events that have a\n\/\/ SessionMetadata. For other events an empty string is returned.\nfunc GetSessionID(event AuditEvent) string {\n\tvar sessionID string\n\n\tif g, ok := event.(SessionMetadataGetter); ok {\n\t\tsessionID = g.GetSessionID()\n\t}\n\n\treturn sessionID\n}\n\n\/\/ ToEventFields converts from the typed interface-style event representation\n\/\/ to the old dynamic map style representation in order to provide outer compatibility\n\/\/ with existing public API routes when the backend is updated with the typed events.\nfunc ToEventFields(event AuditEvent) (EventFields, error) {\n\tvar fields EventFields\n\tif err := utils.ObjectToStruct(event, &fields); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn fields, nil\n}\nAdd event handler for access request review event (#6966)\/*\nCopyright 2021 Gravitational, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage events\n\nimport (\n\t\"github.com\/gravitational\/teleport\/api\/types\/events\"\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\t\"github.com\/gravitational\/trace\"\n\n\t\"encoding\/json\"\n)\n\n\/\/ FromEventFields converts from the typed dynamic representation\n\/\/ to the new typed interface-style representation.\n\/\/\n\/\/ This is mainly used to convert from the backend format used by\n\/\/ our various event backends.\nfunc FromEventFields(fields EventFields) (AuditEvent, error) {\n\tdata, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\teventType := fields.GetString(EventType)\n\n\tswitch eventType {\n\tcase SessionPrintEvent:\n\t\tvar e events.SessionPrint\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionStartEvent:\n\t\tvar e events.SessionStart\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil 
{\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionEndEvent:\n\t\tvar e events.SessionEnd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionUploadEvent:\n\t\tvar e events.SessionUpload\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionJoinEvent:\n\t\tvar e events.SessionJoin\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionLeaveEvent:\n\t\tvar e events.SessionLeave\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionDataEvent:\n\t\tvar e events.SessionData\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ClientDisconnectEvent:\n\t\tvar e events.ClientDisconnect\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserLoginEvent:\n\t\tvar e events.UserLogin\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserDeleteEvent:\n\t\tvar e events.UserDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserCreateEvent:\n\t\tvar e events.UserCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserUpdatedEvent:\n\t\t\/\/ note: user.update is a custom code applied on top of the same data as the user.create event\n\t\t\/\/ and they are thus functionally identical. There exists no direct gRPC version of user.update.\n\t\tvar e events.UserCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase UserPasswordChangeEvent:\n\t\tvar e events.UserPasswordChange\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AccessRequestCreateEvent:\n\t\tvar e events.AccessRequestCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AccessRequestReviewEvent:\n\t\t\/\/ note: access_request.review is a custom code applied on top of the same data as the access_request.create event\n\t\t\/\/ and they are thus functionally identical. 
There exists no direct gRPC version of access_request.review.\n\t\tvar e events.AccessRequestCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AccessRequestUpdateEvent:\n\t\tvar e events.AccessRequestCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardCreateEvent:\n\t\tvar e events.BillingCardCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardUpdateEvent:\n\t\tvar e events.BillingCardCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingCardDeleteEvent:\n\t\tvar e events.BillingCardDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase BillingInformationUpdateEvent:\n\t\tvar e events.BillingInformationUpdate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ResetPasswordTokenCreateEvent:\n\t\tvar e events.ResetPasswordTokenCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ExecEvent:\n\t\tvar e events.Exec\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SubsystemEvent:\n\t\tvar e events.Subsystem\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase X11ForwardEvent:\n\t\tvar e events.X11Forward\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase PortForwardEvent:\n\t\tvar e events.PortForward\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AuthAttemptEvent:\n\t\tvar e events.AuthAttempt\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SCPEvent:\n\t\tvar e events.SCP\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase ResizeEvent:\n\t\tvar e events.Resize\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionCommandEvent:\n\t\tvar e events.SessionCommand\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionDiskEvent:\n\t\tvar e events.SessionDisk\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionNetworkEvent:\n\t\tvar e events.SessionNetwork\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase RoleCreatedEvent:\n\t\tvar e events.RoleCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase RoleDeletedEvent:\n\t\tvar e events.RoleDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterCreateEvent:\n\t\tvar e 
events.TrustedClusterCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterDeleteEvent:\n\t\tvar e events.TrustedClusterDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase TrustedClusterTokenCreateEvent:\n\t\tvar e events.TrustedClusterTokenCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase GithubConnectorCreatedEvent:\n\t\tvar e events.GithubConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase GithubConnectorDeletedEvent:\n\t\tvar e events.GithubConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase OIDCConnectorCreatedEvent:\n\t\tvar e events.OIDCConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase OIDCConnectorDeletedEvent:\n\t\tvar e events.OIDCConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SAMLConnectorCreatedEvent:\n\t\tvar e events.SAMLConnectorCreate\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SAMLConnectorDeletedEvent:\n\t\tvar e events.SAMLConnectorDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase SessionRejectedEvent:\n\t\tvar e events.SessionReject\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionStartEvent:\n\t\tvar e events.AppSessionStart\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionChunkEvent:\n\t\tvar e events.AppSessionChunk\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase AppSessionRequestEvent:\n\t\tvar e events.AppSessionRequest\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionStartEvent:\n\t\tvar e events.DatabaseSessionStart\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionEndEvent:\n\t\tvar e events.DatabaseSessionEnd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase DatabaseSessionQueryEvent:\n\t\tvar e events.DatabaseSessionQuery\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase KubeRequestEvent:\n\t\tvar e events.KubeRequest\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase MFADeviceAddEvent:\n\t\tvar e events.MFADeviceAdd\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tcase MFADeviceDeleteEvent:\n\t\tvar e events.MFADeviceDelete\n\t\tif err := utils.FastUnmarshal(data, &e); err != nil {\n\t\t\treturn nil, 
trace.Wrap(err)\n\t\t}\n\t\treturn &e, nil\n\tdefault:\n\t\treturn nil, trace.BadParameter(\"unknown event type: %q\", eventType)\n\t}\n}\n\n\/\/ GetSessionID pulls the session ID from the events that have a\n\/\/ SessionMetadata. For other events an empty string is returned.\nfunc GetSessionID(event AuditEvent) string {\n\tvar sessionID string\n\n\tif g, ok := event.(SessionMetadataGetter); ok {\n\t\tsessionID = g.GetSessionID()\n\t}\n\n\treturn sessionID\n}\n\n\/\/ ToEventFields converts from the typed interface-style event representation\n\/\/ to the old dynamic map style representation in order to provide outer compatibility\n\/\/ with existing public API routes when the backend is updated with the typed events.\nfunc ToEventFields(event AuditEvent) (EventFields, error) {\n\tvar fields EventFields\n\tif err := utils.ObjectToStruct(event, &fields); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\treturn fields, nil\n}\n<|endoftext|>"} {"text":"\/\/ +build testtools\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/github\/git-lfs\/httputil\"\n\t\"github.com\/github\/git-lfs\/progress\"\n\t\"github.com\/github\/git-lfs\/tools\"\n)\n\n\/\/ This test custom adapter just acts as a bridge for uploads\/downloads\n\/\/ in order to demonstrate & test the custom transfer adapter protocols\n\/\/ All we actually do is relay the requests back to the normal storage URLs\n\/\/ of our test server for simplicity, but this proves the principle\nfunc main() {\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\twriter := bufio.NewWriter(os.Stdout)\n\terrWriter := bufio.NewWriter(os.Stderr)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tvar req request\n\t\tif err := json.Unmarshal([]byte(line), &req); err != nil {\n\t\t\twriteToStderr(fmt.Sprintf(\"Unable to parse request: %v\\n\", line), errWriter)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch req.Id {\n\t\tcase \"init\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Initialised test custom adapter for %s\\n\", req.Operation), errWriter)\n\t\t\tresp := &initResponse{}\n\t\t\tsendResponse(resp, writer, errWriter)\n\t\tcase \"download\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Received download request for %s\\n\", req.Oid), errWriter)\n\t\t\tperformDownload(req.Oid, req.Size, req.Action, writer, errWriter)\n\t\tcase \"upload\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Received upload request for %s\\n\", req.Oid), errWriter)\n\t\t\tperformUpload(req.Oid, req.Size, req.Action, req.Path, writer, errWriter)\n\t\tcase \"terminate\":\n\t\t\twriteToStderr(\"Terminating test custom adapter gracefully.\\n\", errWriter)\n\t\t\tbreak\n\t\t}\n\t}\n\n}\n\nfunc writeToStderr(msg string, errWriter *bufio.Writer) {\n\tif !strings.HasSuffix(msg, \"\\n\") {\n\t\tmsg = msg + \"\\n\"\n\t}\n\terrWriter.WriteString(msg)\n\terrWriter.Flush()\n}\n\nfunc sendResponse(r interface{}, writer, errWriter *bufio.Writer) error {\n\tb, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Line oriented JSON\n\tb = append(b, '\\n')\n\t_, err = writer.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\twriter.Flush()\n\twriteToStderr(fmt.Sprintf(\"Sent message %v\", string(b)), errWriter)\n\treturn nil\n}\n\nfunc sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) {\n\tresp := &transferResponse{\"complete\", oid, \"\", &transferError{code, message}}\n\terr := sendResponse(resp, writer, errWriter)\n\tif err != nil 
{\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send transfer error: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) {\n\tresp := &progressResponse{\"progress\", oid, bytesSoFar, bytesSinceLast}\n\terr := sendResponse(resp, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send progress update: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc performDownload(oid string, size int64, a *action, writer, errWriter *bufio.Writer) {\n\t\/\/ We just use the URLs we're given, so we're just a proxy for the direct method\n\t\/\/ but this is enough to test intermediate custom adapters\n\treq, err := httputil.NewHttpRequest(\"GET\", a.Href, a.Header)\n\tif err != nil {\n\t\tsendTransferError(oid, 2, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tres, err := httputil.DoHttpRequest(req, true)\n\tif err != nil {\n\t\tsendTransferError(oid, res.StatusCode, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tdlFile, err := ioutil.TempFile(\"\", \"lfscustomdl\")\n\tif err != nil {\n\t\tsendTransferError(oid, 3, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tdefer dlFile.Close()\n\tdlfilename := dlFile.Name()\n\t\/\/ Wrap callback to give name context\n\tcb := func(totalSize int64, readSoFar int64, readSinceLast int) error {\n\t\tsendProgress(oid, readSoFar, readSinceLast, writer, errWriter)\n\t\treturn nil\n\t}\n\t_, err = tools.CopyWithCallback(dlFile, res.Body, res.ContentLength, cb)\n\tif err != nil {\n\t\tsendTransferError(oid, 4, fmt.Sprintf(\"cannot write data to tempfile %q: %v\", dlfilename, err), writer, errWriter)\n\t\tos.Remove(dlfilename)\n\t\treturn\n\t}\n\tif err := dlFile.Close(); err != nil {\n\t\tsendTransferError(oid, 5, fmt.Sprintf(\"can't close tempfile %q: %v\", dlfilename, err), writer, errWriter)\n\t\tos.Remove(dlfilename)\n\t\treturn\n\t}\n\n\t\/\/ completed\n\tcomplete := &transferResponse{\"complete\", oid, dlfilename, nil}\n\terr = sendResponse(complete, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send completion message: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc performUpload(oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) {\n\t\/\/ We just use the URLs we're given, so we're just a proxy for the direct method\n\t\/\/ but this is enough to test intermediate custom adapters\n\treq, err := httputil.NewHttpRequest(\"PUT\", a.Href, a.Header)\n\tif err != nil {\n\t\tsendTransferError(oid, 2, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\n\tif len(req.Header.Get(\"Content-Type\")) == 0 {\n\t\treq.Header.Set(\"Content-Type\", \"application\/octet-stream\")\n\t}\n\n\tif req.Header.Get(\"Transfer-Encoding\") == \"chunked\" {\n\t\treq.TransferEncoding = []string{\"chunked\"}\n\t} else {\n\t\treq.Header.Set(\"Content-Length\", strconv.FormatInt(size, 10))\n\t}\n\n\treq.ContentLength = size\n\n\tf, err := os.OpenFile(fromPath, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\tsendTransferError(oid, 3, fmt.Sprintf(\"Cannot read data from %q: %v\", fromPath, err), writer, errWriter)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Ensure progress callbacks made while uploading\n\t\/\/ Wrap callback to give name context\n\tcb := func(totalSize int64, readSoFar int64, readSinceLast int) error {\n\t\tsendProgress(oid, readSoFar, readSinceLast, writer, errWriter)\n\t\treturn nil\n\t}\n\tvar reader io.Reader\n\treader = &progress.CallbackReader{\n\t\tC: cb,\n\t\tTotalSize: 
size,\n\t\tReader: f,\n\t}\n\n\treq.Body = ioutil.NopCloser(reader)\n\n\tres, err := httputil.DoHttpRequest(req, true)\n\tif err != nil {\n\t\tsendTransferError(oid, res.StatusCode, fmt.Sprintf(\"Error uploading data for %s: %v\", oid, err), writer, errWriter)\n\t\treturn\n\t}\n\n\tif res.StatusCode > 299 {\n\t\tsendTransferError(oid, res.StatusCode, fmt.Sprintf(\"Invalid status for %s: %d\", httputil.TraceHttpReq(req), res.StatusCode), writer, errWriter)\n\t\treturn\n\t}\n\n\tio.Copy(ioutil.Discard, res.Body)\n\tres.Body.Close()\n\n\t\/\/ completed\n\tcomplete := &transferResponse{\"complete\", oid, \"\", nil}\n\terr = sendResponse(complete, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send completion message: %v\\n\", err), errWriter)\n\t}\n\n}\n\n\/\/ Structs reimplemented so closer to a real external implementation\ntype header struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\ntype action struct {\n\tHref string `json:\"href\"`\n\tHeader map[string]string `json:\"header,omitempty\"`\n\tExpiresAt time.Time `json:\"expires_at,omitempty\"`\n}\ntype transferError struct {\n\tCode int `json:\"code\"`\n\tMessage string `json:\"message\"`\n}\n\n\/\/ Combined request struct which can accept anything\ntype request struct {\n\tId string `json:\"id\"`\n\tOperation string `json:\"operation\"`\n\tConcurrent bool `json:\"concurrent\"`\n\tConcurrentTransfers int `json:\"concurrenttransfers\"`\n\tOid string `json:\"oid\"`\n\tSize int64 `json:\"size\"`\n\tPath string `json:\"path\"`\n\tAction *action `json:\"action\"`\n}\n\ntype initResponse struct {\n\tError *transferError `json:\"error,omitempty\"`\n}\ntype transferResponse struct {\n\tId string `json:\"id\"`\n\tOid string `json:\"oid\"`\n\tPath string `json:\"path,omitempty\"` \/\/ always blank for upload\n\tError *transferError `json:\"error,omitempty\"`\n}\ntype progressResponse struct {\n\tId string `json:\"id\"`\n\tOid string `json:\"oid\"`\n\tBytesSoFar int64 `json:\"bytesSoFar\"`\n\tBytesSinceLast int `json:\"bytesSinceLast\"`\n}\nComment fixes\/\/ +build testtools\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/github\/git-lfs\/httputil\"\n\t\"github.com\/github\/git-lfs\/progress\"\n\t\"github.com\/github\/git-lfs\/tools\"\n)\n\n\/\/ This test custom adapter just acts as a bridge for uploads\/downloads\n\/\/ in order to demonstrate & test the custom transfer adapter protocols\n\/\/ All we actually do is relay the requests back to the normal storage URLs\n\/\/ of our test server for simplicity, but this proves the principle\nfunc main() {\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\twriter := bufio.NewWriter(os.Stdout)\n\terrWriter := bufio.NewWriter(os.Stderr)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tvar req request\n\t\tif err := json.Unmarshal([]byte(line), &req); err != nil {\n\t\t\twriteToStderr(fmt.Sprintf(\"Unable to parse request: %v\\n\", line), errWriter)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch req.Id {\n\t\tcase \"init\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Initialised test custom adapter for %s\\n\", req.Operation), errWriter)\n\t\t\tresp := &initResponse{}\n\t\t\tsendResponse(resp, writer, errWriter)\n\t\tcase \"download\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Received download request for %s\\n\", req.Oid), errWriter)\n\t\t\tperformDownload(req.Oid, req.Size, req.Action, writer, errWriter)\n\t\tcase 
\"upload\":\n\t\t\twriteToStderr(fmt.Sprintf(\"Received upload request for %s\\n\", req.Oid), errWriter)\n\t\t\tperformUpload(req.Oid, req.Size, req.Action, req.Path, writer, errWriter)\n\t\tcase \"terminate\":\n\t\t\twriteToStderr(\"Terminating test custom adapter gracefully.\\n\", errWriter)\n\t\t\tbreak\n\t\t}\n\t}\n\n}\n\nfunc writeToStderr(msg string, errWriter *bufio.Writer) {\n\tif !strings.HasSuffix(msg, \"\\n\") {\n\t\tmsg = msg + \"\\n\"\n\t}\n\terrWriter.WriteString(msg)\n\terrWriter.Flush()\n}\n\nfunc sendResponse(r interface{}, writer, errWriter *bufio.Writer) error {\n\tb, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Line oriented JSON\n\tb = append(b, '\\n')\n\t_, err = writer.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\twriter.Flush()\n\twriteToStderr(fmt.Sprintf(\"Sent message %v\", string(b)), errWriter)\n\treturn nil\n}\n\nfunc sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) {\n\tresp := &transferResponse{\"complete\", oid, \"\", &transferError{code, message}}\n\terr := sendResponse(resp, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send transfer error: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) {\n\tresp := &progressResponse{\"progress\", oid, bytesSoFar, bytesSinceLast}\n\terr := sendResponse(resp, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send progress update: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc performDownload(oid string, size int64, a *action, writer, errWriter *bufio.Writer) {\n\t\/\/ We just use the URLs we're given, so we're just a proxy for the direct method\n\t\/\/ but this is enough to test intermediate custom adapters\n\treq, err := httputil.NewHttpRequest(\"GET\", a.Href, a.Header)\n\tif err != nil {\n\t\tsendTransferError(oid, 2, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tres, err := httputil.DoHttpRequest(req, true)\n\tif err != nil {\n\t\tsendTransferError(oid, res.StatusCode, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tdlFile, err := ioutil.TempFile(\"\", \"lfscustomdl\")\n\tif err != nil {\n\t\tsendTransferError(oid, 3, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\tdefer dlFile.Close()\n\tdlfilename := dlFile.Name()\n\t\/\/ Turn callback into progress messages\n\tcb := func(totalSize int64, readSoFar int64, readSinceLast int) error {\n\t\tsendProgress(oid, readSoFar, readSinceLast, writer, errWriter)\n\t\treturn nil\n\t}\n\t_, err = tools.CopyWithCallback(dlFile, res.Body, res.ContentLength, cb)\n\tif err != nil {\n\t\tsendTransferError(oid, 4, fmt.Sprintf(\"cannot write data to tempfile %q: %v\", dlfilename, err), writer, errWriter)\n\t\tos.Remove(dlfilename)\n\t\treturn\n\t}\n\tif err := dlFile.Close(); err != nil {\n\t\tsendTransferError(oid, 5, fmt.Sprintf(\"can't close tempfile %q: %v\", dlfilename, err), writer, errWriter)\n\t\tos.Remove(dlfilename)\n\t\treturn\n\t}\n\n\t\/\/ completed\n\tcomplete := &transferResponse{\"complete\", oid, dlfilename, nil}\n\terr = sendResponse(complete, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send completion message: %v\\n\", err), errWriter)\n\t}\n}\n\nfunc performUpload(oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) {\n\t\/\/ We just use the URLs we're given, so we're just a proxy for the direct method\n\t\/\/ but this is enough to test 
intermediate custom adapters\n\treq, err := httputil.NewHttpRequest(\"PUT\", a.Href, a.Header)\n\tif err != nil {\n\t\tsendTransferError(oid, 2, err.Error(), writer, errWriter)\n\t\treturn\n\t}\n\n\tif len(req.Header.Get(\"Content-Type\")) == 0 {\n\t\treq.Header.Set(\"Content-Type\", \"application\/octet-stream\")\n\t}\n\n\tif req.Header.Get(\"Transfer-Encoding\") == \"chunked\" {\n\t\treq.TransferEncoding = []string{\"chunked\"}\n\t} else {\n\t\treq.Header.Set(\"Content-Length\", strconv.FormatInt(size, 10))\n\t}\n\n\treq.ContentLength = size\n\n\tf, err := os.OpenFile(fromPath, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\tsendTransferError(oid, 3, fmt.Sprintf(\"Cannot read data from %q: %v\", fromPath, err), writer, errWriter)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Turn callback into progress messages\n\tcb := func(totalSize int64, readSoFar int64, readSinceLast int) error {\n\t\tsendProgress(oid, readSoFar, readSinceLast, writer, errWriter)\n\t\treturn nil\n\t}\n\tvar reader io.Reader\n\treader = &progress.CallbackReader{\n\t\tC: cb,\n\t\tTotalSize: size,\n\t\tReader: f,\n\t}\n\n\treq.Body = ioutil.NopCloser(reader)\n\n\tres, err := httputil.DoHttpRequest(req, true)\n\tif err != nil {\n\t\tsendTransferError(oid, res.StatusCode, fmt.Sprintf(\"Error uploading data for %s: %v\", oid, err), writer, errWriter)\n\t\treturn\n\t}\n\n\tif res.StatusCode > 299 {\n\t\tsendTransferError(oid, res.StatusCode, fmt.Sprintf(\"Invalid status for %s: %d\", httputil.TraceHttpReq(req), res.StatusCode), writer, errWriter)\n\t\treturn\n\t}\n\n\tio.Copy(ioutil.Discard, res.Body)\n\tres.Body.Close()\n\n\t\/\/ completed\n\tcomplete := &transferResponse{\"complete\", oid, \"\", nil}\n\terr = sendResponse(complete, writer, errWriter)\n\tif err != nil {\n\t\twriteToStderr(fmt.Sprintf(\"Unable to send completion message: %v\\n\", err), errWriter)\n\t}\n\n}\n\n\/\/ Structs reimplemented so closer to a real external implementation\ntype header struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value\"`\n}\ntype action struct {\n\tHref string `json:\"href\"`\n\tHeader map[string]string `json:\"header,omitempty\"`\n\tExpiresAt time.Time `json:\"expires_at,omitempty\"`\n}\ntype transferError struct {\n\tCode int `json:\"code\"`\n\tMessage string `json:\"message\"`\n}\n\n\/\/ Combined request struct which can accept anything\ntype request struct {\n\tId string `json:\"id\"`\n\tOperation string `json:\"operation\"`\n\tConcurrent bool `json:\"concurrent\"`\n\tConcurrentTransfers int `json:\"concurrenttransfers\"`\n\tOid string `json:\"oid\"`\n\tSize int64 `json:\"size\"`\n\tPath string `json:\"path\"`\n\tAction *action `json:\"action\"`\n}\n\ntype initResponse struct {\n\tError *transferError `json:\"error,omitempty\"`\n}\ntype transferResponse struct {\n\tId string `json:\"id\"`\n\tOid string `json:\"oid\"`\n\tPath string `json:\"path,omitempty\"` \/\/ always blank for upload\n\tError *transferError `json:\"error,omitempty\"`\n}\ntype progressResponse struct {\n\tId string `json:\"id\"`\n\tOid string `json:\"oid\"`\n\tBytesSoFar int64 `json:\"bytesSoFar\"`\n\tBytesSinceLast int `json:\"bytesSinceLast\"`\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2014 The Camlistore Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is 
distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mpl\/scancabimport\/third_party\/github.com\/golang\/oauth2\"\n\t\"github.com\/mpl\/scancabimport\/third_party\/github.com\/golang\/oauth2\/google\"\n\t\"github.com\/mpl\/scancabimport\/third_party\/google.golang.org\/cloud\/datastore\"\n)\n\n\/*\nTo get blobs, already tried:\n1) gcs with google.golang.org\/cloud\/storage -> getting a 403. Plus that wouldn't work anyway\nas the blobs don't seem to be in the bucket when I ls with gsutil.\n2) oauth2 with code.google.com\/p\/goauth2\/oauth + GET on \/resource -> getting redirected.\n3) oauth2 with github.com\/golang\/oauth2 + GET on \/resource -> same thing.\n4) oauth with github.com\/garyburd\/go-oauth\/oauth -> getting a 400, but maybe I half-assed it.\n5) went back to github.com\/golang\/oauth2, and added X-AppEngine-User-Email header -> not better.\n6) go doc hinted at the problem: there's still a login: required in app.yaml, that oauth does not override. need to test and confirm (that we're ok without it).\n7) back to approach in 1): was getting 403 because GCS JSON API was needed too. Getting 404s now.\nBut looked through API explorer at https:\/\/developers.google.com\/apis-explorer\/#p\/storage\/v1\/storage.objects.list\nwhich shows same as with gsutil, i.e. not my files. So probably no go that way.\n8) back to 6). -> yep, that works.\n*\/\n\nvar (\n\tprojectId = \"scancabcamli\"\n\tserviceAccount = \"886924983567-uiln6pus9iuumdq3i0vav0ntveodas0r@developer.gserviceaccount.com\"\n\tmyEmail = \"mathieu.lonjaret@gmail.com\"\n\tds *datastore.Dataset\n\tcl *http.Client\n\tclientId = \"886924983567-hnd1dertfvi2g0lpjs72aae8hi35k364.apps.googleusercontent.com\"\n\tclientSecret = \"nope\"\n\ttokenCacheFile = filepath.Join(os.Getenv(\"HOME\"), \"tokencache.json\")\n)\n\n\/\/ UserInfo represents the metadata associated with the Google User\n\/\/ currently logged-in to the app\ntype UserInfo struct {\n\t\/\/ User stores the email address of the currently logged-in user\n\t\/\/ this is used as the primary key\n\tUser string\n\n\t\/\/ MediaObjects is a count of the MediaObjects currently associated with this user\n\tMediaObjects int64\n\n\t\/\/ UploadPassword is a plain-text string that protects the scan upload API\n\tUploadPassword string\n}\n\n\/\/ MediaObject represents the metadata associated with each individual uploaded scan\ntype MediaObject struct {\n\t\/\/ Owner is the key of the UserInfo of the user that uploaded the file\n\tOwner *datastore.Key\n\n\t\/\/ IntID is the entity ID of the key associated with this MediaObject struct\n\t\/\/ Not stored in datastore but filled on each get()\n\t\/\/\tIntID int64 `datastore:\"-\"`\n\tResourceId int64 `datastore:\"-\"`\n\n\t\/\/ Blob is the key of blobstore entry with this uploaded file\n\tBlob string\n\n\t\/\/ Creation the time when this struct was originally created\n\tCreation time.Time\n\n\t\/\/ ContentType is the MIME-type of the uploaded file.\n\t\/\/ As the mime\/multipart package does not detect Content-Type\n\t\/\/ before sending the file in the command line client, this is\n\t\/\/ detected in the webapp and so this field may differ from the\n\t\/\/ content-type for the associated 
blob in the blobstore\n\tContentType string\n\n\t\/\/ Filename is the name of the file when it was uploaded\n\tFilename string\n\n\t\/\/ Size in bytes of the uploaded file\n\tSize int64\n\n\t\/\/ Document is the key of the associated Document struct.\n\t\/\/ A Document has many MediaObjects. When newly uploaded,\n\t\/\/ a MediaObject is not associated with a Document.\n\tDocument *datastore.Key\n\n\t\/\/ LacksDocument is false when this MediaObject is associated with a Document.\n\t\/\/ When newly uploaded, a MediaObject is not associated with a Document.\n\tLacksDocument bool\n}\n\n\/\/ Document is a structure that groups scans into a logical unit.\n\/\/ A letter (Stored as a document) could have several pages\n\/\/ (each is a MediaObject), for example.\ntype Document struct {\n\t\/\/ Owner is the key of the UserInfo of the user that created the Document\n\tOwner *datastore.Key\n\n\t\/\/ Pages are the keys of each Media Object that contitute this Document\n\tPages []*datastore.Key\n\n\t\/\/ IntID is the entity ID of the key associated with this Document struct\n\t\/\/ Not stored in datastore but filled on each get()\n\tIntID int64 `datastore:\"-\"`\n\n\t\/\/ DocDate is the user-nominated date associated with this document. It can\n\t\/\/ store any date the user likes but is intended to be when the document was\n\t\/\/ received, or, perhaps, written or sent\n\tDocDate time.Time\n\n\t\/\/ NoDate is false when DocDate has been set by the user\n\tNoDate bool\n\n\t\/\/ Creation is the date the Document struct was created\n\tCreation time.Time\n\n\t\/\/ Title is the user-nominated title of the document\n\tTitle string\n\n\t\/\/ Description is the user-nominated description of the document\n\tDescription string\n\n\t\/\/ Tags is the slice of zero or more tags associated with the document by the user\n\tTags string\n\n\t\/\/ LowercaseTags is the content of Tags but stored lowercase as a\n\t\/\/ canonical version so searches on tags can be case-insensitive\n\tLowercaseTags string\n\n\t\/\/ NoTags is true when Tags is empty\n\tNoTags bool\n\n\t\/\/ PhysicalLocation is the user-nominated description of the location\n\t\/\/ of the physical document of which the MediaObjects associated with this\n\t\/\/ Document are scans\n\tPhysicalLocation string\n\n\t\/\/ DueDate is the user-nominated date that the document is \"due\". 
The\n\t\/\/ meaning of what \"due\" means in relation to each particular document\n\t\/\/ is up to the user\n\tDueDate time.Time\n}\n\nconst (\n\tscansRequestLimit = 5\n\tdocsRequestLimit = 5\n)\n\nfunc getScans() ([]*MediaObject, error) {\n\tvar scans []*MediaObject\n\tquery := ds.NewQuery(\"MediaObject\")\n\tquery = query.Limit(scansRequestLimit)\n\tfor {\n\t\tsc := make([]*MediaObject, scansRequestLimit)\n\t\tkeys, next, err := ds.RunQuery(query, sc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ get the key id and store it in the media object because we'll need it\n\t\t\/\/ to fetch the corresponding file from the blobstore later.\n\t\tfor i, obj := range sc {\n\t\t\tif obj == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tobj.ResourceId = keys[i].ID()\n\t\t}\n\t\tscans = append(scans, sc...)\n\t\t\/\/\t\tfor _, v := range keys {\n\t\t\/\/\t\t\tfmt.Printf(\"key: %v, \", v)\n\t\t\/\/\t\t}\n\t\tif next == nil {\n\t\t\tbreak\n\t\t}\n\t\tquery = next\n\t}\n\treturn scans, nil\n}\n\nfunc getDocuments() ([]*Document, error) {\n\tvar docs []*Document\n\tquery := ds.NewQuery(\"Document\")\n\tquery = query.Limit(scansRequestLimit)\n\tfor {\n\t\tdc := make([]*Document, docsRequestLimit)\n\t\t\/\/\t\tkeys, next, err := ds.RunQuery(query, dc)\n\t\t_, next, err := ds.RunQuery(query, dc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdocs = append(docs, dc...)\n\t\t\/\/\t\tfor _, v := range keys {\n\t\t\/\/\t\t\tfmt.Printf(\"key: %v, \", v)\n\t\t\/\/\t\t}\n\t\tif next == nil {\n\t\t\tbreak\n\t\t}\n\t\tquery = next\n\t}\n\treturn docs, nil\n}\n\nfunc getScannedFile(key, filename string) error {\n\t\/\/\t\"https:\/\/scancabcamli.appspot.com\/resource\/5066549580791808\/glenda.png\"\n\t\/*\n\t\treq, err := http.NewRequest(\"GET\", \"https:\/\/scancabcamli.appspot.com\/resource\/\"+key+\"\/glenda.png\", nil)\n\t\treq.Header.Add(\"X-AppEngine-User-Email\", \"mathieu.lonjaret@gmail.com\")\n\t\tresp, err := cl.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t*\/\n\tresp, err := cl.Get(\"https:\/\/\" + projectId + \".appspot.com\/resource\/\" + key + \"\/\" + filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Status %v\", resp.Status)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, body, 0700)\n}\n\nfunc cacheToken(tok *oauth2.Token) error {\n\tfile, err := os.OpenFile(tokenCacheFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := file.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tif err := json.NewEncoder(file).Encode(tok); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc cachedToken() (*oauth2.Token, error) {\n\tfile, err := os.Open(tokenCacheFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\ttok := &oauth2.Token{}\n\tif err := json.NewDecoder(file).Decode(tok); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tok, nil\n}\n\nfunc transportFromAPIKey() (*oauth2.Transport, error) {\n\tconf, err := oauth2.NewConfig(&oauth2.Options{\n\t\tScopes: []string{\"https:\/\/www.googleapis.com\/auth\/appengine.admin\",\n\t\t\t\"https:\/\/www.googleapis.com\/auth\/userinfo.email\"},\n\t\tClientID: clientId,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: \"urn:ietf:wg:oauth:2.0:oob\",\n\t},\n\t\t\"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\t\t\"https:\/\/accounts.google.com\/o\/oauth2\/token\")\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\ttoken, err := cachedToken()\n\tif err == nil {\n\t\ttr := conf.NewTransport()\n\t\ttr.SetToken(token)\n\t\treturn tr, nil\n\t}\n\n\t\/\/ Redirect user to consent page to ask for permission\n\t\/\/ for the scopes specified above.\n\turl := conf.AuthCodeURL(\"state\", \"online\", \"auto\")\n\t\/\/ url := conf.AuthCodeURL(\"state\", \"offline\", \"auto\")\n\tfmt.Printf(\"Visit the URL for the auth dialog: %v\\n\", url)\n\n\tinput := bufio.NewReader(os.Stdin)\n\tline, _, err := input.ReadLine()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to read line: %v\", err)\n\t}\n\tauthorizationCode := strings.TrimSpace(string(line))\n\ttr, err := conf.NewTransportWithCode(authorizationCode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cacheToken(tr.Token()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tr, nil\n}\n\nfunc main() {\n\n\tpemKeyBytes, err := ioutil.ReadFile(\"\/home\/mpl\/scancabcamli-496f5f6eb01b.pem\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO(mpl): try using an authed transport from transportFromAPIKey, so we don't\n\t\/\/ have to setup two different auth.\n\t\/\/ The contrary is not possible (i.e. using transportFromServiceAccount for getting\n\t\/\/ the blobs\/files) because the server would see the service account email as the userinfo,\n\t\/\/ instead of our own joe user email, who is the owner of the objects in the datastore.\n\tds, err = datastore.NewDataset(projectId, serviceAccount, pemKeyBytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscans, err := getScans()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttr, err := transportFromAPIKey()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcl = &http.Client{Transport: tr}\n\tdocuments := make(map[int64]*Document)\n\tusers := make(map[int64]*UserInfo)\n\tfor _, v := range scans {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"%v\\n\", v)\n\t\tif v.Owner != nil {\n\t\t\tuserId := v.Owner.ID()\n\t\t\tif _, ok := users[userId]; !ok {\n\t\t\t\tuserInfo := &UserInfo{}\n\t\t\t\tif err := ds.Get(v.Owner, userInfo); err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tusers[userId] = userInfo\n\t\t\t\tfmt.Printf(\"Owner: %v\\n\", userInfo)\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO(mpl): skip if file already exists, or if any of v.ResourceId, v.Filename not good.\n\t\tif err := getScannedFile(fmt.Sprintf(\"%d\", v.ResourceId), v.Filename); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif v != nil && !v.LacksDocument && v.Document != nil {\n\t\t\tprintln(\"HAS DOCUMENT\")\n\t\t\tdocId := v.Document.ID()\n\t\t\tif _, ok := documents[docId]; ok {\n\t\t\t\tprintln(\"already got it: \" + fmt.Sprintf(\"%d\", docId))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdocument := &Document{}\n\t\t\tif err := ds.Get(v.Document, document); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tdocuments[docId] = document\n\t\t\tfmt.Printf(\"Document: %v\\n\", document)\n\t\t}\n\t}\n\treturn\n\n\t\/*\n\t\t\/\/ TODO(mpl): rm getDocuments, as we should have gotten them all from the scans.\n\t\tdocs, err := getDocuments()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor _, v := range docs {\n\t\t\tfmt.Printf(\"%v\\n\", v)\n\t\t}\n\t\treturn\n\n\t\t\/\/ TODO(mpl): tokencache\n\t\ttr, err := transportFromAPIKey()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tcl = &http.Client{Transport: tr}\n\t\tscanBlobKey := \"5066549580791808\"\n\t\tif err := getScannedFile(scanBlobKey, \"glenda.png\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t*\/\n\n}\n\nfunc transportFromServiceAccount() 
(*oauth2.Transport, error) {\n\tpemKeyBytes, err := ioutil.ReadFile(\"\/home\/mpl\/scancabcamli-496f5f6eb01b.pem\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tconf, err := google.NewServiceAccountConfig(&oauth2.JWTOptions{\n\t\tEmail: serviceAccount,\n\t\tPrivateKey: pemKeyBytes,\n\t\tScopes: []string{\n\t\t\t\/\/\t\t\tgcstorage2.ScopeFullControl,\n\t\t\t\"https:\/\/www.googleapis.com\/auth\/appengine.admin\",\n\t\t\t\"https:\/\/www.googleapis.com\/auth\/userinfo.email\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn conf.NewTransport(), nil\n}\ncleanup\/*\nCopyright 2014 The Camlistore Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mpl\/scancabimport\/third_party\/github.com\/golang\/oauth2\"\n\t\"github.com\/mpl\/scancabimport\/third_party\/google.golang.org\/cloud\/datastore\"\n)\n\n\/*\nTo get blobs, already tried:\n1) gcs with google.golang.org\/cloud\/storage -> getting a 403. Plus that wouldn't work anyway\nas the blobs don't seem to be in the bucket when I ls with gsutil.\n2) oauth2 with code.google.com\/p\/goauth2\/oauth + GET on \/resource -> getting redirected.\n3) oauth2 with github.com\/golang\/oauth2 + GET on \/resource -> same thing.\n4) oauth with github.com\/garyburd\/go-oauth\/oauth -> getting a 400, but maybe I half-assed it.\n5) went back to github.com\/golang\/oauth2, and added X-AppEngine-User-Email header -> not better.\n6) go doc hinted at the problem: there's still a login: required in app.yaml, that oauth does not override. need to test and confirm (that we're ok without it).\n7) back to approach in 1): was getting 403 because GCS JSON API was needed too. Getting 404s now.\nBut looked through API explorer at https:\/\/developers.google.com\/apis-explorer\/#p\/storage\/v1\/storage.objects.list\nwhich shows same as with gsutil, i.e. not my files. So probably no go that way.\n8) back to 6). 
-> yep, that works.\n*\/\n\nvar (\n\tverbose = flag.Bool(\"v\", false, \"verbose\")\n)\n\nvar (\n\t\/\/ for the datastore, where we get the scans and documents metadata.\n\tprojectId = \"scancabcamli\"\n\tserviceAccount = \"886924983567-uiln6pus9iuumdq3i0vav0ntveodas0r@developer.gserviceaccount.com\"\n\tpemFile = \"scancabcamli-496f5f6eb01b.pem\"\n\tds *datastore.Dataset\n\n\t\/\/ we get the scans themselves, which are in the blobstore, through hitting the app itself.\n\tcl *http.Client\n\tclientId = \"886924983567-hnd1dertfvi2g0lpjs72aae8hi35k364.apps.googleusercontent.com\"\n\tclientSecret = \"nope\"\n\ttokenCacheFile = \"tokencache.json\"\n\tscansDir = \"scans\"\n)\n\n\/\/ UserInfo represents the metadata associated with the Google User\n\/\/ currently logged-in to the app\ntype UserInfo struct {\n\t\/\/ User stores the email address of the currently logged-in user\n\t\/\/ this is used as the primary key\n\tUser string\n\n\t\/\/ MediaObjects is a count of the MediaObjects currently associated with this user\n\tMediaObjects int64\n\n\t\/\/ UploadPassword is a plain-text string that protects the scan upload API\n\tUploadPassword string\n}\n\n\/\/ MediaObject represents the metadata associated with each individual uploaded scan\ntype MediaObject struct {\n\t\/\/ Owner is the key of the UserInfo of the user that uploaded the file\n\tOwner *datastore.Key\n\n\t\/\/ IntID is the entity ID of the key associated with this MediaObject struct\n\t\/\/ Not stored in datastore but filled on each get()\n\t\/\/\tIntID int64 `datastore:\"-\"`\n\tResourceId int64 `datastore:\"-\"`\n\n\t\/\/ Blob is the key of the blobstore entry with this uploaded file\n\tBlob string\n\n\t\/\/ Creation is the time when this struct was originally created\n\tCreation time.Time\n\n\t\/\/ ContentType is the MIME-type of the uploaded file.\n\t\/\/ As the mime\/multipart package does not detect Content-Type\n\t\/\/ before sending the file in the command line client, this is\n\t\/\/ detected in the webapp and so this field may differ from the\n\t\/\/ content-type for the associated blob in the blobstore\n\tContentType string\n\n\t\/\/ Filename is the name of the file when it was uploaded\n\tFilename string\n\n\t\/\/ Size in bytes of the uploaded file\n\tSize int64\n\n\t\/\/ Document is the key of the associated Document struct.\n\t\/\/ A Document has many MediaObjects. When newly uploaded,\n\t\/\/ a MediaObject is not associated with a Document.\n\tDocument *datastore.Key\n\n\t\/\/ LacksDocument is false when this MediaObject is associated with a Document.\n\t\/\/ When newly uploaded, a MediaObject is not associated with a Document.\n\tLacksDocument bool\n}\n\n\/\/ Document is a structure that groups scans into a logical unit.\n\/\/ A letter (stored as a Document) could have several pages\n\/\/ (each is a MediaObject), for example.\ntype Document struct {\n\t\/\/ Owner is the key of the UserInfo of the user that created the Document\n\tOwner *datastore.Key\n\n\t\/\/ Pages are the keys of each Media Object that constitute this Document\n\tPages []*datastore.Key\n\n\t\/\/ IntID is the entity ID of the key associated with this Document struct\n\t\/\/ Not stored in datastore but filled on each get()\n\tIntID int64 `datastore:\"-\"`\n\n\t\/\/ DocDate is the user-nominated date associated with this document. 
It can\n\t\/\/ store any date the user likes but is intended to be when the document was\n\t\/\/ received, or, perhaps, written or sent\n\tDocDate time.Time\n\n\t\/\/ NoDate is false when DocDate has been set by the user\n\tNoDate bool\n\n\t\/\/ Creation is the date the Document struct was created\n\tCreation time.Time\n\n\t\/\/ Title is the user-nominated title of the document\n\tTitle string\n\n\t\/\/ Description is the user-nominated description of the document\n\tDescription string\n\n\t\/\/ Tags is the slice of zero or more tags associated with the document by the user\n\tTags string\n\n\t\/\/ LowercaseTags is the content of Tags but stored lowercase as a\n\t\/\/ canonical version so searches on tags can be case-insensitive\n\tLowercaseTags string\n\n\t\/\/ NoTags is true when Tags is empty\n\tNoTags bool\n\n\t\/\/ PhysicalLocation is the user-nominated description of the location\n\t\/\/ of the physical document of which the MediaObjects associated with this\n\t\/\/ Document are scans\n\tPhysicalLocation string\n\n\t\/\/ DueDate is the user-nominated date that the document is \"due\". The\n\t\/\/ meaning of what \"due\" means in relation to each particular document\n\t\/\/ is up to the user\n\tDueDate time.Time\n}\n\nconst (\n\t\/\/ TODO(mpl): figure out how high these can be cranked up.\n\tscansRequestLimit = 5\n\tdocsRequestLimit = 5\n)\n\nfunc getScans() ([]*MediaObject, error) {\n\tvar scans []*MediaObject\n\tquery := ds.NewQuery(\"MediaObject\")\n\tquery = query.Limit(scansRequestLimit)\n\tfor {\n\t\tsc := make([]*MediaObject, scansRequestLimit)\n\t\tkeys, next, err := ds.RunQuery(query, sc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ get the key id and store it in the media object because we'll need it\n\t\t\/\/ to fetch the corresponding file from the blobstore later.\n\t\tfor i, obj := range sc {\n\t\t\tif obj == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tobj.ResourceId = keys[i].ID()\n\t\t}\n\t\tscans = append(scans, sc...)\n\t\tif next == nil {\n\t\t\tbreak\n\t\t}\n\t\tquery = next\n\t}\n\treturn scans, nil\n}\n\nfunc getDocuments() ([]*Document, error) {\n\tvar docs []*Document\n\tquery := ds.NewQuery(\"Document\")\n\tquery = query.Limit(docsRequestLimit)\n\tfor {\n\t\tdc := make([]*Document, docsRequestLimit)\n\t\t\/\/\t\tkeys, next, err := ds.RunQuery(query, dc)\n\t\t_, next, err := ds.RunQuery(query, dc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdocs = append(docs, dc...)\n\t\tif next == nil {\n\t\t\tbreak\n\t\t}\n\t\tquery = next\n\t}\n\treturn docs, nil\n}\n\nfunc getScannedFile(resourceId, filename string) error {\n\tif resourceId == \"\" {\n\t\tlog.Printf(\"WARNING: Not fetching scan because empty resourceId\")\n\t\treturn nil\n\t}\n\tif filename == \"\" {\n\t\tlog.Printf(\"WARNING: Not fetching scan because empty filename\")\n\t\treturn nil\n\t}\n\tfilePath := filepath.Join(scansDir, filename)\n\tif _, err := os.Stat(filePath); err == nil {\n\t\tlog.Printf(\"%s already exists, skipping download.\", filePath)\n\t\treturn nil\n\t}\n\tresp, err := cl.Get(\"https:\/\/\" + projectId + \".appspot.com\/resource\/\" + resourceId + \"\/\" + filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Status %v\", resp.Status)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filePath, body, 0700)\n}\n
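\n\/\/ As an illustration of the URL scheme getScannedFile relies on: in an earlier\n\/\/ revision of this importer, the scan with datastore key ID 5066549580791808 and\n\/\/ filename \"glenda.png\" was fetched from\n\/\/ https:\/\/scancabcamli.appspot.com\/resource\/5066549580791808\/glenda.png\n\nfunc cacheToken(tok *oauth2.Token) error {\n\tfile, err := os.OpenFile(tokenCacheFile, 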
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := file.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\tif err := json.NewEncoder(file).Encode(tok); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc cachedToken() (*oauth2.Token, error) {\n\tfile, err := os.Open(tokenCacheFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\ttok := &oauth2.Token{}\n\tif err := json.NewDecoder(file).Decode(tok); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tok, nil\n}\n\nfunc transportFromAPIKey() (*oauth2.Transport, error) {\n\tconf, err := oauth2.NewConfig(&oauth2.Options{\n\t\tScopes: []string{\"https:\/\/www.googleapis.com\/auth\/appengine.admin\",\n\t\t\t\"https:\/\/www.googleapis.com\/auth\/userinfo.email\"},\n\t\tClientID: clientId,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: \"urn:ietf:wg:oauth:2.0:oob\",\n\t},\n\t\t\"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\t\t\"https:\/\/accounts.google.com\/o\/oauth2\/token\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttoken, err := cachedToken()\n\tif err == nil {\n\t\ttr := conf.NewTransport()\n\t\ttr.SetToken(token)\n\t\treturn tr, nil\n\t}\n\n\t\/\/ Redirect user to consent page to ask for permission\n\t\/\/ for the scopes specified above.\n\turl := conf.AuthCodeURL(\"state\", \"online\", \"auto\")\n\t\/\/ url := conf.AuthCodeURL(\"state\", \"offline\", \"auto\")\n\tfmt.Printf(\"Visit the URL for the auth dialog: %v\\n\", url)\n\tfmt.Println(\"And enter the authorization string displayed in your browser:\")\n\n\tinput := bufio.NewReader(os.Stdin)\n\tline, _, err := input.ReadLine()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to read line: %v\", err)\n\t}\n\tauthorizationCode := strings.TrimSpace(string(line))\n\ttr, err := conf.NewTransportWithCode(authorizationCode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cacheToken(tr.Token()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tr, nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif err := os.MkdirAll(scansDir, 0700); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpemKeyBytes, err := ioutil.ReadFile(pemFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO(mpl): try using an authed transport from transportFromAPIKey, so we don't\n\t\/\/ have to setup two different auth.\n\t\/\/ The contrary is not possible (i.e. 
using transportFromServiceAccount for getting\n\t\/\/ the blobs\/files) because the server would see the service account email as the userinfo,\n\t\/\/ instead of our own joe user email, who is the owner of the objects in the datastore.\n\tds, err = datastore.NewDataset(projectId, serviceAccount, pemKeyBytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ TODO(mpl): write scans + docs on json files\n\tscans, err := getScans()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttr, err := transportFromAPIKey()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcl = &http.Client{Transport: tr}\n\n\tdocuments := make(map[int64]*Document)\n\tusers := make(map[int64]*UserInfo)\n\tfor _, v := range scans {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif *verbose {\n\t\t\tfmt.Printf(\"%v\\n\", v)\n\t\t}\n\t\tif v.Owner != nil {\n\t\t\tuserId := v.Owner.ID()\n\t\t\tif _, ok := users[userId]; !ok {\n\t\t\t\tuserInfo := &UserInfo{}\n\t\t\t\tif err := ds.Get(v.Owner, userInfo); err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tusers[userId] = userInfo\n\t\t\t\tif *verbose {\n\t\t\t\t\tfmt.Printf(\"Owner: %v\\n\", userInfo)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err := getScannedFile(fmt.Sprintf(\"%d\", v.ResourceId), v.Filename); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif v != nil && !v.LacksDocument && v.Document != nil {\n\t\t\tdocId := v.Document.ID()\n\t\t\tif _, ok := documents[docId]; ok {\n\t\t\t\tif *verbose {\n\t\t\t\t\tfmt.Printf(\"Document cache hit: %d\\n\", docId)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdocument := &Document{}\n\t\t\tif err := ds.Get(v.Document, document); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tdocuments[docId] = document\n\t\t\tif *verbose {\n\t\t\t\tfmt.Printf(\"Document: %v\\n\", document)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/heqzha\/dcache\"\n)\n\nfunc TestDCacheString(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11000\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test12\"\n\tstrVal := \"\"\n\tif err := cli.Get(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n\tif err := cli.Set(\"default\", key, \"Hello World\"); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif err := cli.Get(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n\n\tif err := cli.Del(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n}\n\ntype TestObj struct {\n\tName string\n\tAge int\n\tTs time.Time\n}\n\nfunc TestDCacheObjGetSet(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11001\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test1\"\n\n\toldObj := TestObj{}\n\tif err := cli.Get(\"default\", key, &oldObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(oldObj)\n\n\tobj := TestObj{\n\t\tName: \"abc\",\n\t\tAge: 11,\n\t\tTs: time.Now(),\n\t}\n\tif err := cli.Set(\"default\", key, obj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tnewObj := TestObj{}\n\tif err := cli.Get(\"default\", key, &newObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(newObj)\n}\n\nfunc TestDCacheObjDel(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11001\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test1\"\n\n\tobj := TestObj{\n\t\tName: 
\"abc\",\n\t\tAge: 11,\n\t\tTs: time.Now(),\n\t}\n\tif err := cli.Set(\"default\", key, obj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tdelObj := TestObj{}\n\tif err := cli.Del(\"default\", key, &delObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(delObj)\n}\nAdd TestDCacheObjGetIfExist test codespackage test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/heqzha\/dcache\"\n)\n\nfunc TestDCacheString(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11000\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test12\"\n\tstrVal := \"\"\n\tif err := cli.Get(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n\tif err := cli.Set(\"default\", key, \"Hello World\"); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tif err := cli.Get(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n\n\tif err := cli.Del(\"default\", key, &strVal); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(strVal)\n}\n\ntype TestObj struct {\n\tName string\n\tAge int\n\tTs time.Time\n}\n\nfunc TestDCacheObjGetSet(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11000\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test1\"\n\n\toldObj := TestObj{}\n\tif err := cli.Get(\"default\", key, &oldObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(oldObj)\n\n\tobj := TestObj{\n\t\tName: \"abc\",\n\t\tAge: 11,\n\t\tTs: time.Now(),\n\t}\n\tif err := cli.Set(\"default\", key, obj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tnewObj := TestObj{}\n\tif err := cli.Get(\"default\", key, &newObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(newObj)\n}\n\nfunc TestDCacheObjDel(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11000\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tkey := \"test1\"\n\n\tobj := TestObj{\n\t\tName: \"abc\",\n\t\tAge: 11,\n\t\tTs: time.Now(),\n\t}\n\tif err := cli.Set(\"default\", key, obj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tdelObj := TestObj{}\n\tif err := cli.Del(\"default\", key, &delObj); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tt.Log(delObj)\n}\n\nfunc TestDCacheObjGetIfExist(t *testing.T) {\n\tpool := dcache.GetCliPoolInst()\n\tcli, err := pool.GetOrAdd(\"127.0.0.1:11000\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tgroup := \"default\"\n\tkey := \"key1\"\n\tobj := TestObj{\n\t\tName: \"abc\",\n\t\tAge: 11,\n\t\tTs: time.Now(),\n\t}\n\tif err := cli.SetWithExpire(group, key, obj, 10*time.Second); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor {\n\t\tnewObj := TestObj{}\n\t\tif err := cli.GetIfExist(group, key, &newObj); err != nil {\n\t\t\tif err == dcache.KeyNotExistError {\n\t\t\t\tfmt.Println(\"Done\")\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfmt.Println(newObj)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2022 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage daisyutils\n\nimport (\n\tdaisy \"github.com\/GoogleCloudPlatform\/compute-daisy\"\n\tcomputeBeta \"google.golang.org\/api\/compute\/v0.beta\"\n\t\"google.golang.org\/api\/compute\/v1\"\n)\n\n\/\/ EnableNestedVirtualizationHook is a WorkflowHook that updates CreateInstances in a\n\/\/ daisy workflow such that they will be created with nested virtualization enabled.\n\/\/\n\/\/ For more info on nested virtualization see:\n\/\/\n\/\/\thttps:\/\/cloud.google.com\/compute\/docs\/instances\/nested-virtualization\/overview\ntype EnableNestedVirtualizationHook struct{}\n\n\/\/ PreRunHook updates the CreateInstances steps so that they won't have an external IP.\nfunc (t *EnableNestedVirtualizationHook) PreRunHook(wf *daisy.Workflow) error {\n\twf.IterateWorkflowSteps(func(step *daisy.Step) {\n\t\tif step.CreateInstances != nil {\n\t\t\tfor _, instance := range step.CreateInstances.Instances {\n\t\t\t\tif instance.AdvancedMachineFeatures == nil {\n\t\t\t\t\tinstance.AdvancedMachineFeatures = &compute.AdvancedMachineFeatures{}\n\t\t\t\t}\n\t\t\t\tinstance.AdvancedMachineFeatures.EnableNestedVirtualization = true\n\t\t\t}\n\t\t\tfor _, instance := range step.CreateInstances.InstancesBeta {\n\t\t\t\tif instance.AdvancedMachineFeatures == nil {\n\t\t\t\t\tinstance.AdvancedMachineFeatures = &computeBeta.AdvancedMachineFeatures{}\n\t\t\t\t}\n\t\t\t\tinstance.AdvancedMachineFeatures.EnableNestedVirtualization = true\n\t\t\t}\n\n\t\t}\n\t})\n\treturn nil\n}\nFix comment. (#76)\/\/ Copyright 2022 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage daisyutils\n\nimport (\n\tdaisy \"github.com\/GoogleCloudPlatform\/compute-daisy\"\n\tcomputeBeta \"google.golang.org\/api\/compute\/v0.beta\"\n\t\"google.golang.org\/api\/compute\/v1\"\n)\n\n\/\/ EnableNestedVirtualizationHook is a WorkflowHook that updates CreateInstances in a\n\/\/ daisy workflow such that they will be created with nested virtualization enabled.\n\/\/\n\/\/ For more info on nested virtualization see:\n\/\/\n\/\/\thttps:\/\/cloud.google.com\/compute\/docs\/instances\/nested-virtualization\/overview\ntype EnableNestedVirtualizationHook struct{}\n\n\/\/ PreRunHook updates the CreateInstances steps so that they will be created with\n\/\/ nested virtualization enabled.\nfunc (t *EnableNestedVirtualizationHook) PreRunHook(wf *daisy.Workflow) error {\n\twf.IterateWorkflowSteps(func(step *daisy.Step) {\n\t\tif step.CreateInstances != nil {\n\t\t\tfor _, instance := range step.CreateInstances.Instances {\n\t\t\t\tif instance.AdvancedMachineFeatures == nil {\n\t\t\t\t\tinstance.AdvancedMachineFeatures = &compute.AdvancedMachineFeatures{}\n\t\t\t\t}\n\t\t\t\tinstance.AdvancedMachineFeatures.EnableNestedVirtualization = true\n\t\t\t}\n\t\t\tfor _, instance := range step.CreateInstances.InstancesBeta {\n\t\t\t\tif instance.AdvancedMachineFeatures == nil {\n\t\t\t\t\tinstance.AdvancedMachineFeatures = &computeBeta.AdvancedMachineFeatures{}\n\t\t\t\t}\n\t\t\t\tinstance.AdvancedMachineFeatures.EnableNestedVirtualization = true\n\t\t\t}\n\n\t\t}\n\t})\n\treturn nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gemnasium\/logrus-hooks\/graylog\"\n\t\"github.com\/plumbum\/mgorus\"\n\t\"time\"\n)\n\nfunc main() {\n\n\t\/\/ log.SetFormatter(&log.JSONFormatter{})\n\t\/\/ log.SetOutput(os.Stderr)\n\n\tlog := logrus.New()\n\tlog.Level = logrus.DebugLevel\n\thook := graylog.NewGraylogHook(\"127.0.0.1:12201\", \"myFacility\", map[string]interface{}{\"startTime\": time.Now().String()})\n\tlog.Hooks.Add(hook)\n\n\thooker, err := mgorus.NewHooker(\"localhost:27017\", \"logrus\", \"log\")\n\tif err == nil {\n\t\tlog.Hooks.Add(hooker)\n\t\tlog.Info(\"MongoDB log ok\")\n\t}\n\n\tlog.Print(\"Simple print\")\n\tlog.Warn(\"warn\")\n\tlog.Info(\"some logging message\")\n\tlog.Debug(\"debug\")\n\tlog.Error(\"Is great error\")\n\tlog.Print(\"Сообщение на русском\")\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"name\": \"zhangsan\",\n\t\t\"age\": 28,\n\t}).Error(\"Hello world!\")\n\n\tlog.WithField(\"extra\", \"Is extra message\").WithField(\"date\", time.Now().String()).Info(\"Item\")\n\n\ttime.Sleep(time.Second) \/\/ Ждём одну секунду, что бы логи вывалились в graylog\n\n}\nlogrus write to sentrypackage main\n\nimport (\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/evalphobia\/logrus_sentry\"\n\t\"github.com\/getsentry\/raven-go\"\n\t\"github.com\/plumbum\/mgorus\"\n\t\"time\"\n)\n\ntype HS map[string]string\n\nfunc main() 
{\n\n\t\/\/ log.SetFormatter(&log.JSONFormatter{})\n\t\/\/ log.SetOutput(os.Stderr)\n\n\tlog := logrus.New()\n\tlog.Level = logrus.DebugLevel\n\t\/*\n\t\thook := graylog.NewGraylogHook(\"127.0.0.1:12201\", \"myFacility\", map[string]interface{}{\"startTime\": time.Now().String()})\n\t\tlog.Hooks.Add(hook)\n\t*\/\n\n\travenClient, err := raven.New(\"http:\/\/78d5df1b220e47958c28fbab30ac92d5:db59624047fd4616939520e684a62dd7@172.17.0.5:9000\/2\")\n\tif err == nil {\n\t\travenClient.CaptureMessage(\"Запущен Sentry\", HS{\"tag1\": \"one\", \"tag2\": \"two\"})\n\t\thookSentry, err := logrus_sentry.NewWithClientSentryHook(\n\t\t\travenClient,\n\t\t\t[]logrus.Level{\n\t\t\t\tlogrus.PanicLevel,\n\t\t\t\tlogrus.FatalLevel,\n\t\t\t\tlogrus.ErrorLevel,\n\t\t\t})\n\t\tif err == nil {\n\t\t\tlog.Hooks.Add(hookSentry)\n\t\t\tlog.Info(\"Sentry logger OK\")\n\t\t\travenClient.CaptureMessage(\"Подключили Sentry к логу\", HS{\"tag1\": \"one\", \"tag2\": \"two\"})\n\t\t} else {\n\t\t\tlog.Warn(\"Can't create Sentry hook: \", err)\n\t\t}\n\n\t\travenClient.CapturePanic(func() {\n\t\t\tpanic(\"Здесь перехватываем панику\")\n\t\t}, HS{\"status\": \"panic\"})\n\n\t} else {\n\t\tlog.Warn(\"Can't connect to Sentry: \", err)\n\t}\n\n\thookMongo, err := mgorus.NewHooker(\"localhost:27017\", \"logrus\", \"log\")\n\tif err == nil {\n\t\tlog.Hooks.Add(hookMongo)\n\t\tlog.Info(\"MongoDB logger OK\")\n\t} else {\n\t\tlog.Warn(\"Can't create Mongo hook: \", err)\n\t}\n\n\tlog.Print(\"Simple print\")\n\tlog.Warn(\"warn\")\n\tlog.Info(\"some logging message\")\n\tlog.Debug(\"debug\")\n\ttime.Sleep(time.Second) \/\/ Wait one second so the logs get flushed to graylog\n\tlog.Error(\"Is great error\")\n\tlog.WithField(\"lang\", \"ru-RU\").Print(\"Сообщение на русском\")\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"name\": \"zhangsan\",\n\t\t\"age\": 28,\n\t}).Error(\"Hello world!\")\n\n\tlog.WithField(\"extra\", \"Is extra message\").WithField(\"date\", time.Now().String()).Info(\"Item\")\n\n\travenClient.Wait()\n\ttime.Sleep(time.Second) \/\/ Wait one second so the logs get flushed to graylog\n\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"github.com\/docker\/machine\/drivers\/vmwarefusion\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/constants\"\n)\n\nfunc createVMwareFusionHost(config MachineConfig) drivers.Driver {\n\td := vmwarefusion.NewDriver(constants.MachineName, constants.Minipath).(*vmwarefusion.Driver)\n\td.Boot2DockerURL = config.GetISOFileURI()\n\td.Memory = config.Memory\n\td.CPU = config.CPUs\n\n\t\/\/ TODO(philips): push these defaults upstream to fixup this driver\n\td.SSHPort = 22\n\td.ISO = d.ResolveStorePath(\"boot2docker.iso\")\n\treturn d\n}\n
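\n\/\/ Note: xhyveDriver redeclares the config fields of the external docker-machine\n\/\/ xhyve driver, presumably so that they can be populated and serialized for that\n\/\/ driver without importing the darwin-only driver package directly.\ntype xhyveDriver struct {\n\t*drivers.BaseDriver\n\tBoot2DockerURL string\n\tBootCmd string\n\tCPU int\n\tCaCertPath string\n\tDiskSize int64\n\tMacAddr string\n\tMemory 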
int\n\tPrivateKeyPath string\n\tUUID string\n\tNFSShare bool\n\tDiskNumber int\n\tVirtio9p bool\n\tVirtio9pFolder string\n}\n\nfunc createXhyveHost(config MachineConfig) *xhyveDriver {\n\treturn &xhyveDriver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tMachineName: constants.MachineName,\n\t\t\tStorePath: constants.Minipath,\n\t\t},\n\t\tMemory: config.Memory,\n\t\tCPU: config.CPUs,\n\t\tBoot2DockerURL: config.GetISOFileURI(),\n\t\tBootCmd: \"loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 base host=boot2docker\",\n\t\tDiskSize: int64(config.DiskSize),\n\t\tVirtio9p: true,\n\t\tVirtio9pFolder: \"\/Users\",\n\t}\n}\nEnsures that we get the same IP between start\/delete\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"github.com\/docker\/machine\/drivers\/vmwarefusion\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/constants\"\n)\n\n\/\/ Ensures that we get assigned the same IP across deletes\/starts\nconst xhyveUUID = \"57FD2012-FA4A-4FF7-AEFF-26E1A1D76847\"\n\nfunc createVMwareFusionHost(config MachineConfig) drivers.Driver {\n\td := vmwarefusion.NewDriver(constants.MachineName, constants.Minipath).(*vmwarefusion.Driver)\n\td.Boot2DockerURL = config.GetISOFileURI()\n\td.Memory = config.Memory\n\td.CPU = config.CPUs\n\n\t\/\/ TODO(philips): push these defaults upstream to fixup this driver\n\td.SSHPort = 22\n\td.ISO = d.ResolveStorePath(\"boot2docker.iso\")\n\treturn d\n}\n\ntype xhyveDriver struct {\n\t*drivers.BaseDriver\n\tBoot2DockerURL string\n\tBootCmd string\n\tCPU int\n\tCaCertPath string\n\tDiskSize int64\n\tMacAddr string\n\tMemory int\n\tPrivateKeyPath string\n\tUUID string\n\tNFSShare bool\n\tDiskNumber int\n\tVirtio9p bool\n\tVirtio9pFolder string\n}\n\nfunc createXhyveHost(config MachineConfig) *xhyveDriver {\n\treturn &xhyveDriver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tMachineName: constants.MachineName,\n\t\t\tStorePath: constants.Minipath,\n\t\t},\n\t\tMemory: config.Memory,\n\t\tCPU: config.CPUs,\n\t\tBoot2DockerURL: config.GetISOFileURI(),\n\t\tBootCmd: \"loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 base host=boot2docker\",\n\t\tDiskSize: int64(config.DiskSize),\n\t\tVirtio9p: true,\n\t\tVirtio9pFolder: \"\/Users\",\n\t\tUUID: xhyveUUID,\n\t}\n}\n<|endoftext|>"} {"text":"package lfs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tv1Aliases = []string{\n\t\t\"http:\/\/git-media.io\/v\/2\", \/\/ alpha\n\t\t\"https:\/\/hawser.github.com\/spec\/v1\", \/\/ pre-release\n\t\t\"https:\/\/git-lfs.github.com\/spec\/v1\", \/\/ public launch\n\t}\n\tlatest = \"https:\/\/git-lfs.github.com\/spec\/v1\"\n\toidType = \"sha256\"\n\toidRE = regexp.MustCompile(`\\A[[:alnum:]]{64}`)\n\tmatcherRE = regexp.MustCompile(\"git-media|hawser|git-lfs\")\n\textRE = 
regexp.MustCompile(`\\Aext-\\d{1}-\\w+`)\n\tpointerKeys = []string{\"version\", \"oid\", \"size\"}\n)\n\ntype Pointer struct {\n\tVersion string\n\tOid string\n\tSize int64\n\tOidType string\n\tExtensions []*PointerExtension\n}\n\n\/\/ A PointerExtension is parsed from the Git LFS Pointer file.\ntype PointerExtension struct {\n\tName string\n\tPriority int\n\tOid string\n\tOidType string\n}\n\ntype ByPriority []*PointerExtension\n\nfunc (p ByPriority) Len() int { return len(p) }\nfunc (p ByPriority) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p ByPriority) Less(i, j int) bool { return p[i].Priority < p[j].Priority }\n\nfunc NewPointer(oid string, size int64, exts []*PointerExtension) *Pointer {\n\treturn &Pointer{latest, oid, size, oidType, exts}\n}\n\nfunc NewPointerExtension(name string, priority int, oid string) *PointerExtension {\n\treturn &PointerExtension{name, priority, oid, oidType}\n}\n\nfunc (p *Pointer) Smudge(writer io.Writer, workingfile string, download bool, cb CopyCallback) error {\n\treturn PointerSmudge(writer, p, workingfile, download, cb)\n}\n\nfunc (p *Pointer) Encode(writer io.Writer) (int, error) {\n\treturn EncodePointer(writer, p)\n}\n\nfunc (p *Pointer) Encoded() string {\n\tvar buffer bytes.Buffer\n\tif p.Size != 0 {\n\t\tbuffer.WriteString(fmt.Sprintf(\"version %s\\n\", latest))\n\t\tfor _, ext := range p.Extensions {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"ext-%d-%s %s:%s\\n\", ext.Priority, ext.Name, ext.OidType, ext.Oid))\n\t\t}\n\t\tbuffer.WriteString(fmt.Sprintf(\"oid %s:%s\\n\", p.OidType, p.Oid))\n\t\tbuffer.WriteString(fmt.Sprintf(\"size %d\\n\", p.Size))\n\t}\n\treturn buffer.String()\n}\n\nfunc EncodePointer(writer io.Writer, pointer *Pointer) (int, error) {\n\treturn writer.Write([]byte(pointer.Encoded()))\n}\n\nfunc DecodePointerFromFile(file string) (*Pointer, error) {\n\t\/\/ Check size before reading\n\tstat, err := os.Stat(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif stat.Size() > blobSizeCutoff {\n\t\treturn nil, newNotAPointerError(nil)\n\t}\n\tf, err := os.OpenFile(file, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn DecodePointer(f)\n}\nfunc DecodePointer(reader io.Reader) (*Pointer, error) {\n\t_, p, err := DecodeFrom(reader)\n\treturn p, err\n}\n\nfunc DecodeFrom(reader io.Reader) ([]byte, *Pointer, error) {\n\tbuf := make([]byte, blobSizeCutoff)\n\twritten, err := reader.Read(buf)\n\toutput := buf[0:written]\n\n\tif err != nil {\n\t\treturn output, nil, err\n\t}\n\n\tp, err := decodeKV(bytes.TrimSpace(output))\n\treturn output, p, err\n}\n\nfunc verifyVersion(version string) error {\n\tif len(version) == 0 {\n\t\treturn newNotAPointerError(errors.New(\"Missing version\"))\n\t}\n\n\tfor _, v := range v1Aliases {\n\t\tif v == version {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"Invalid version: \" + version)\n}\n\nfunc decodeKV(data []byte) (*Pointer, error) {\n\tkvps, exts, err := decodeKVData(data)\n\tif err != nil {\n\t\tif IsBadPointerKeyError(err) {\n\t\t\tbadErr := err.(badPointerKeyError)\n\t\t\tif badErr.Expected == \"version\" {\n\t\t\t\treturn nil, newNotAPointerError(err)\n\t\t\t}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif err := verifyVersion(kvps[\"version\"]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue, ok := kvps[\"oid\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"Invalid Oid\")\n\t}\n\n\toid, err := parseOid(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue, ok = kvps[\"size\"]\n\tsize, err := strconv.ParseInt(value, 10, 0)\n\tif err 
!= nil || size < 0 {\n\t\treturn nil, fmt.Errorf(\"Invalid size: %q\", value)\n\t}\n\n\tvar extensions []*PointerExtension\n\tif exts != nil {\n\t\tfor key, value := range exts {\n\t\t\text, err := parsePointerExtension(key, value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\textensions = append(extensions, ext)\n\t\t}\n\t\tif err = validatePointerExtensions(extensions); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsort.Sort(ByPriority(extensions))\n\t}\n\n\treturn NewPointer(oid, size, extensions), nil\n}\n\nfunc parseOid(value string) (string, error) {\n\tparts := strings.SplitN(value, \":\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", errors.New(\"Invalid Oid value: \" + value)\n\t}\n\tif parts[0] != oidType {\n\t\treturn \"\", errors.New(\"Invalid Oid type: \" + parts[0])\n\t}\n\toid := parts[1]\n\tif !oidRE.Match([]byte(oid)) {\n\t\treturn \"\", errors.New(\"Invalid Oid: \" + oid)\n\t}\n\treturn oid, nil\n}\n\nfunc parsePointerExtension(key string, value string) (*PointerExtension, error) {\n\tkeyParts := strings.SplitN(key, \"-\", 3)\n\tif len(keyParts) != 3 || keyParts[0] != \"ext\" {\n\t\treturn nil, errors.New(\"Invalid extension value: \" + value)\n\t}\n\n\tp, err := strconv.Atoi(keyParts[1])\n\tif err != nil || p < 0 {\n\t\treturn nil, errors.New(\"Invalid priority: \" + keyParts[1])\n\t}\n\n\tname := keyParts[2]\n\n\toid, err := parseOid(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewPointerExtension(name, p, oid), nil\n}\n\nfunc validatePointerExtensions(exts []*PointerExtension) error {\n\tm := make(map[int]struct{})\n\tfor _, ext := range exts {\n\t\tif _, exist := m[ext.Priority]; exist {\n\t\t\treturn fmt.Errorf(\"Duplicate priority found: %d\", ext.Priority)\n\t\t}\n\t\tm[ext.Priority] = struct{}{}\n\t}\n\treturn nil\n}\n\nfunc decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, err error) {\n\tkvps = make(map[string]string)\n\n\tif !matcherRE.Match(data) {\n\t\terr = newNotAPointerError(err)\n\t\treturn\n\t}\n\n\tscanner := bufio.NewScanner(bytes.NewBuffer(data))\n\tline := 0\n\tnumKeys := len(pointerKeys)\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\tif len(text) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := strings.SplitN(text, \" \", 2)\n\t\tif len(parts) < 2 {\n\t\t\terr = fmt.Errorf(\"Error reading line %d: %s\", line, text)\n\t\t\treturn\n\t\t}\n\n\t\tkey := parts[0]\n\t\tvalue := parts[1]\n\n\t\tif numKeys <= line {\n\t\t\terr = fmt.Errorf(\"Extra line: %s\", text)\n\t\t\treturn\n\t\t}\n\n\t\tif expected := pointerKeys[line]; key != expected {\n\t\t\tif !extRE.Match([]byte(key)) {\n\t\t\t\terr = newBadPointerKeyError(expected, key)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif exts == nil {\n\t\t\t\texts = make(map[string]string)\n\t\t\t}\n\t\t\texts[key] = value\n\t\t\tcontinue\n\t\t}\n\n\t\tline += 1\n\t\tkvps[key] = value\n\t}\n\n\terr = scanner.Err()\n\treturn\n}\nReturn empty buffer early on empty stringpackage lfs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tv1Aliases = []string{\n\t\t\"http:\/\/git-media.io\/v\/2\", \/\/ alpha\n\t\t\"https:\/\/hawser.github.com\/spec\/v1\", \/\/ pre-release\n\t\t\"https:\/\/git-lfs.github.com\/spec\/v1\", \/\/ public launch\n\t}\n\tlatest = \"https:\/\/git-lfs.github.com\/spec\/v1\"\n\toidType = \"sha256\"\n\toidRE = regexp.MustCompile(`\\A[[:alnum:]]{64}`)\n\tmatcherRE = regexp.MustCompile(\"git-media|hawser|git-lfs\")\n\textRE = 
regexp.MustCompile(`\\Aext-\\d{1}-\\w+`)\n\tpointerKeys = []string{\"version\", \"oid\", \"size\"}\n)\n\ntype Pointer struct {\n\tVersion string\n\tOid string\n\tSize int64\n\tOidType string\n\tExtensions []*PointerExtension\n}\n\n\/\/ A PointerExtension is parsed from the Git LFS Pointer file.\ntype PointerExtension struct {\n\tName string\n\tPriority int\n\tOid string\n\tOidType string\n}\n\ntype ByPriority []*PointerExtension\n\nfunc (p ByPriority) Len() int { return len(p) }\nfunc (p ByPriority) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\nfunc (p ByPriority) Less(i, j int) bool { return p[i].Priority < p[j].Priority }\n\nfunc NewPointer(oid string, size int64, exts []*PointerExtension) *Pointer {\n\treturn &Pointer{latest, oid, size, oidType, exts}\n}\n\nfunc NewPointerExtension(name string, priority int, oid string) *PointerExtension {\n\treturn &PointerExtension{name, priority, oid, oidType}\n}\n\nfunc (p *Pointer) Smudge(writer io.Writer, workingfile string, download bool, cb CopyCallback) error {\n\treturn PointerSmudge(writer, p, workingfile, download, cb)\n}\n\nfunc (p *Pointer) Encode(writer io.Writer) (int, error) {\n\treturn EncodePointer(writer, p)\n}\n\nfunc (p *Pointer) Encoded() string {\n\tvar buffer bytes.Buffer\n\tif p.Size == 0 {\n\t\treturn buffer.String()\n\t}\n\n\tbuffer.WriteString(fmt.Sprintf(\"version %s\\n\", latest))\n\tfor _, ext := range p.Extensions {\n\t\tbuffer.WriteString(fmt.Sprintf(\"ext-%d-%s %s:%s\\n\", ext.Priority, ext.Name, ext.OidType, ext.Oid))\n\t}\n\tbuffer.WriteString(fmt.Sprintf(\"oid %s:%s\\n\", p.OidType, p.Oid))\n\tbuffer.WriteString(fmt.Sprintf(\"size %d\\n\", p.Size))\n\treturn buffer.String()\n}\n\nfunc EncodePointer(writer io.Writer, pointer *Pointer) (int, error) {\n\treturn writer.Write([]byte(pointer.Encoded()))\n}\n\nfunc DecodePointerFromFile(file string) (*Pointer, error) {\n\t\/\/ Check size before reading\n\tstat, err := os.Stat(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif stat.Size() > blobSizeCutoff {\n\t\treturn nil, newNotAPointerError(nil)\n\t}\n\tf, err := os.OpenFile(file, os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn DecodePointer(f)\n}\nfunc DecodePointer(reader io.Reader) (*Pointer, error) {\n\t_, p, err := DecodeFrom(reader)\n\treturn p, err\n}\n\nfunc DecodeFrom(reader io.Reader) ([]byte, *Pointer, error) {\n\tbuf := make([]byte, blobSizeCutoff)\n\twritten, err := reader.Read(buf)\n\toutput := buf[0:written]\n\n\tif err != nil {\n\t\treturn output, nil, err\n\t}\n\n\tp, err := decodeKV(bytes.TrimSpace(output))\n\treturn output, p, err\n}\n\nfunc verifyVersion(version string) error {\n\tif len(version) == 0 {\n\t\treturn newNotAPointerError(errors.New(\"Missing version\"))\n\t}\n\n\tfor _, v := range v1Aliases {\n\t\tif v == version {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"Invalid version: \" + version)\n}\n\nfunc decodeKV(data []byte) (*Pointer, error) {\n\tkvps, exts, err := decodeKVData(data)\n\tif err != nil {\n\t\tif IsBadPointerKeyError(err) {\n\t\t\tbadErr := err.(badPointerKeyError)\n\t\t\tif badErr.Expected == \"version\" {\n\t\t\t\treturn nil, newNotAPointerError(err)\n\t\t\t}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif err := verifyVersion(kvps[\"version\"]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue, ok := kvps[\"oid\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"Invalid Oid\")\n\t}\n\n\toid, err := parseOid(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalue, ok = kvps[\"size\"]\n\tsize, err := 
strconv.ParseInt(value, 10, 0)\n\tif err != nil || size < 0 {\n\t\treturn nil, fmt.Errorf(\"Invalid size: %q\", value)\n\t}\n\n\tvar extensions []*PointerExtension\n\tif exts != nil {\n\t\tfor key, value := range exts {\n\t\t\text, err := parsePointerExtension(key, value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\textensions = append(extensions, ext)\n\t\t}\n\t\tif err = validatePointerExtensions(extensions); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsort.Sort(ByPriority(extensions))\n\t}\n\n\treturn NewPointer(oid, size, extensions), nil\n}\n\nfunc parseOid(value string) (string, error) {\n\tparts := strings.SplitN(value, \":\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", errors.New(\"Invalid Oid value: \" + value)\n\t}\n\tif parts[0] != oidType {\n\t\treturn \"\", errors.New(\"Invalid Oid type: \" + parts[0])\n\t}\n\toid := parts[1]\n\tif !oidRE.Match([]byte(oid)) {\n\t\treturn \"\", errors.New(\"Invalid Oid: \" + oid)\n\t}\n\treturn oid, nil\n}\n\nfunc parsePointerExtension(key string, value string) (*PointerExtension, error) {\n\tkeyParts := strings.SplitN(key, \"-\", 3)\n\tif len(keyParts) != 3 || keyParts[0] != \"ext\" {\n\t\treturn nil, errors.New(\"Invalid extension value: \" + value)\n\t}\n\n\tp, err := strconv.Atoi(keyParts[1])\n\tif err != nil || p < 0 {\n\t\treturn nil, errors.New(\"Invalid priority: \" + keyParts[1])\n\t}\n\n\tname := keyParts[2]\n\n\toid, err := parseOid(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewPointerExtension(name, p, oid), nil\n}\n\nfunc validatePointerExtensions(exts []*PointerExtension) error {\n\tm := make(map[int]struct{})\n\tfor _, ext := range exts {\n\t\tif _, exist := m[ext.Priority]; exist {\n\t\t\treturn fmt.Errorf(\"Duplicate priority found: %d\", ext.Priority)\n\t\t}\n\t\tm[ext.Priority] = struct{}{}\n\t}\n\treturn nil\n}\n\nfunc decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, err error) {\n\tkvps = make(map[string]string)\n\n\tif !matcherRE.Match(data) {\n\t\terr = newNotAPointerError(err)\n\t\treturn\n\t}\n\n\tscanner := bufio.NewScanner(bytes.NewBuffer(data))\n\tline := 0\n\tnumKeys := len(pointerKeys)\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\tif len(text) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := strings.SplitN(text, \" \", 2)\n\t\tif len(parts) < 2 {\n\t\t\terr = fmt.Errorf(\"Error reading line %d: %s\", line, text)\n\t\t\treturn\n\t\t}\n\n\t\tkey := parts[0]\n\t\tvalue := parts[1]\n\n\t\tif numKeys <= line {\n\t\t\terr = fmt.Errorf(\"Extra line: %s\", text)\n\t\t\treturn\n\t\t}\n\n\t\tif expected := pointerKeys[line]; key != expected {\n\t\t\tif !extRE.Match([]byte(key)) {\n\t\t\t\terr = newBadPointerKeyError(expected, key)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif exts == nil {\n\t\t\t\texts = make(map[string]string)\n\t\t\t}\n\t\t\texts[key] = value\n\t\t\tcontinue\n\t\t}\n\n\t\tline += 1\n\t\tkvps[key] = value\n\t}\n\n\terr = scanner.Err()\n\treturn\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/zackbloom\/go-ini\"\n\t\"github.com\/zackbloom\/goamz\/aws\"\n\t\"github.com\/zackbloom\/goamz\/cloudfront\"\n\t\"github.com\/zackbloom\/goamz\/iam\"\n\t\"github.com\/zackbloom\/goamz\/route53\"\n\t\"github.com\/zackbloom\/goamz\/s3\"\n\t\"gopkg.in\/yaml.v1\"\n)\n\nconst (\n\tLIMITED = 60\n\tFOREVER = 31556926\n)\n\nvar s3Session *s3.S3\nvar iamSession 
*iam.IAM\nvar r53Session *route53.Route53\nvar cfSession *cloudfront.CloudFront\n\nfunc getRegion(region string) aws.Region {\n\tregionS, ok := aws.Regions[region]\n\tif !ok {\n\t\tpanic(\"Region not found\")\n\t}\n\treturn regionS\n}\n\nfunc openS3(key, secret, region string) *s3.S3 {\n\tregionS := getRegion(region)\n\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn s3.New(auth, regionS)\n}\n\nfunc openIAM(key, secret, region string) *iam.IAM {\n\tregionS := getRegion(region)\n\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn iam.New(auth, regionS)\n}\n\nfunc openCloudFront(key, secret string) *cloudfront.CloudFront {\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn cloudfront.NewCloudFront(auth)\n}\n\nfunc openRoute53(key, secret string) *route53.Route53 {\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\n\tr53, _ := route53.NewRoute53(auth)\n\treturn r53\n}\n\nfunc panicIf(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\nfunc must(val interface{}, err error) interface{} {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn val\n}\nfunc mustString(val string, err error) string {\n\tpanicIf(err)\n\treturn val\n}\nfunc mustInt(val int, err error) int {\n\tpanicIf(err)\n\treturn val\n}\n\ntype Options struct {\n\tFiles string `yaml:\"files\"`\n\tRoot string `yaml:\"root\"`\n\tDest string `yaml:\"dest\"`\n\tConfigFile string `yaml:\"-\"`\n\tEnv string `yaml:\"-\"`\n\tBucket string `yaml:\"bucket\"`\n\tAWSKey string `yaml:\"key\"`\n\tAWSSecret string `yaml:\"secret\"`\n\tAWSRegion string `yaml:\"region\"`\n\tNoUser bool `yaml:\"-\"`\n}\n\nfunc parseOptions() (o Options, set *flag.FlagSet) {\n\tset = flag.NewFlagSet(os.Args[1], flag.ExitOnError)\n\t\/\/TODO: Set set.Usage\n\n\tset.StringVar(&o.Files, \"files\", \"*\", \"Comma-separated glob patterns of files to deploy (within root)\")\n\tset.StringVar(&o.Root, \"root\", \".\/\", \"The local directory to deploy\")\n\tset.StringVar(&o.Dest, \"dest\", \".\/\", \"The destination directory to write files to in the S3 bucket\")\n\tset.StringVar(&o.ConfigFile, \"config\", \"\", \"A yaml file to read configuration from\")\n\tset.StringVar(&o.Env, \"env\", \"\", \"The env to read from the config file\")\n\tset.StringVar(&o.Bucket, \"bucket\", \"\", \"The bucket to deploy to\")\n\tset.StringVar(&o.AWSKey, \"key\", \"\", \"The AWS key to use\")\n\tset.StringVar(&o.AWSSecret, \"secret\", \"\", \"The AWS secret of the provided key\")\n\tset.StringVar(&o.AWSRegion, \"region\", \"us-east-1\", \"The AWS region the S3 bucket is in\")\n\tset.BoolVar(&o.NoUser, \"no-user\", false, \"When creating, should we make a user account?\")\n\n\tset.Parse(os.Args[2:])\n\n\treturn\n}\n\ntype ConfigFile map[string]Options\n\nfunc loadConfigFile(o *Options) {\n\tisDefault := false\n\tconfigPath := o.ConfigFile\n\tif o.ConfigFile == \"\" {\n\t\tisDefault = true\n\t\tconfigPath = \".\/deploy.yaml\"\n\t}\n\n\tdata, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) && isDefault {\n\t\t\treturn\n\t\t}\n\n\t\tpanic(err)\n\t}\n\n\tvar file ConfigFile\n\terr = yaml.Unmarshal(data, &file)\n\tpanicIf(err)\n\n\tvar envCfg Options\n\tif o.Env != \"\" {\n\t\tvar ok bool\n\t\tenvCfg, ok = file[o.Env]\n\t\tif !ok {\n\t\t\tpanic(\"Config for specified env not found\")\n\t\t}\n\t}\n\n\tdefCfg, _ := file[\"default\"]\n\n\tpanicIf(mergo.Merge(o, defCfg))\n\tpanicIf(mergo.Merge(o, envCfg))\n}\n\nfunc addAWSConfig(o *Options) {\n\tif 
o.AWSKey == \"\" && o.AWSSecret == \"\" {\n\t\to.AWSKey, o.AWSSecret = loadAWSConfig()\n\t}\n}\n\ntype AWSConfig struct {\n\tDefault struct {\n\t\tAccessKey string `ini:\"aws_access_key_id\"`\n\t\tSecretKey string `ini:\"aws_secret_access_key\"`\n\t} `ini:\"[default]\"`\n}\n\nfunc loadAWSConfig() (access string, secret string) {\n\tcfg := AWSConfig{}\n\n\tpath, err := homedir.Expand(\"~\/.aws\/config\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tini.Unmarshal(content, &cfg)\n\n\treturn cfg.Default.AccessKey, cfg.Default.SecretKey\n}\n\nfunc copyFile(bucket *s3.Bucket, from string, to string, contentType string, maxAge int) {\n\tcopyOpts := s3.CopyOptions{\n\t\tMetadataDirective: \"REPLACE\",\n\t\tContentType: contentType,\n\t\tOptions: s3.Options{\n\t\t\tCacheControl: fmt.Sprintf(\"public, max-age=%d\", maxAge),\n\t\t\tContentEncoding: \"gzip\",\n\t\t},\n\t}\n\n\t_, err := bucket.PutCopy(to, s3.PublicRead, copyOpts, joinPath(bucket.Name, from))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar pathRe = regexp.MustCompile(\"\/{2,}\")\n\nfunc joinPath(parts ...string) string {\n\t\/\/ Like filepath.Join, but always uses '\/'\n\tout := filepath.Join(parts...)\n\n\tif os.PathSeparator != '\/' {\n\t\tout = strings.Replace(out, string(os.PathSeparator), \"\/\", -1)\n\t}\n\n\treturn out\n}\nAlso look in AWS Credentials filepackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/zackbloom\/go-ini\"\n\t\"github.com\/zackbloom\/goamz\/aws\"\n\t\"github.com\/zackbloom\/goamz\/cloudfront\"\n\t\"github.com\/zackbloom\/goamz\/iam\"\n\t\"github.com\/zackbloom\/goamz\/route53\"\n\t\"github.com\/zackbloom\/goamz\/s3\"\n\t\"gopkg.in\/yaml.v1\"\n)\n\nconst (\n\tLIMITED = 60\n\tFOREVER = 31556926\n)\n\nvar s3Session *s3.S3\nvar iamSession *iam.IAM\nvar r53Session *route53.Route53\nvar cfSession *cloudfront.CloudFront\n\nfunc getRegion(region string) aws.Region {\n\tregionS, ok := aws.Regions[region]\n\tif !ok {\n\t\tpanic(\"Region not found\")\n\t}\n\treturn regionS\n}\n\nfunc openS3(key, secret, region string) *s3.S3 {\n\tregionS := getRegion(region)\n\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn s3.New(auth, regionS)\n}\n\nfunc openIAM(key, secret, region string) *iam.IAM {\n\tregionS := getRegion(region)\n\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn iam.New(auth, regionS)\n}\n\nfunc openCloudFront(key, secret string) *cloudfront.CloudFront {\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\treturn cloudfront.NewCloudFront(auth)\n}\n\nfunc openRoute53(key, secret string) *route53.Route53 {\n\tauth := aws.Auth{\n\t\tAccessKey: key,\n\t\tSecretKey: secret,\n\t}\n\n\tr53, _ := route53.NewRoute53(auth)\n\treturn r53\n}\n\nfunc panicIf(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\nfunc must(val interface{}, err error) interface{} {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn val\n}\nfunc mustString(val string, err error) string {\n\tpanicIf(err)\n\treturn val\n}\nfunc mustInt(val int, err error) int {\n\tpanicIf(err)\n\treturn val\n}\n\ntype Options struct {\n\tFiles string `yaml:\"files\"`\n\tRoot string `yaml:\"root\"`\n\tDest string `yaml:\"dest\"`\n\tConfigFile string `yaml:\"-\"`\n\tEnv string `yaml:\"-\"`\n\tBucket string `yaml:\"bucket\"`\n\tAWSKey 
string `yaml:\"key\"`\n\tAWSSecret string `yaml:\"secret\"`\n\tAWSRegion string `yaml:\"region\"`\n\tNoUser bool `yaml:\"-\"`\n}\n\nfunc parseOptions() (o Options, set *flag.FlagSet) {\n\tset = flag.NewFlagSet(os.Args[1], flag.ExitOnError)\n\t\/\/TODO: Set set.Usage\n\n\tset.StringVar(&o.Files, \"files\", \"*\", \"Comma-separated glob patterns of files to deploy (within root)\")\n\tset.StringVar(&o.Root, \"root\", \".\/\", \"The local directory to deploy\")\n\tset.StringVar(&o.Dest, \"dest\", \".\/\", \"The destination directory to write files to in the S3 bucket\")\n\tset.StringVar(&o.ConfigFile, \"config\", \"\", \"A yaml file to read configuration from\")\n\tset.StringVar(&o.Env, \"env\", \"\", \"The env to read from the config file\")\n\tset.StringVar(&o.Bucket, \"bucket\", \"\", \"The bucket to deploy to\")\n\tset.StringVar(&o.AWSKey, \"key\", \"\", \"The AWS key to use\")\n\tset.StringVar(&o.AWSSecret, \"secret\", \"\", \"The AWS secret of the provided key\")\n\tset.StringVar(&o.AWSRegion, \"region\", \"us-east-1\", \"The AWS region the S3 bucket is in\")\n\tset.BoolVar(&o.NoUser, \"no-user\", false, \"When creating, should we make a user account?\")\n\n\tset.Parse(os.Args[2:])\n\n\treturn\n}\n\ntype ConfigFile map[string]Options\n\nfunc loadConfigFile(o *Options) {\n\tisDefault := false\n\tconfigPath := o.ConfigFile\n\tif o.ConfigFile == \"\" {\n\t\tisDefault = true\n\t\tconfigPath = \".\/deploy.yaml\"\n\t}\n\n\tdata, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) && isDefault {\n\t\t\treturn\n\t\t}\n\n\t\tpanic(err)\n\t}\n\n\tvar file ConfigFile\n\terr = yaml.Unmarshal(data, &file)\n\tpanicIf(err)\n\n\tvar envCfg Options\n\tif o.Env != \"\" {\n\t\tvar ok bool\n\t\tenvCfg, ok = file[o.Env]\n\t\tif !ok {\n\t\t\tpanic(\"Config for specified env not found\")\n\t\t}\n\t}\n\n\tdefCfg, _ := file[\"default\"]\n\n\tpanicIf(mergo.Merge(o, defCfg))\n\tpanicIf(mergo.Merge(o, envCfg))\n}\n\nfunc addAWSConfig(o *Options) {\n\tif o.AWSKey == \"\" && o.AWSSecret == \"\" {\n\t\to.AWSKey, o.AWSSecret = loadAWSConfig()\n\t}\n}\n\ntype AWSConfig struct {\n\tDefault struct {\n\t\tAccessKey string `ini:\"aws_access_key_id\"`\n\t\tSecretKey string `ini:\"aws_secret_access_key\"`\n\t} `ini:\"[default]\"`\n}\n\nfunc loadAWSConfig() (access string, secret string) {\n\tcfg := AWSConfig{}\n\n\tfor _, file := range []string{\"~\/.aws\/config\", \"~\/.aws\/credentials\"} {\n\t\tpath, err := homedir.Expand(file)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcontent, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tini.Unmarshal(content, &cfg)\n\n\t\tif cfg.Default.AccessKey != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn cfg.Default.AccessKey, cfg.Default.SecretKey\n}\n\nfunc copyFile(bucket *s3.Bucket, from string, to string, contentType string, maxAge int) {\n\tcopyOpts := s3.CopyOptions{\n\t\tMetadataDirective: \"REPLACE\",\n\t\tContentType: contentType,\n\t\tOptions: s3.Options{\n\t\t\tCacheControl: fmt.Sprintf(\"public, max-age=%d\", maxAge),\n\t\t\tContentEncoding: \"gzip\",\n\t\t},\n\t}\n\n\t_, err := bucket.PutCopy(to, s3.PublicRead, copyOpts, joinPath(bucket.Name, from))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nvar pathRe = regexp.MustCompile(\"\/{2,}\")\n\nfunc joinPath(parts ...string) string {\n\t\/\/ Like filepath.Join, but always uses '\/'\n\tout := filepath.Join(parts...)\n\n\tif os.PathSeparator != '\/' {\n\t\tout = strings.Replace(out, string(os.PathSeparator), \"\/\", -1)\n\t}\n\n\treturn out\n}\n<|endoftext|>"} 
{"text":"package app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ugorji\/go\/codec\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/weaveworks\/scope\/common\/mtime\"\n\t\"github.com\/weaveworks\/scope\/report\"\n)\n\n\/\/ Reporter is something that can produce reports on demand. It's a convenient\n\/\/ interface for parts of the app, and several experimental components.\ntype Reporter interface {\n\tReport(context.Context) (report.Report, error)\n\tWaitOn(context.Context, chan struct{})\n\tUnWait(context.Context, chan struct{})\n}\n\n\/\/ Adder is something that can accept reports. It's a convenient interface for\n\/\/ parts of the app, and several experimental components.\ntype Adder interface {\n\tAdd(context.Context, report.Report) error\n}\n\n\/\/ A Collector is a Reporter and an Adder\ntype Collector interface {\n\tReporter\n\tAdder\n}\n\n\/\/ Collector receives published reports from multiple producers. It yields a\n\/\/ single merged report, representing all collected reports.\ntype collector struct {\n\tmtx sync.Mutex\n\treports []report.Report\n\ttimestamps []time.Time\n\twindow time.Duration\n\tcached *report.Report\n\tmerger Merger\n\twaitableCondition\n}\n\ntype waitableCondition struct {\n\tsync.Mutex\n\twaiters map[chan struct{}]struct{}\n}\n\nfunc (wc *waitableCondition) WaitOn(_ context.Context, waiter chan struct{}) {\n\twc.Lock()\n\twc.waiters[waiter] = struct{}{}\n\twc.Unlock()\n}\n\nfunc (wc *waitableCondition) UnWait(_ context.Context, waiter chan struct{}) {\n\twc.Lock()\n\tdelete(wc.waiters, waiter)\n\twc.Unlock()\n}\n\nfunc (wc *waitableCondition) Broadcast() {\n\twc.Lock()\n\tfor waiter := range wc.waiters {\n\t\t\/\/ Non-block write to channel\n\t\tselect {\n\t\tcase waiter <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n\twc.Unlock()\n}\n\n\/\/ NewCollector returns a collector ready for use.\nfunc NewCollector(window time.Duration) Collector {\n\treturn &collector{\n\t\twindow: window,\n\t\twaitableCondition: waitableCondition{\n\t\t\twaiters: map[chan struct{}]struct{}{},\n\t\t},\n\t\tmerger: NewSmartMerger(),\n\t}\n}\n\n\/\/ Add adds a report to the collector's internal state. It implements Adder.\nfunc (c *collector) Add(_ context.Context, rpt report.Report) error {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\tc.reports = append(c.reports, rpt)\n\tc.timestamps = append(c.timestamps, mtime.Now())\n\n\tc.clean()\n\tc.cached = nil\n\tif rpt.Shortcut {\n\t\tc.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ Report returns a merged report over all added reports. 
It implements\n\/\/ Reporter.\nfunc (c *collector) Report(_ context.Context) (report.Report, error) {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\t\/\/ If the oldest report is still within range,\n\t\/\/ and there is a cached report, return that.\n\tif c.cached != nil && len(c.reports) > 0 {\n\t\toldest := mtime.Now().Add(-c.window)\n\t\tif c.timestamps[0].After(oldest) {\n\t\t\treturn *c.cached, nil\n\t\t}\n\t}\n\n\tc.clean()\n\treturn c.merger.Merge(c.reports), nil\n}\n\nfunc (c *collector) clean() {\n\tvar (\n\t\tcleanedReports = make([]report.Report, 0, len(c.reports))\n\t\tcleanedTimestamps = make([]time.Time, 0, len(c.timestamps))\n\t\toldest = mtime.Now().Add(-c.window)\n\t)\n\tfor i, r := range c.reports {\n\t\tif c.timestamps[i].After(oldest) {\n\t\t\tcleanedReports = append(cleanedReports, r)\n\t\t\tcleanedTimestamps = append(cleanedTimestamps, c.timestamps[i])\n\t\t}\n\t}\n\tc.reports = cleanedReports\n\tc.timestamps = cleanedTimestamps\n}\n\n\/\/ StaticCollector always returns the given report.\ntype StaticCollector report.Report\n\n\/\/ Report returns a merged report over all added reports. It implements\n\/\/ Reporter.\nfunc (c StaticCollector) Report(context.Context) (report.Report, error) { return report.Report(c), nil }\n\n\/\/ Add adds a report to the collector's internal state. It implements Adder.\nfunc (c StaticCollector) Add(context.Context, report.Report) error { return nil }\n\n\/\/ WaitOn lets other conponents wait on a new report being received. It\n\/\/ implements Reporter.\nfunc (c StaticCollector) WaitOn(context.Context, chan struct{}) {}\n\n\/\/ UnWait lets other conponents stop waiting on a new report being received. It\n\/\/ implements Reporter.\nfunc (c StaticCollector) UnWait(context.Context, chan struct{}) {}\n\n\/\/ NewFileCollector reads and json parses the given path, returning a collector\n\/\/ which always returns that report.\nfunc NewFileCollector(path string) (Collector, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar (\n\t\trpt report.Report\n\t\thandle codec.Handle\n\t\tgzipped bool\n\t)\n\tfileType := filepath.Ext(path)\n\tif fileType == \".gz\" {\n\t\tgzipped = true\n\t\tfileType = filepath.Ext(strings.TrimSuffix(path, fileType))\n\t}\n\tswitch fileType {\n\tcase \".json\":\n\t\thandle = &codec.JsonHandle{}\n\tcase \".msgpack\":\n\t\thandle = &codec.MsgpackHandle{}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported file extension: %v\", fileType)\n\t}\n\n\tif err := rpt.ReadBinary(f, gzipped, handle); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn StaticCollector(rpt), nil\n}\nReview Feedbackpackage app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ugorji\/go\/codec\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/weaveworks\/scope\/common\/mtime\"\n\t\"github.com\/weaveworks\/scope\/report\"\n)\n\n\/\/ Reporter is something that can produce reports on demand. It's a convenient\n\/\/ interface for parts of the app, and several experimental components.\ntype Reporter interface {\n\tReport(context.Context) (report.Report, error)\n\tWaitOn(context.Context, chan struct{})\n\tUnWait(context.Context, chan struct{})\n}\n\n\/\/ Adder is something that can accept reports. 
It's a convenient interface for\n\/\/ parts of the app, and several experimental components.\ntype Adder interface {\n\tAdd(context.Context, report.Report) error\n}\n\n\/\/ A Collector is a Reporter and an Adder\ntype Collector interface {\n\tReporter\n\tAdder\n}\n\n\/\/ Collector receives published reports from multiple producers. It yields a\n\/\/ single merged report, representing all collected reports.\ntype collector struct {\n\tmtx sync.Mutex\n\treports []report.Report\n\ttimestamps []time.Time\n\twindow time.Duration\n\tcached *report.Report\n\tmerger Merger\n\twaitableCondition\n}\n\ntype waitableCondition struct {\n\tsync.Mutex\n\twaiters map[chan struct{}]struct{}\n}\n\nfunc (wc *waitableCondition) WaitOn(_ context.Context, waiter chan struct{}) {\n\twc.Lock()\n\twc.waiters[waiter] = struct{}{}\n\twc.Unlock()\n}\n\nfunc (wc *waitableCondition) UnWait(_ context.Context, waiter chan struct{}) {\n\twc.Lock()\n\tdelete(wc.waiters, waiter)\n\twc.Unlock()\n}\n\nfunc (wc *waitableCondition) Broadcast() {\n\twc.Lock()\n\tfor waiter := range wc.waiters {\n\t\t\/\/ Non-block write to channel\n\t\tselect {\n\t\tcase waiter <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n\twc.Unlock()\n}\n\n\/\/ NewCollector returns a collector ready for use.\nfunc NewCollector(window time.Duration) Collector {\n\treturn &collector{\n\t\twindow: window,\n\t\twaitableCondition: waitableCondition{\n\t\t\twaiters: map[chan struct{}]struct{}{},\n\t\t},\n\t\tmerger: NewSmartMerger(),\n\t}\n}\n\n\/\/ Add adds a report to the collector's internal state. It implements Adder.\nfunc (c *collector) Add(_ context.Context, rpt report.Report) error {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\tc.reports = append(c.reports, rpt)\n\tc.timestamps = append(c.timestamps, mtime.Now())\n\n\tc.clean()\n\tc.cached = nil\n\tif rpt.Shortcut {\n\t\tc.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ Report returns a merged report over all added reports. It implements\n\/\/ Reporter.\nfunc (c *collector) Report(_ context.Context) (report.Report, error) {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\t\/\/ If the oldest report is still within range,\n\t\/\/ and there is a cached report, return that.\n\tif c.cached != nil && len(c.reports) > 0 {\n\t\toldest := mtime.Now().Add(-c.window)\n\t\tif c.timestamps[0].After(oldest) {\n\t\t\treturn *c.cached, nil\n\t\t}\n\t}\n\n\tc.clean()\n\treturn c.merger.Merge(c.reports), nil\n}\n\nfunc (c *collector) clean() {\n\tvar (\n\t\tcleanedReports = make([]report.Report, 0, len(c.reports))\n\t\tcleanedTimestamps = make([]time.Time, 0, len(c.timestamps))\n\t\toldest = mtime.Now().Add(-c.window)\n\t)\n\tfor i, r := range c.reports {\n\t\tif c.timestamps[i].After(oldest) {\n\t\t\tcleanedReports = append(cleanedReports, r)\n\t\t\tcleanedTimestamps = append(cleanedTimestamps, c.timestamps[i])\n\t\t}\n\t}\n\tc.reports = cleanedReports\n\tc.timestamps = cleanedTimestamps\n}\n\n\/\/ StaticCollector always returns the given report.\ntype StaticCollector report.Report\n\n\/\/ Report returns a merged report over all added reports. It implements\n\/\/ Reporter.\nfunc (c StaticCollector) Report(context.Context) (report.Report, error) { return report.Report(c), nil }\n\n\/\/ Add adds a report to the collector's internal state. It implements Adder.\nfunc (c StaticCollector) Add(context.Context, report.Report) error { return nil }\n\n\/\/ WaitOn lets other components wait on a new report being received. 
It\n\/\/ implements Reporter.\nfunc (c StaticCollector) WaitOn(context.Context, chan struct{}) {}\n\n\/\/ UnWait lets other components stop waiting on a new report being received. It\n\/\/ implements Reporter.\nfunc (c StaticCollector) UnWait(context.Context, chan struct{}) {}\n\n\/\/ NewFileCollector reads and parses the given path, returning a collector\n\/\/ which always returns that report.\nfunc NewFileCollector(path string) (Collector, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar (\n\t\trpt report.Report\n\t\thandle codec.Handle\n\t\tgzipped bool\n\t)\n\tfileType := filepath.Ext(path)\n\tif fileType == \".gz\" {\n\t\tgzipped = true\n\t\tfileType = filepath.Ext(strings.TrimSuffix(path, fileType))\n\t}\n\tswitch fileType {\n\tcase \".json\":\n\t\thandle = &codec.JsonHandle{}\n\tcase \".msgpack\":\n\t\thandle = &codec.MsgpackHandle{}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported file extension: %v\", fileType)\n\t}\n\n\tif err := rpt.ReadBinary(f, gzipped, handle); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn StaticCollector(rpt), nil\n}\n<|endoftext|>"} {"text":"\/**\n * Copyright 2017 Comcast Cable Communications Management, LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/concurrent\"\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/secure\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/handler\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/key\"\n\t\"github.com\/Comcast\/webpa-common\/server\"\n\t\"github.com\/Comcast\/webpa-common\/webhook\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n)\n\nconst (\n\tapplicationName = \"caduceus\"\n\tDEFAULT_KEY_ID = \"current\"\n)\n\n\/\/ getValidator returns validator for JWT tokens\nfunc getValidator(v *viper.Viper) (validator secure.Validator, err error) {\n\tdefault_validators := make(secure.Validators, 0, 0)\n\tvar jwtVals []JWTValidator\n\n\tv.UnmarshalKey(\"jwtValidators\", &jwtVals)\n\n\t\/\/ make sure there is at least one jwtValidator supplied\n\tif len(jwtVals) < 1 {\n\t\tvalidator = default_validators\n\t\treturn\n\t}\n\n\t\/\/ if a JWTKeys section was supplied, configure a JWS validator\n\t\/\/ and append it to the chain of validators\n\tvalidators := make(secure.Validators, 0, len(jwtVals))\n\n\tfor _, validatorDescriptor := range jwtVals {\n\t\tvar keyResolver key.Resolver\n\t\tkeyResolver, err = validatorDescriptor.Keys.NewResolver()\n\t\tif err != nil {\n\t\t\tvalidator = validators\n\t\t\treturn\n\t\t}\n\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.JWSValidator{\n\t\t\t\tDefaultKeyId: DEFAULT_KEY_ID,\n\t\t\t\tResolver: 
keyResolver,\n\t\t\t\tJWTValidators: []*jwt.Validator{validatorDescriptor.Custom.New()},\n\t\t\t},\n\t\t)\n\t}\n\n\t\/\/ TODO: This should really be part of the unmarshalled validators somehow\n\tbasicAuth := v.GetStringSlice(\"authHeader\")\n\tfor _, authValue := range basicAuth {\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.ExactMatchValidator(authValue),\n\t\t)\n\t}\n\n\tvalidator = validators\n\n\treturn\n}\n\n\/\/ caduceus is the driver function for Caduceus. It performs everything main() would do,\n\/\/ except for obtaining the command-line arguments (which are passed to it).\n\nfunc caduceus(arguments []string) int {\n\tbeginCaduceus := time.Now()\n\n\tvar (\n\t\tf = pflag.NewFlagSet(applicationName, pflag.ContinueOnError)\n\t\tv = viper.New()\n\n\t\tlogger, metricsRegistry, webPA, err = server.Initialize(applicationName, arguments, f, v)\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize Viper environment: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tvar (\n\t\tinfoLog = logging.Info(logger)\n\t\terrorLog = logging.Error(logger)\n\t\tdebugLog = logging.Debug(logger)\n\t)\n\n\tinfoLog.Log(\"configurationFile\", v.ConfigFileUsed())\n\n\tcaduceusConfig := new(CaduceusConfig)\n\terr = v.Unmarshal(caduceusConfig)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to unmarshal configuration data into struct: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tworkerPool := WorkerPoolFactory{\n\t\tNumWorkers: caduceusConfig.NumWorkerThreads,\n\t\tQueueSize: caduceusConfig.JobQueueSize,\n\t}.New()\n\n\tmainCaduceusProfilerFactory := ServerProfilerFactory{\n\t\tFrequency: caduceusConfig.ProfilerFrequency,\n\t\tDuration: caduceusConfig.ProfilerDuration,\n\t\tQueueSize: caduceusConfig.ProfilerQueueSize,\n\t\tLogger: logger,\n\t}\n\n\t\/\/ here we create a profiler specifically for our main server handler\n\tcaduceusHandlerProfiler, err := mainCaduceusProfilerFactory.New(\"main\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to create profiler for main caduceus handler: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tchildCaduceusProfilerFactory := mainCaduceusProfilerFactory\n\tchildCaduceusProfilerFactory.Parent = caduceusHandlerProfiler\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: caduceusConfig.SenderNumWorkersPerSender,\n\t\tResponseHeaderTimeout: 10 * time.Second, \/\/ TODO Make this configurable\n\t}\n\n\ttimeout := time.Duration(caduceusConfig.SenderClientTimeout) * time.Second\n\n\t\/\/ declare a new sender wrapper and pass it a profiler factory so that it can create\n\t\/\/ unique profilers on a per outboundSender basis\n\tcaduceusSenderWrapper, err := SenderWrapperFactory{\n\t\tNumWorkersPerSender: caduceusConfig.SenderNumWorkersPerSender,\n\t\tQueueSizePerSender: caduceusConfig.SenderQueueSizePerSender,\n\t\tCutOffPeriod: time.Duration(caduceusConfig.SenderCutOffPeriod) * time.Second,\n\t\tLinger: time.Duration(caduceusConfig.SenderLinger) * time.Second,\n\t\tProfilerFactory: childCaduceusProfilerFactory,\n\t\tLogger: logger,\n\t\tClient: &http.Client{Transport: tr, Timeout: timeout},\n\t}.New()\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize new caduceus sender wrapper: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tserverWrapper := &ServerHandler{\n\t\tLogger: logger,\n\t\tcaduceusHandler: &CaduceusHandler{\n\t\t\thandlerProfiler: caduceusHandlerProfiler,\n\t\t\tsenderWrapper: caduceusSenderWrapper,\n\t\t\tLogger: logger,\n\t\t},\n\t\tdoJob: 
workerPool.Send,\n\t}\n\n\tprofileWrapper := &ProfileHandler{\n\t\tprofilerData: caduceusHandlerProfiler,\n\t\tLogger: logger,\n\t}\n\n\tvalidator, err := getValidator(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Validator error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\tauthHandler := handler.AuthorizationHandler{\n\t\tHeaderName: \"Authorization\",\n\t\tForbiddenStatusCode: 403,\n\t\tValidator: validator,\n\t\tLogger: logger,\n\t}\n\n\tcaduceusHandler := alice.New(authHandler.Decorate, TrackEmptyRequestBody(metricsRegistry))\n\n\trouter := mux.NewRouter()\n\n\trouter = configServerRouter(router, caduceusHandler, serverWrapper)\n\n\trouter.Handle(\"\/api\/v3\/profile\", caduceusHandler.Then(profileWrapper))\n\n\twebhookFactory, err := webhook.NewFactory(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating new webhook factory: %s\\n\", err)\n\t\treturn 1\n\t}\n\twebhookRegistry, webhookHandler := webhookFactory.NewRegistryAndHandler()\n\twebhookFactory.SetExternalUpdate(caduceusSenderWrapper.Update)\n\n\t\/\/ register webhook end points for api\n\trouter.Handle(\"\/hook\", caduceusHandler.ThenFunc(webhookRegistry.UpdateRegistry))\n\trouter.Handle(\"\/hooks\", caduceusHandler.ThenFunc(webhookRegistry.GetRegistry))\n\n\tselfURL := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: v.GetString(\"fqdn\") + v.GetString(\"primary.address\"),\n\t}\n\n\twebhookFactory.Initialize(router, selfURL, webhookHandler, logger, nil)\n\n\tcaduceusHealth := &CaduceusHealth{}\n\tvar runnable concurrent.Runnable\n\n\tcaduceusHealth.Monitor, runnable = webPA.Prepare(logger, nil, metricsRegistry, router)\n\tserverWrapper.caduceusHealth = caduceusHealth\n\n\twaitGroup, shutdown, err := concurrent.Execute(runnable)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to start device manager: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tvar messageKey = logging.MessageKey()\n\n\tdebugLog.Log(messageKey, \"Calling webhookFactory.PrepareAndStart\")\n\tbeginPrepStart := time.Now()\n\twebhookFactory.PrepareAndStart()\n\tdebugLog.Log(messageKey, \"WebhookFactory.PrepareAndStart done.\", \"elapsedTime\", time.Since(beginPrepStart))\n\n\t\/\/ Attempt to obtain the current listener list from current system without having to wait for listener reregistration.\n\tdebugLog.Log(messageKey, \"Attempting to obtain current listener list from source\", \"source\",\n\t\tv.GetString(\"start.apiPath\"))\n\tbeginObtainList := time.Now()\n\tstartChan := make(chan webhook.Result, 1)\n\twebhookFactory.Start.GetCurrentSystemsHooks(startChan)\n\tvar webhookStartResults webhook.Result = <-startChan\n\tif webhookStartResults.Error != nil {\n\t\terrorLog.Log(logging.ErrorKey(), webhookStartResults.Error)\n\t} else {\n\t\t\/\/ todo: add message\n\t\twebhookFactory.SetList(webhook.NewList(webhookStartResults.Hooks))\n\t\tcaduceusSenderWrapper.Update(webhookStartResults.Hooks)\n\t}\n\tdebugLog.Log(messageKey, \"Current listener retrieval.\", \"elapsedTime\", time.Since(beginObtainList))\n\n\tinfoLog.Log(messageKey, \"Caduceus is up and running!\", \"elapsedTime\", time.Since(beginCaduceus))\n\n\tvar (\n\t\tsignals = make(chan os.Signal, 1)\n\t)\n\n\tsignal.Notify(signals)\n\t<-signals\n\tclose(shutdown)\n\twaitGroup.Wait()\n\n\t\/\/ shutdown the sender wrapper gently so that all queued messages get serviced\n\tcaduceusSenderWrapper.Shutdown(true)\n\n\treturn 0\n}\n\nfunc configServerRouter(router *mux.Router, caduceusHandler alice.Chain, serverWrapper *ServerHandler) *mux.Router {\n\tvar singleContentType = func(r *http.Request, _ 
*mux.RouteMatch) bool {\n\t\treturn len(r.Header[\"Content-Type\"]) == 1 \/\/require single specification for Content-Type Header\n\t}\n\n\trouter.Handle(\"\/api\/v3\/notify\", caduceusHandler.Then(serverWrapper)).Methods(\"POST\").\n\t\tHeadersRegexp(\"Content-Type\", \"application\/(json|msgpack)\").MatcherFunc(singleContentType)\n\n\t\/\/ Support the old endpoint too.\n\trouter.Handle(\"\/api\/v2\/notify\/{deviceid}\/event\/{eventtype:.*}\", caduceusHandler.Then(serverWrapper)).\n\t\tMethods(\"POST\").HeadersRegexp(\"Content-Type\", \"application\/(json|msgpack)\").\n\t\tMatcherFunc(singleContentType)\n\n\treturn router\n}\n\nfunc main() {\n\tos.Exit(caduceus(os.Args))\n}\nForgot this: add the module metrics\/**\n * Copyright 2017 Comcast Cable Communications Management, LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\npackage main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t_ \"net\/http\/pprof\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/concurrent\"\n\t\"github.com\/Comcast\/webpa-common\/logging\"\n\t\"github.com\/Comcast\/webpa-common\/secure\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/handler\"\n\t\"github.com\/Comcast\/webpa-common\/secure\/key\"\n\t\"github.com\/Comcast\/webpa-common\/server\"\n\t\"github.com\/Comcast\/webpa-common\/webhook\"\n\t\"github.com\/SermoDigital\/jose\/jwt\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/justinas\/alice\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n)\n\nconst (\n\tapplicationName = \"caduceus\"\n\tDEFAULT_KEY_ID = \"current\"\n)\n\n\/\/ getValidator returns validator for JWT tokens\nfunc getValidator(v *viper.Viper) (validator secure.Validator, err error) {\n\tdefault_validators := make(secure.Validators, 0, 0)\n\tvar jwtVals []JWTValidator\n\n\tv.UnmarshalKey(\"jwtValidators\", &jwtVals)\n\n\t\/\/ make sure there is at least one jwtValidator supplied\n\tif len(jwtVals) < 1 {\n\t\tvalidator = default_validators\n\t\treturn\n\t}\n\n\t\/\/ if a JWTKeys section was supplied, configure a JWS validator\n\t\/\/ and append it to the chain of validators\n\tvalidators := make(secure.Validators, 0, len(jwtVals))\n\n\tfor _, validatorDescriptor := range jwtVals {\n\t\tvar keyResolver key.Resolver\n\t\tkeyResolver, err = validatorDescriptor.Keys.NewResolver()\n\t\tif err != nil {\n\t\t\tvalidator = validators\n\t\t\treturn\n\t\t}\n\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.JWSValidator{\n\t\t\t\tDefaultKeyId: DEFAULT_KEY_ID,\n\t\t\t\tResolver: keyResolver,\n\t\t\t\tJWTValidators: []*jwt.Validator{validatorDescriptor.Custom.New()},\n\t\t\t},\n\t\t)\n\t}\n\n\t\/\/ TODO: This should really be part of the unmarshalled validators somehow\n\tbasicAuth := v.GetStringSlice(\"authHeader\")\n\tfor _, authValue := range basicAuth {\n\t\tvalidators = append(\n\t\t\tvalidators,\n\t\t\tsecure.ExactMatchValidator(authValue),\n\t\t)\n\t}\n\n\tvalidator = validators\n\n\treturn\n}\n\n\/\/ caduceus is the driver 
function for Caduceus. It performs everything main() would do,\n\/\/ except for obtaining the command-line arguments (which are passed to it).\n\nfunc caduceus(arguments []string) int {\n\tbeginCaduceus := time.Now()\n\n\tvar (\n\t\tf = pflag.NewFlagSet(applicationName, pflag.ContinueOnError)\n\t\tv = viper.New()\n\n\t\tlogger, metricsRegistry, webPA, err = server.Initialize(applicationName, arguments, f, v, Metrics)\n\t)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize Viper environment: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tvar (\n\t\tinfoLog = logging.Info(logger)\n\t\terrorLog = logging.Error(logger)\n\t\tdebugLog = logging.Debug(logger)\n\t)\n\n\tinfoLog.Log(\"configurationFile\", v.ConfigFileUsed())\n\n\tcaduceusConfig := new(CaduceusConfig)\n\terr = v.Unmarshal(caduceusConfig)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to unmarshal configuration data into struct: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tworkerPool := WorkerPoolFactory{\n\t\tNumWorkers: caduceusConfig.NumWorkerThreads,\n\t\tQueueSize: caduceusConfig.JobQueueSize,\n\t}.New()\n\n\tmainCaduceusProfilerFactory := ServerProfilerFactory{\n\t\tFrequency: caduceusConfig.ProfilerFrequency,\n\t\tDuration: caduceusConfig.ProfilerDuration,\n\t\tQueueSize: caduceusConfig.ProfilerQueueSize,\n\t\tLogger: logger,\n\t}\n\n\t\/\/ here we create a profiler specifically for our main server handler\n\tcaduceusHandlerProfiler, err := mainCaduceusProfilerFactory.New(\"main\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to create profiler for main caduceus handler: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tchildCaduceusProfilerFactory := mainCaduceusProfilerFactory\n\tchildCaduceusProfilerFactory.Parent = caduceusHandlerProfiler\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: caduceusConfig.SenderNumWorkersPerSender,\n\t\tResponseHeaderTimeout: 10 * time.Second, \/\/ TODO Make this configurable\n\t}\n\n\ttimeout := time.Duration(caduceusConfig.SenderClientTimeout) * time.Second\n\n\t\/\/ declare a new sender wrapper and pass it a profiler factory so that it can create\n\t\/\/ unique profilers on a per outboundSender basis\n\tcaduceusSenderWrapper, err := SenderWrapperFactory{\n\t\tNumWorkersPerSender: caduceusConfig.SenderNumWorkersPerSender,\n\t\tQueueSizePerSender: caduceusConfig.SenderQueueSizePerSender,\n\t\tCutOffPeriod: time.Duration(caduceusConfig.SenderCutOffPeriod) * time.Second,\n\t\tLinger: time.Duration(caduceusConfig.SenderLinger) * time.Second,\n\t\tProfilerFactory: childCaduceusProfilerFactory,\n\t\tLogger: logger,\n\t\tClient: &http.Client{Transport: tr, Timeout: timeout},\n\t}.New()\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to initialize new caduceus sender wrapper: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tserverWrapper := &ServerHandler{\n\t\tLogger: logger,\n\t\tcaduceusHandler: &CaduceusHandler{\n\t\t\thandlerProfiler: caduceusHandlerProfiler,\n\t\t\tsenderWrapper: caduceusSenderWrapper,\n\t\t\tLogger: logger,\n\t\t},\n\t\tdoJob: workerPool.Send,\n\t}\n\n\tprofileWrapper := &ProfileHandler{\n\t\tprofilerData: caduceusHandlerProfiler,\n\t\tLogger: logger,\n\t}\n\n\tvalidator, err := getValidator(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Validator error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\tauthHandler := handler.AuthorizationHandler{\n\t\tHeaderName: \"Authorization\",\n\t\tForbiddenStatusCode: 403,\n\t\tValidator: validator,\n\t\tLogger: logger,\n\t}\n\n\tcaduceusHandler := 
alice.New(authHandler.Decorate, TrackEmptyRequestBody(metricsRegistry))\n\n\trouter := mux.NewRouter()\n\n\trouter = configServerRouter(router, caduceusHandler, serverWrapper)\n\n\trouter.Handle(\"\/api\/v3\/profile\", caduceusHandler.Then(profileWrapper))\n\n\twebhookFactory, err := webhook.NewFactory(v)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating new webhook factory: %s\\n\", err)\n\t\treturn 1\n\t}\n\twebhookRegistry, webhookHandler := webhookFactory.NewRegistryAndHandler()\n\twebhookFactory.SetExternalUpdate(caduceusSenderWrapper.Update)\n\n\t\/\/ register webhook end points for api\n\trouter.Handle(\"\/hook\", caduceusHandler.ThenFunc(webhookRegistry.UpdateRegistry))\n\trouter.Handle(\"\/hooks\", caduceusHandler.ThenFunc(webhookRegistry.GetRegistry))\n\n\tselfURL := &url.URL{\n\t\tScheme: \"https\",\n\t\tHost: v.GetString(\"fqdn\") + v.GetString(\"primary.address\"),\n\t}\n\n\twebhookFactory.Initialize(router, selfURL, webhookHandler, logger, nil)\n\n\tcaduceusHealth := &CaduceusHealth{}\n\tvar runnable concurrent.Runnable\n\n\tcaduceusHealth.Monitor, runnable = webPA.Prepare(logger, nil, metricsRegistry, router)\n\tserverWrapper.caduceusHealth = caduceusHealth\n\n\twaitGroup, shutdown, err := concurrent.Execute(runnable)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to start device manager: %s\\n\", err)\n\t\treturn 1\n\t}\n\n\tvar messageKey = logging.MessageKey()\n\n\tdebugLog.Log(messageKey, \"Calling webhookFactory.PrepareAndStart\")\n\tbeginPrepStart := time.Now()\n\twebhookFactory.PrepareAndStart()\n\tdebugLog.Log(messageKey, \"WebhookFactory.PrepareAndStart done.\", \"elapsedTime\", time.Since(beginPrepStart))\n\n\t\/\/ Attempt to obtain the current listener list from current system without having to wait for listener reregistration.\n\tdebugLog.Log(messageKey, \"Attempting to obtain current listener list from source\", \"source\",\n\t\tv.GetString(\"start.apiPath\"))\n\tbeginObtainList := time.Now()\n\tstartChan := make(chan webhook.Result, 1)\n\twebhookFactory.Start.GetCurrentSystemsHooks(startChan)\n\tvar webhookStartResults webhook.Result = <-startChan\n\tif webhookStartResults.Error != nil {\n\t\terrorLog.Log(logging.ErrorKey(), webhookStartResults.Error)\n\t} else {\n\t\t\/\/ todo: add message\n\t\twebhookFactory.SetList(webhook.NewList(webhookStartResults.Hooks))\n\t\tcaduceusSenderWrapper.Update(webhookStartResults.Hooks)\n\t}\n\tdebugLog.Log(messageKey, \"Current listener retrieval.\", \"elapsedTime\", time.Since(beginObtainList))\n\n\tinfoLog.Log(messageKey, \"Caduceus is up and running!\", \"elapsedTime\", time.Since(beginCaduceus))\n\n\tvar (\n\t\tsignals = make(chan os.Signal, 1)\n\t)\n\n\tsignal.Notify(signals)\n\t<-signals\n\tclose(shutdown)\n\twaitGroup.Wait()\n\n\t\/\/ shutdown the sender wrapper gently so that all queued messages get serviced\n\tcaduceusSenderWrapper.Shutdown(true)\n\n\treturn 0\n}\n\nfunc configServerRouter(router *mux.Router, caduceusHandler alice.Chain, serverWrapper *ServerHandler) *mux.Router {\n\tvar singleContentType = func(r *http.Request, _ *mux.RouteMatch) bool {\n\t\treturn len(r.Header[\"Content-Type\"]) == 1 \/\/require single specification for Content-Type Header\n\t}\n\n\trouter.Handle(\"\/api\/v3\/notify\", caduceusHandler.Then(serverWrapper)).Methods(\"POST\").\n\t\tHeadersRegexp(\"Content-Type\", \"application\/(json|msgpack)\").MatcherFunc(singleContentType)\n\n\t\/\/ Support the old endpoint too.\n\trouter.Handle(\"\/api\/v2\/notify\/{deviceid}\/event\/{eventtype:.*}\", 
caduceusHandler.Then(serverWrapper)).\n\t\tMethods(\"POST\").HeadersRegexp(\"Content-Type\", \"application\/(json|msgpack)\").\n\t\tMatcherFunc(singleContentType)\n\n\treturn router\n}\n\nfunc main() {\n\tos.Exit(caduceus(os.Args))\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\n\/\/ Copyright (c) 2015 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\n\/\/ This test ensures that the code generator generates valid code that can be built\n\/\/ in combination with Thrift's autogenerated code.\n\nfunc TestAllThrift(t *testing.T) {\n\tfiles, err := ioutil.ReadDir(\"test_files\")\n\tif err != nil {\n\t\tt.Fatalf(\"Cannot read test_files directory: %v\", err)\n\t}\n\n\tfor _, f := range files {\n\t\tfname := f.Name()\n\t\tif filepath.Ext(fname) != \".thrift\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := runTest(t, filepath.Join(\"test_files\", f.Name())); err != nil {\n\t\t\tt.Errorf(\"Thrift file %v failed: %v\", f.Name(), err)\n\t\t}\n\t}\n}\n\nfunc copyFile(src, dst string) error {\n\tf, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\twriteF, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer writeF.Close()\n\n\t_, err = io.Copy(writeF, f)\n\treturn err\n}\n\nfunc setupDirectory(thriftFile string) (string, string, error) {\n\t\/\/ Create a temporary directory\n\ttempDir, err := ioutil.TempDir(\"\", \"thrift-gen\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t\/\/ Copy the .thrift file to the directory\n\toutFile := filepath.Join(tempDir, \"test.thrift\")\n\treturn tempDir, outFile, copyFile(thriftFile, outFile)\n}\n\nfunc runTest(t *testing.T, thriftFile string) error {\n\ttempDir, thriftFile, err := setupDirectory(thriftFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Run generate.sh for the given directory\n\tt.Logf(\"runTest in %v\", tempDir)\n\tif err := processFile(true \/* generateThrift *\/, thriftFile, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"processFile(%s) failed: %v\", thriftFile, err)\n\t}\n\n\t\/\/ If the generate is successful, run go build in the directory.\n\tcmd := exec.Command(\"go\", \"build\", \".\")\n\tcmd.Dir = filepath.Join(tempDir, \"gen-go\", \"test\")\n\tif output, err := cmd.CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"Build failed. 
Output = \\n%v\\n\", string(output))\n\t}\n\n\t\/\/ Only delete the temp directory on success.\n\tos.RemoveAll(tempDir)\n\treturn nil\n}\nminor comment updatespackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\n\/\/ Copyright (c) 2015 Uber Technologies, Inc.\n\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\n\/\/ This test ensures that the code generator generates valid code that can be built\n\/\/ in combination with Thrift's autogenerated code.\n\nfunc TestAllThrift(t *testing.T) {\n\tfiles, err := ioutil.ReadDir(\"test_files\")\n\tif err != nil {\n\t\tt.Fatalf(\"Cannot read test_files directory: %v\", err)\n\t}\n\n\tfor _, f := range files {\n\t\tfname := f.Name()\n\t\tif filepath.Ext(fname) != \".thrift\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := runTest(t, filepath.Join(\"test_files\", f.Name())); err != nil {\n\t\t\tt.Errorf(\"Thrift file %v failed: %v\", f.Name(), err)\n\t\t}\n\t}\n}\n\nfunc copyFile(src, dst string) error {\n\tf, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\twriteF, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer writeF.Close()\n\n\t_, err = io.Copy(writeF, f)\n\treturn err\n}\n\n\/\/ setupDirectory creates a temporary directory and copies the Thrift file into that directory.\nfunc setupDirectory(thriftFile string) (string, string, error) {\n\ttempDir, err := ioutil.TempDir(\"\", \"thrift-gen\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\toutFile := filepath.Join(tempDir, \"test.thrift\")\n\treturn tempDir, outFile, copyFile(thriftFile, outFile)\n}\n\nfunc runTest(t *testing.T, thriftFile string) error {\n\ttempDir, thriftFile, err := setupDirectory(thriftFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Generate code from the Thrift file.\n\tt.Logf(\"runTest in %v\", tempDir)\n\tif err := processFile(true \/* generateThrift *\/, thriftFile, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"processFile(%s) failed: %v\", thriftFile, err)\n\t}\n\n\t\/\/ Run go build to ensure that the generated code builds.\n\tcmd := exec.Command(\"go\", \"build\", \".\")\n\tcmd.Dir = filepath.Join(tempDir, \"gen-go\", \"test\")\n\tif output, err := cmd.CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"Build failed. 
Output = \\n%v\\n\", string(output))\n\t}\n\n\t\/\/ Only delete the temp directory on success.\n\tos.RemoveAll(tempDir)\n\treturn nil\n}\n<|endoftext|>"} {"text":"package path_matcher\n\nimport (\n\t\"strings\"\n)\n\nfunc NewMultiPathMatcher(PathMatchers ...PathMatcher) PathMatcher {\n\tif len(PathMatchers) == 0 {\n\t\tpanic(\"the multi path matcher cannot be initialized without any matcher\")\n\t}\n\n\treturn &MultiPathMatcher{PathMatchers: PathMatchers}\n}\n\ntype MultiPathMatcher struct {\n\tPathMatchers []PathMatcher\n}\n\nfunc (f *MultiPathMatcher) IsDirOrSubmodulePathMatched(path string) bool {\n\treturn f.IsPathMatched(path) || f.ShouldGoThrough(path)\n}\n\nfunc (m *MultiPathMatcher) IsPathMatched(path string) bool {\n\tfor _, matcher := range m.PathMatchers {\n\t\tif !matcher.IsPathMatched(path) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *MultiPathMatcher) ShouldGoThrough(path string) bool {\n\tfor _, matcher := range m.PathMatchers {\n\t\tif !matcher.ShouldGoThrough(path) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *MultiPathMatcher) TrimFileBaseFilepath(path string) string {\n\treturn m.PathMatchers[0].TrimFileBaseFilepath(path)\n}\n\nfunc (m *MultiPathMatcher) BaseFilepath() string {\n\treturn m.PathMatchers[0].BaseFilepath()\n}\n\nfunc (m *MultiPathMatcher) String() string {\n\tvar result []string\n\tfor _, matcher := range m.PathMatchers {\n\t\tresult = append(result, matcher.String())\n\t}\n\n\treturn strings.Join(result, \"; \")\n}\n[path_matcher] Update multiPathMatcher string formatpackage path_matcher\n\nimport (\n\t\"strings\"\n)\n\nfunc NewMultiPathMatcher(PathMatchers ...PathMatcher) PathMatcher {\n\tif len(PathMatchers) == 0 {\n\t\tpanic(\"the multi path matcher cannot be initialized without any matcher\")\n\t}\n\n\treturn &MultiPathMatcher{PathMatchers: PathMatchers}\n}\n\ntype MultiPathMatcher struct {\n\tPathMatchers []PathMatcher\n}\n\nfunc (f *MultiPathMatcher) IsDirOrSubmodulePathMatched(path string) bool {\n\treturn f.IsPathMatched(path) || f.ShouldGoThrough(path)\n}\n\nfunc (m *MultiPathMatcher) IsPathMatched(path string) bool {\n\tfor _, matcher := range m.PathMatchers {\n\t\tif !matcher.IsPathMatched(path) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *MultiPathMatcher) ShouldGoThrough(path string) bool {\n\tfor _, matcher := range m.PathMatchers {\n\t\tif !matcher.ShouldGoThrough(path) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (m *MultiPathMatcher) TrimFileBaseFilepath(path string) string {\n\treturn m.PathMatchers[0].TrimFileBaseFilepath(path)\n}\n\nfunc (m *MultiPathMatcher) BaseFilepath() string {\n\treturn m.PathMatchers[0].BaseFilepath()\n}\n\nfunc (m *MultiPathMatcher) String() string {\n\tvar result []string\n\tfor _, matcher := range m.PathMatchers {\n\t\tresult = append(result, matcher.String())\n\t}\n\n\treturn strings.Join(result, \" && \")\n}\n<|endoftext|>"} {"text":"package config\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hellofresh\/logging-go\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Specification for basic configurations\ntype Specification struct {\n\tPort int `envconfig:\"PORT\"`\n\tDebug bool `envconfig:\"DEBUG\"`\n\tGraceTimeOut int64 `envconfig:\"GRACE_TIMEOUT\"`\n\tMaxIdleConnsPerHost int `envconfig:\"MAX_IDLE_CONNS_PER_HOST\"`\n\tBackendFlushInterval time.Duration `envconfig:\"BACKEND_FLUSH_INTERVAL\"`\n\tCloseIdleConnsPeriod time.Duration 
`envconfig:\"CLOSE_IDLE_CONNS_PERIOD\"`\n\tLog logging.LogConfig\n\tWeb Web\n\tDatabase Database\n\tStorage Storage\n\tStats Stats\n\tTracing Tracing\n\tTLS TLS\n}\n\n\/\/ Web represents the API configurations\ntype Web struct {\n\tPort int `envconfig:\"API_PORT\"`\n\tReadOnly bool `envconfig:\"API_READONLY\"`\n\tCredentials Credentials\n\tTLS TLS\n}\n\n\/\/ TLS represents the TLS configurations\ntype TLS struct {\n\tPort int `envconfig:\"PORT\"`\n\tCertFile string `envconfig:\"CERT_PATH\"`\n\tKeyFile string `envconfig:\"KEY_PATH\"`\n\tRedirect bool `envconfig:\"REDIRECT\"`\n}\n\n\/\/ IsHTTPS checks if you have https enabled\nfunc (s *TLS) IsHTTPS() bool {\n\treturn s.CertFile != \"\" && s.KeyFile != \"\"\n}\n\n\/\/ Storage holds the configuration for a storage\ntype Storage struct {\n\tDSN string `envconfig:\"STORAGE_DSN\"`\n}\n\n\/\/ Database holds the configuration for a database\ntype Database struct {\n\tDSN string `envconfig:\"DATABASE_DSN\"`\n}\n\n\/\/ Stats holds the configuration for stats\ntype Stats struct {\n\tDSN string `envconfig:\"STATS_DSN\"`\n\tPrefix string `envconfig:\"STATS_PREFIX\"`\n\tIDs string `envconfig:\"STATS_IDS\"`\n\tAutoDiscoverThreshold uint `envconfig:\"STATS_AUTO_DISCOVER_THRESHOLD\"`\n\tAutoDiscoverWhiteList []string `envconfig:\"STATS_AUTO_DISCOVER_WHITE_LIST\"`\n\tErrorsSection string `envconfig:\"STATS_ERRORS_SECTION\"`\n}\n\n\/\/ Credentials represents the credentials that are going to be\n\/\/ used by admin JWT configuration\ntype Credentials struct {\n\t\/\/ Algorithm defines admin JWT signing algorithm.\n\t\/\/ Currently the following algorithms are supported: HS256, HS384, HS512.\n\tAlgorithm string `envconfig:\"ALGORITHM\"`\n\tSecret string `envconfig:\"SECRET\"`\n\tUsername string `envconfig:\"ADMIN_USERNAME\"`\n\tPassword string `envconfig:\"ADMIN_PASSWORD\"`\n}\n\n\/\/ GoogleCloudTracing holds the Google Application Default Credentials\ntype GoogleCloudTracing struct {\n\tProjectID string `envconfig:\"TRACING_GC_PROJECT_ID\"`\n\tEmail string `envconfig:\"TRACING_GC_EMAIL\"`\n\tPrivateKey string `envconfig:\"TRACING_GC_PRIVATE_KEY\"`\n\tPrivateKeyID string `envconfig:\"TRACING_GC_PRIVATE_ID\"`\n}\n\n\/\/ AppdashTracing holds the Appdash tracing configuration\ntype AppdashTracing struct {\n\tDSN string `envconfig:\"TRACING_APPDASH_DSN\"`\n\tURL string `envconfig:\"TRACING_APPDASH_URL\"`\n}\n\n\/\/ Tracing represents the distributed tracing configuration\ntype Tracing struct {\n\tGoogleCloudTracing GoogleCloudTracing `mapstructure:\"googleCloud\"`\n\tAppdashTracing AppdashTracing `mapstructure:\"appdash\"`\n}\n\n\/\/ IsGoogleCloudEnabled checks if google cloud is enabled\nfunc (t Tracing) IsGoogleCloudEnabled() bool {\n\treturn len(t.GoogleCloudTracing.Email) > 0 && len(t.GoogleCloudTracing.PrivateKey) > 0 && len(t.GoogleCloudTracing.PrivateKeyID) > 0 && len(t.GoogleCloudTracing.ProjectID) > 0\n}\n\n\/\/ IsAppdashEnabled checks if appdash is enabled\nfunc (t Tracing) IsAppdashEnabled() bool {\n\treturn len(t.AppdashTracing.DSN) > 0\n}\n\nfunc init() {\n\tviper.SetDefault(\"port\", \"8080\")\n\tviper.SetDefault(\"tls.port\", \"8433\")\n\tviper.SetDefault(\"tls.redirect\", true)\n\tviper.SetDefault(\"backendFlushInterval\", \"20ms\")\n\tviper.SetDefault(\"database.dsn\", \"file:\/\/\/etc\/janus\")\n\tviper.SetDefault(\"storage.dsn\", \"memory:\/\/localhost\")\n\tviper.SetDefault(\"web.port\", \"8081\")\n\tviper.SetDefault(\"web.tls.port\", \"8444\")\n\tviper.SetDefault(\"web.tls.redisrect\", true)\n\tviper.SetDefault(\"web.credentials.algorithm\", 
\"HS256\")\n\tviper.SetDefault(\"web.credentials.username\", \"admin\")\n\tviper.SetDefault(\"web.credentials.password\", \"admin\")\n\tviper.SetDefault(\"stats.dsn\", \"log:\/\/\")\n\tviper.SetDefault(\"stats.errorsSection\", \"error-log\")\n\n\tlogging.InitDefaults(viper.GetViper(), \"log\")\n}\n\n\/\/Load configuration variables\nfunc Load(configFile string) (*Specification, error) {\n\tif configFile != \"\" {\n\t\tviper.SetConfigFile(configFile)\n\t} else {\n\t\tviper.SetConfigName(\"janus\")\n\t\tviper.AddConfigPath(\"\/etc\/janus\")\n\t\tviper.AddConfigPath(\".\")\n\t}\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlog.WithError(err).Warn(\"No config file found\")\n\t\treturn LoadEnv()\n\t}\n\n\tvar config Specification\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n\n\/\/LoadEnv loads configuration from environment variables\nfunc LoadEnv() (*Specification, error) {\n\tvar config Specification\n\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := envconfig.Process(\"\", &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\nAdded github configurationpackage config\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hellofresh\/logging-go\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Specification for basic configurations\ntype Specification struct {\n\tPort int `envconfig:\"PORT\"`\n\tDebug bool `envconfig:\"DEBUG\"`\n\tGraceTimeOut int64 `envconfig:\"GRACE_TIMEOUT\"`\n\tMaxIdleConnsPerHost int `envconfig:\"MAX_IDLE_CONNS_PER_HOST\"`\n\tBackendFlushInterval time.Duration `envconfig:\"BACKEND_FLUSH_INTERVAL\"`\n\tCloseIdleConnsPeriod time.Duration `envconfig:\"CLOSE_IDLE_CONNS_PERIOD\"`\n\tLog logging.LogConfig\n\tWeb Web\n\tDatabase Database\n\tStorage Storage\n\tStats Stats\n\tTracing Tracing\n\tTLS TLS\n}\n\n\/\/ Web represents the API configurations\ntype Web struct {\n\tPort int `envconfig:\"API_PORT\"`\n\tReadOnly bool `envconfig:\"API_READONLY\"`\n\tCredentials Credentials\n\tTLS TLS\n}\n\n\/\/ TLS represents the TLS configurations\ntype TLS struct {\n\tPort int `envconfig:\"PORT\"`\n\tCertFile string `envconfig:\"CERT_PATH\"`\n\tKeyFile string `envconfig:\"KEY_PATH\"`\n\tRedirect bool `envconfig:\"REDIRECT\"`\n}\n\n\/\/ IsHTTPS checks if you have https enabled\nfunc (s *TLS) IsHTTPS() bool {\n\treturn s.CertFile != \"\" && s.KeyFile != \"\"\n}\n\n\/\/ Storage holds the configuration for a storage\ntype Storage struct {\n\tDSN string `envconfig:\"STORAGE_DSN\"`\n}\n\n\/\/ Database holds the configuration for a database\ntype Database struct {\n\tDSN string `envconfig:\"DATABASE_DSN\"`\n}\n\n\/\/ Stats holds the configuration for stats\ntype Stats struct {\n\tDSN string `envconfig:\"STATS_DSN\"`\n\tPrefix string `envconfig:\"STATS_PREFIX\"`\n\tIDs string `envconfig:\"STATS_IDS\"`\n\tAutoDiscoverThreshold uint `envconfig:\"STATS_AUTO_DISCOVER_THRESHOLD\"`\n\tAutoDiscoverWhiteList []string `envconfig:\"STATS_AUTO_DISCOVER_WHITE_LIST\"`\n\tErrorsSection string `envconfig:\"STATS_ERRORS_SECTION\"`\n}\n\n\/\/ Credentials represents the credentials that are going to be\n\/\/ used by admin JWT configuration\ntype Credentials struct {\n\t\/\/ Algorithm defines admin JWT signing algorithm.\n\t\/\/ Currently the following algorithms are supported: HS256, HS384, HS512.\n\tAlgorithm string `envconfig:\"ALGORITHM\"`\n\tSecret string `envconfig:\"SECRET\"`\n\tGithub Github\n}\n\n\/\/ 
Github holds the github configurations\ntype Github struct {\n\tOrganizations []string `envconfig:\"GITHUB_ORGANIZATIONS\"`\n\tTeams []GitHubTeamConfig `envconfig:\"GITHUB_TEAMS\"`\n}\n\n\/\/ GitHubTeamConfig represents a team configuration\ntype GitHubTeamConfig struct {\n\tOrganizationName string `json:\"organization_name,omitempty\"`\n\tTeamName string `json:\"team_name,omitempty\"`\n}\n\n\/\/ IsConfigured checks if github is enabled\nfunc (auth *Github) IsConfigured() bool {\n\treturn len(auth.Organizations) > 0 ||\n\t\tlen(auth.Teams) > 0\n}\n\n\/\/ GoogleCloudTracing holds the Google Application Default Credentials\ntype GoogleCloudTracing struct {\n\tProjectID string `envconfig:\"TRACING_GC_PROJECT_ID\"`\n\tEmail string `envconfig:\"TRACING_GC_EMAIL\"`\n\tPrivateKey string `envconfig:\"TRACING_GC_PRIVATE_KEY\"`\n\tPrivateKeyID string `envconfig:\"TRACING_GC_PRIVATE_ID\"`\n}\n\n\/\/ AppdashTracing holds the Appdash tracing configuration\ntype AppdashTracing struct {\n\tDSN string `envconfig:\"TRACING_APPDASH_DSN\"`\n\tURL string `envconfig:\"TRACING_APPDASH_URL\"`\n}\n\n\/\/ Tracing represents the distributed tracing configuration\ntype Tracing struct {\n\tGoogleCloudTracing GoogleCloudTracing `mapstructure:\"googleCloud\"`\n\tAppdashTracing AppdashTracing `mapstructure:\"appdash\"`\n}\n\n\/\/ IsGoogleCloudEnabled checks if google cloud is enabled\nfunc (t Tracing) IsGoogleCloudEnabled() bool {\n\treturn len(t.GoogleCloudTracing.Email) > 0 && len(t.GoogleCloudTracing.PrivateKey) > 0 && len(t.GoogleCloudTracing.PrivateKeyID) > 0 && len(t.GoogleCloudTracing.ProjectID) > 0\n}\n\n\/\/ IsAppdashEnabled checks if appdash is enabled\nfunc (t Tracing) IsAppdashEnabled() bool {\n\treturn len(t.AppdashTracing.DSN) > 0\n}\n\nfunc init() {\n\tviper.SetDefault(\"port\", \"8080\")\n\tviper.SetDefault(\"tls.port\", \"8433\")\n\tviper.SetDefault(\"tls.redirect\", true)\n\tviper.SetDefault(\"backendFlushInterval\", \"20ms\")\n\tviper.SetDefault(\"database.dsn\", \"file:\/\/\/etc\/janus\")\n\tviper.SetDefault(\"storage.dsn\", \"memory:\/\/localhost\")\n\tviper.SetDefault(\"web.port\", \"8081\")\n\tviper.SetDefault(\"web.tls.port\", \"8444\")\n\tviper.SetDefault(\"web.tls.redisrect\", true)\n\tviper.SetDefault(\"web.credentials.algorithm\", \"HS256\")\n\tviper.SetDefault(\"web.credentials.username\", \"admin\")\n\tviper.SetDefault(\"web.credentials.password\", \"admin\")\n\tviper.SetDefault(\"stats.dsn\", \"log:\/\/\")\n\tviper.SetDefault(\"stats.errorsSection\", \"error-log\")\n\n\tlogging.InitDefaults(viper.GetViper(), \"log\")\n}\n\n\/\/Load configuration variables\nfunc Load(configFile string) (*Specification, error) {\n\tif configFile != \"\" {\n\t\tviper.SetConfigFile(configFile)\n\t} else {\n\t\tviper.SetConfigName(\"janus\")\n\t\tviper.AddConfigPath(\"\/etc\/janus\")\n\t\tviper.AddConfigPath(\".\")\n\t}\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlog.WithError(err).Warn(\"No config file found\")\n\t\treturn LoadEnv()\n\t}\n\n\tvar config Specification\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n\n\/\/LoadEnv loads configuration from environment variables\nfunc LoadEnv() (*Specification, error) {\n\tvar config Specification\n\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := envconfig.Process(\"\", &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n<|endoftext|>"} {"text":"package filesystem\n\nimport 
(\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"io\"\n)\n\ntype NumLinksTable map[uint64]int\n\ntype GenericInode interface {\n\tList(w io.Writer, name string, numLinksTable NumLinksTable,\n\t\tnumLinks int) error\n}\n\ntype InodeTable map[uint64]GenericInode\ntype InodeToFilenamesTable map[uint64][]string\ntype HashToInodesTable map[hash.Hash][]uint64\n\ntype FileSystem struct {\n\tInodeTable InodeTable\n\tInodeToFilenamesTable InodeToFilenamesTable\n\tHashToInodesTable HashToInodesTable\n\tNumRegularInodes uint64\n\tTotalDataBytes uint64\n\tDirectoryCount uint64\n\tDirectoryInode\n}\n\nfunc (fs *FileSystem) RebuildInodePointers() {\n\tfs.rebuildInodePointers()\n}\n\nfunc (fs *FileSystem) BuildInodeToFilenamesTable() {\n\tfs.buildInodeToFilenamesTable()\n}\n\nfunc (fs *FileSystem) BuildHashToInodesTable() {\n\tfs.buildHashToInodesTable()\n}\n\nfunc (fs *FileSystem) ComputeTotalDataBytes() {\n\tfs.computeTotalDataBytes()\n}\n\nfunc (fs *FileSystem) List(w io.Writer) error {\n\treturn fs.list(w)\n}\n\ntype DirectoryInode struct {\n\tEntryList []*DirectoryEntry\n\tEntriesByName map[string]*DirectoryEntry\n\tMode FileMode\n\tUid uint32\n\tGid uint32\n}\n\nfunc (directory *DirectoryInode) BuildEntryMap() {\n\tdirectory.buildEntryMap()\n}\n\nfunc (inode *DirectoryInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype DirectoryEntry struct {\n\tName string\n\tInodeNumber uint64\n\tinode GenericInode \/\/ Keep private to avoid encoding\/transmission.\n}\n\nfunc (dirent *DirectoryEntry) Inode() GenericInode {\n\treturn dirent.inode\n}\n\nfunc (dirent *DirectoryEntry) SetInode(inode GenericInode) {\n\tdirent.inode = inode\n}\n\nfunc (dirent *DirectoryEntry) String() string {\n\treturn dirent.Name\n}\n\ntype RegularInode struct {\n\tMode FileMode\n\tUid uint32\n\tGid uint32\n\tMtimeNanoSeconds int32\n\tMtimeSeconds int64\n\tSize uint64\n\tHash hash.Hash\n}\n\nfunc (inode *RegularInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype SymlinkInode struct {\n\tUid uint32\n\tGid uint32\n\tSymlink string\n}\n\nfunc (inode *SymlinkInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype SpecialInode struct {\n\tMode FileMode\n\tUid uint32\n\tGid uint32\n\tMtimeNanoSeconds int32\n\tMtimeSeconds int64\n\tRdev uint64\n}\n\nfunc (inode *SpecialInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype FileMode uint32\n\nfunc (mode FileMode) String() string {\n\treturn mode.string()\n}\n\nfunc CompareFileSystems(left, right *FileSystem, logWriter io.Writer) bool {\n\treturn compareFileSystems(left, right, logWriter)\n}\n\nfunc CompareDirectoryInodes(left, right *DirectoryInode,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoryInodes(left, right, logWriter)\n}\n\nfunc CompareDirectoriesMetadata(left, right *DirectoryInode,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoriesMetadata(left, right, logWriter)\n}\n\nfunc CompareDirectoryEntries(left, right *DirectoryEntry,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoryEntries(left, right, logWriter)\n}\n\nfunc CompareInodes(left, right GenericInode, logWriter io.Writer) (\n\tsameType, sameMetadata, sameData bool) {\n\treturn 
compareInodes(left, right, logWriter)\n}\n\nfunc CompareRegularInodes(left, right *RegularInode, logWriter io.Writer) bool {\n\treturn compareRegularInodes(left, right, logWriter)\n}\n\nfunc CompareRegularInodesMetadata(left, right *RegularInode,\n\tlogWriter io.Writer) bool {\n\treturn compareRegularInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareRegularInodesData(left, right *RegularInode,\n\tlogWriter io.Writer) bool {\n\treturn compareRegularInodesData(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodes(left, right *SymlinkInode, logWriter io.Writer) bool {\n\treturn compareSymlinkInodes(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodesMetadata(left, right *SymlinkInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSymlinkInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodesData(left, right *SymlinkInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSymlinkInodesData(left, right, logWriter)\n}\n\nfunc CompareSpecialInodes(left, right *SpecialInode, logWriter io.Writer) bool {\n\treturn compareSpecialInodes(left, right, logWriter)\n}\n\nfunc CompareSpecialInodesMetadata(left, right *SpecialInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSpecialInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareSpecialInodesData(left, right *SpecialInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSpecialInodesData(left, right, logWriter)\n}\nAdd GetUid() and GetGid() methods to GenericInode interface.package filesystem\n\nimport (\n\t\"github.com\/Symantec\/Dominator\/lib\/hash\"\n\t\"io\"\n)\n\ntype NumLinksTable map[uint64]int\n\ntype GenericInode interface {\n\tGetUid() uint32\n\tGetGid() uint32\n\tList(w io.Writer, name string, numLinksTable NumLinksTable,\n\t\tnumLinks int) error\n}\n\ntype InodeTable map[uint64]GenericInode\ntype InodeToFilenamesTable map[uint64][]string\ntype HashToInodesTable map[hash.Hash][]uint64\n\ntype FileSystem struct {\n\tInodeTable InodeTable\n\tInodeToFilenamesTable InodeToFilenamesTable\n\tHashToInodesTable HashToInodesTable\n\tNumRegularInodes uint64\n\tTotalDataBytes uint64\n\tDirectoryCount uint64\n\tDirectoryInode\n}\n\nfunc (fs *FileSystem) RebuildInodePointers() {\n\tfs.rebuildInodePointers()\n}\n\nfunc (fs *FileSystem) BuildInodeToFilenamesTable() {\n\tfs.buildInodeToFilenamesTable()\n}\n\nfunc (fs *FileSystem) BuildHashToInodesTable() {\n\tfs.buildHashToInodesTable()\n}\n\nfunc (fs *FileSystem) ComputeTotalDataBytes() {\n\tfs.computeTotalDataBytes()\n}\n\nfunc (fs *FileSystem) List(w io.Writer) error {\n\treturn fs.list(w)\n}\n\ntype DirectoryInode struct {\n\tEntryList []*DirectoryEntry\n\tEntriesByName map[string]*DirectoryEntry\n\tMode FileMode\n\tUid uint32\n\tGid uint32\n}\n\nfunc (directory *DirectoryInode) BuildEntryMap() {\n\tdirectory.buildEntryMap()\n}\n\nfunc (inode *DirectoryInode) GetUid() uint32 {\n\treturn inode.Uid\n}\n\nfunc (inode *DirectoryInode) GetGid() uint32 {\n\treturn inode.Gid\n}\n\nfunc (inode *DirectoryInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype DirectoryEntry struct {\n\tName string\n\tInodeNumber uint64\n\tinode GenericInode \/\/ Keep private to avoid encoding\/transmission.\n}\n\nfunc (dirent *DirectoryEntry) Inode() GenericInode {\n\treturn dirent.inode\n}\n\nfunc (dirent *DirectoryEntry) SetInode(inode GenericInode) {\n\tdirent.inode = inode\n}\n\nfunc (dirent *DirectoryEntry) String() string {\n\treturn dirent.Name\n}\n\ntype RegularInode struct {\n\tMode 
FileMode\n\tUid uint32\n\tGid uint32\n\tMtimeNanoSeconds int32\n\tMtimeSeconds int64\n\tSize uint64\n\tHash hash.Hash\n}\n\nfunc (inode *RegularInode) GetUid() uint32 {\n\treturn inode.Uid\n}\n\nfunc (inode *RegularInode) GetGid() uint32 {\n\treturn inode.Gid\n}\n\nfunc (inode *RegularInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype SymlinkInode struct {\n\tUid uint32\n\tGid uint32\n\tSymlink string\n}\n\nfunc (inode *SymlinkInode) GetUid() uint32 {\n\treturn inode.Uid\n}\n\nfunc (inode *SymlinkInode) GetGid() uint32 {\n\treturn inode.Gid\n}\n\nfunc (inode *SymlinkInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype SpecialInode struct {\n\tMode FileMode\n\tUid uint32\n\tGid uint32\n\tMtimeNanoSeconds int32\n\tMtimeSeconds int64\n\tRdev uint64\n}\n\nfunc (inode *SpecialInode) GetUid() uint32 {\n\treturn inode.Uid\n}\n\nfunc (inode *SpecialInode) GetGid() uint32 {\n\treturn inode.Gid\n}\n\nfunc (inode *SpecialInode) List(w io.Writer, name string,\n\tnumLinksTable NumLinksTable, numLinks int) error {\n\treturn inode.list(w, name, numLinksTable, numLinks)\n}\n\ntype FileMode uint32\n\nfunc (mode FileMode) String() string {\n\treturn mode.string()\n}\n\nfunc CompareFileSystems(left, right *FileSystem, logWriter io.Writer) bool {\n\treturn compareFileSystems(left, right, logWriter)\n}\n\nfunc CompareDirectoryInodes(left, right *DirectoryInode,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoryInodes(left, right, logWriter)\n}\n\nfunc CompareDirectoriesMetadata(left, right *DirectoryInode,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoriesMetadata(left, right, logWriter)\n}\n\nfunc CompareDirectoryEntries(left, right *DirectoryEntry,\n\tlogWriter io.Writer) bool {\n\treturn compareDirectoryEntries(left, right, logWriter)\n}\n\nfunc CompareInodes(left, right GenericInode, logWriter io.Writer) (\n\tsameType, sameMetadata, sameData bool) {\n\treturn compareInodes(left, right, logWriter)\n}\n\nfunc CompareRegularInodes(left, right *RegularInode, logWriter io.Writer) bool {\n\treturn compareRegularInodes(left, right, logWriter)\n}\n\nfunc CompareRegularInodesMetadata(left, right *RegularInode,\n\tlogWriter io.Writer) bool {\n\treturn compareRegularInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareRegularInodesData(left, right *RegularInode,\n\tlogWriter io.Writer) bool {\n\treturn compareRegularInodesData(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodes(left, right *SymlinkInode, logWriter io.Writer) bool {\n\treturn compareSymlinkInodes(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodesMetadata(left, right *SymlinkInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSymlinkInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareSymlinkInodesData(left, right *SymlinkInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSymlinkInodesData(left, right, logWriter)\n}\n\nfunc CompareSpecialInodes(left, right *SpecialInode, logWriter io.Writer) bool {\n\treturn compareSpecialInodes(left, right, logWriter)\n}\n\nfunc CompareSpecialInodesMetadata(left, right *SpecialInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSpecialInodesMetadata(left, right, logWriter)\n}\n\nfunc CompareSpecialInodesData(left, right *SpecialInode,\n\tlogWriter io.Writer) bool {\n\treturn compareSpecialInodesData(left, right, logWriter)\n}\n<|endoftext|>"} {"text":"package 
cmd\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/driusan\/dgit\/git\"\n\t\"os\"\n)\n\nfunc Branch(c *git.Client, args []string) {\n\tflags := flag.NewFlagSet(\"branch\", flag.ExitOnError)\n\tflags.SetOutput(flag.CommandLine.Output())\n\tflags.Usage = func() {\n\t\tflag.Usage()\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \"\\n\\nOptions:\\n\")\n\t\tflags.PrintDefaults()\n\t}\n\n\t\/\/ These flags can be moved out of these lists and below as proper flags as they are implemented\n\tfor _, bf := range []string{\"d\", \"delete\", \"D\", \"create-reflog\", \"f\", \"force\", \"m\", \"move\", \"M\", \"c\", \"copy\", \"C\", \"no-color\", \"i\", \"ignore-case\", \"no-column\", \"r\", \"remotes\", \"a\", \"all\", \"v\", \"vv\", \"verbose\", \"q\", \"quiet\", \"no-abbrev\", \"no-track\", \"unset-upstream\", \"edit-description\"} {\n\t\tflags.Var(newNotimplBoolValue(), bf, \"Not implemented\")\n\t}\n\tfor _, sf := range []string{\"color\", \"abbrev\", \"column\", \"sort\", \"no-merged\", \"contains\", \"no-contains\", \"points-at\", \"format\", \"set-upstream-to\", \"u\"} {\n\t\tflags.Var(newNotimplStringValue(), sf, \"Not implemented\")\n\t}\n\n\tflags.Parse(args)\n\n\tswitch flags.NArg() {\n\tcase 0:\n\t\tbranches, err := c.GetBranches()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not get list of branches.\")\n\t\t\treturn\n\t\t}\n\t\thead := c.GetHeadBranch()\n\t\tfor _, b := range branches {\n\t\t\tif head == b {\n\t\t\t\tfmt.Print(\"* \")\n\t\t\t} else {\n\t\t\t\tfmt.Print(\" \")\n\t\t\t}\n\t\t\tfmt.Println(b.BranchName())\n\t\t}\n\tcase 1:\n\t\theadref, err := git.SymbolicRefGet(c, git.SymbolicRefOptions{}, \"HEAD\")\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb := git.Branch(headref)\n\t\tif err := c.CreateBranch(flags.Arg(0), b); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not create branch (%v): %v\\n\", flags.Arg(0), err)\n\t\t}\n\tdefault:\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n}\nAdded the ability to create a branch at a startpointpackage cmd\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/driusan\/dgit\/git\"\n\t\"os\"\n)\n\nfunc Branch(c *git.Client, args []string) {\n\tflags := flag.NewFlagSet(\"branch\", flag.ExitOnError)\n\tflags.SetOutput(flag.CommandLine.Output())\n\tflags.Usage = func() {\n\t\tflag.Usage()\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \"\\n\\nOptions:\\n\")\n\t\tflags.PrintDefaults()\n\t}\n\n\t\/\/ These flags can be moved out of these lists and below as proper flags as they are implemented\n\tfor _, bf := range []string{\"d\", \"delete\", \"D\", \"create-reflog\", \"f\", \"force\", \"m\", \"move\", \"M\", \"c\", \"copy\", \"C\", \"no-color\", \"i\", \"ignore-case\", \"no-column\", \"r\", \"remotes\", \"a\", \"all\", \"v\", \"vv\", \"verbose\", \"q\", \"quiet\", \"no-abbrev\", \"no-track\", \"unset-upstream\", \"edit-description\"} {\n\t\tflags.Var(newNotimplBoolValue(), bf, \"Not implemented\")\n\t}\n\tfor _, sf := range []string{\"color\", \"abbrev\", \"column\", \"sort\", \"no-merged\", \"contains\", \"no-contains\", \"points-at\", \"format\", \"set-upstream-to\", \"u\"} {\n\t\tflags.Var(newNotimplStringValue(), sf, \"Not implemented\")\n\t}\n\n\tflags.Parse(args)\n\n\tswitch flags.NArg() {\n\tcase 0:\n\t\tbranches, err := c.GetBranches()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not get list of branches.\")\n\t\t\treturn\n\t\t}\n\t\thead := c.GetHeadBranch()\n\t\tfor _, b := range branches {\n\t\t\tif head == b {\n\t\t\t\tfmt.Print(\"* \")\n\t\t\t} else {\n\t\t\t\tfmt.Print(\" 
\")\n\t\t\t}\n\t\t\tfmt.Println(b.BranchName())\n\t\t}\n\tcase 1:\n\t\theadref, err := git.SymbolicRefGet(c, git.SymbolicRefOptions{}, \"HEAD\")\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tb := git.Branch(headref)\n\t\tif err := c.CreateBranch(flags.Arg(0), b); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not create branch (%v): %v\\n\", flags.Arg(0), err)\n\t\t}\n\tcase 2:\n\t\tstartpoint, err := git.RevParseCommitish(c, &git.RevParseOptions{}, flags.Arg(1))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err := c.CreateBranch(flags.Arg(0), startpoint); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not create branch (%v): %v\\n\", flags.Arg(0), err)\n\t\t}\n\tdefault:\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n}\n<|endoftext|>"} {"text":"package asciidocgo\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\ntype testSubstDocumentAble struct {\n}\n\nfunc (tsd *testSubstDocumentAble) Attr(name string, defaultValue interface{}, inherit bool) interface{} {\n\treturn \"mathtest\"\n}\nfunc (tsd *testSubstDocumentAble) Basebackend(base interface{}) bool {\n\treturn true\n}\n\nfunc TestSubstitutor(t *testing.T) {\n\n\tConvey(\"A substitutors can be initialized\", t, func() {\n\n\t\tConvey(\"By default, a substitutors can be created\", func() {\n\t\t\tSo(&substitutors{}, ShouldNotBeNil)\n\t\t})\n\n\t\tConvey(\"A substitutors has an empty passthroughs array\", func() {\n\t\t\ts := substitutors{}\n\t\t\tSo(len(s.passthroughs), ShouldEqual, 0)\n\t\t})\n\t})\n\n\tConvey(\"A substitutors has subs type\", t, func() {\n\t\tSo(len(subs[sub.basic]), ShouldEqual, 1)\n\t\tSo(len(subs[sub.normal]), ShouldEqual, 6)\n\t\tSo(len(subs[sub.verbatim]), ShouldEqual, 2)\n\t\tSo(len(subs[sub.title]), ShouldEqual, 6)\n\t\tSo(len(subs[sub.header]), ShouldEqual, 2)\n\t\tSo(len(subs[sub.pass]), ShouldEqual, 0)\n\t\tSo(len(subs[sub.unknown]), ShouldEqual, 0)\n\t})\n\n\tConvey(\"A substitutors can apply substitutions\", t, func() {\n\n\t\tsource := \"test\"\n\t\ts := &substitutors{}\n\n\t\tConvey(\"By default, no substitution or a pass subs will return source unchanged\", func() {\n\t\t\tSo(s.ApplySubs(source, nil), ShouldEqual, source)\n\t\t\tSo(s.ApplySubs(source, subArray{sub.pass}), ShouldResemble, source)\n\t\t\tSo(len(s.ApplySubs(source, subArray{sub.unknown})), ShouldEqual, 0)\n\t\t\tSo(s.ApplySubs(source, subArray{sub.title}), ShouldEqual, \"test\")\n\t\t})\n\n\t\tConvey(\"A normal substition will use normal substitution modes\", func() {\n\t\t\ttestsub = \"test_ApplySubs_allsubs\"\n\t\t\tSo(s.ApplySubs(source, subArray{sub.normal}), ShouldEqual, \"[specialcharacters quotes attributes replacements macros post_replacements]\")\n\t\t\tSo(s.ApplySubs(source, subArray{sub.title}), ShouldEqual, \"[title]\")\n\t\t\ttestsub = \"\"\n\t\t})\n\t\tConvey(\"A macros substition will call extractPassthroughs\", func() {\n\t\t\ttestsub = \"test_ApplySubs_extractPassthroughs\"\n\t\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, \"test\")\n\t\t\ttestsub = \"\"\n\t\t})\n\n\t})\n\n\tConvey(\"A substitutors can Extract the passthrough text from the document for reinsertion without processing if escaped\", t, func() {\n\t\tsource := `test \\+++for\n\t\ta\n\t\tpassthrough+++ by test2 \\$$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 \\pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`\n\t\ts := &substitutors{}\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, `test +++for\n\t\ta\n\t\tpassthrough+++ by test2 
$$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`)\n\t})\n\n\tConvey(\"A substitutors can Extract the passthrough text from the document for reinsertion after processing\", t, func() {\n\t\tsource := `test +++for\n\t\ta\n\t\tpassthrough+++ by test2 $$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`\n\t\ts := &substitutors{}\n\n\t\tConvey(\"If no inline macros substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"test ++ nosub\", subArray{subValue.macros}), ShouldEqual, \"test ++ nosub\")\n\t\t})\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`test %s0%s by test2 %s1%s for\n\t\t\ttest3 %s2%s end test4`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\t})\n\tConvey(\"A substitutors can unescape escaped branckets\", t, func() {\n\t\tSo(unescapeBrackets(\"\"), ShouldEqual, \"\")\n\t\tSo(unescapeBrackets(`a\\]b]c\\]`), ShouldEqual, `a]b]c]`)\n\t})\n\n\tConvey(\"A substitutors can Extract inline text\", t, func() {\n\t\tsource := \"`a few <\\\\{monospaced\\\\}> words`\" +\n\t\t\t\"[input]`A few <\\\\{monospaced\\\\}> words`\\n\" +\n\t\t\t\"\\\\[input]`a few words`\\n\" +\n\t\t\t\"\\\\[input]\\\\`a few words`\\n\" +\n\t\t\t\"`a few\\n<\\\\{monospaced\\\\}> words`\" +\n\t\t\t\"\\\\[input]`a few <monospaced> words`\\n\" +\n\t\t\t\"the text `asciimath:[x = y]` should be passed through as `literal` text\\n\" +\n\t\t\t\"`Here`s Johnny!\"\n\t\ts := &substitutors{}\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`%s0%s[input]%s1%s\n[input]%s2%s\n\\input`+\"`\"+`a few words`+\"`\"+` : \\`+\"`\"+`a few words`+\"`\"+`\n%s3%s[input]%s4%s\nthe text %s5%s should be passed through as %s6%s text\n`+\"`\"+`Here`+\"`\"+`s Johnny!`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\n\t\tConvey(\"If no literal text substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"test`nosub\", subArray{subValue.macros}), ShouldEqual, \"test`nosub\")\n\t\t})\n\t})\n\n\tConvey(\"A substitutors can Extract math inline text\", t, func() {\n\t\tsource := `math:[x != 0]\n \\math:[x != 0]\n asciimath:[x != 0]\n latexmath:abc[\\sqrt{4} = 2]`\n\t\ts := &substitutors{document: &testSubstDocumentAble{}}\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`%s0%s\n math:[x != 0]\n %s1%s\n %s2%s`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\n\t\tConvey(\"If no math literal substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"math:nosub\", subArray{subValue.macros}), ShouldEqual, \"math:nosub\")\n\t\t})\n\t})\n}\nTest subsitutors with nil (Subst)Document(able).package asciidocgo\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\ntype testSubstDocumentAble struct {\n}\n\nfunc (tsd *testSubstDocumentAble) Attr(name string, defaultValue interface{}, inherit bool) interface{} {\n\treturn \"mathtest\"\n}\nfunc (tsd *testSubstDocumentAble) Basebackend(base interface{}) bool {\n\treturn true\n}\n\nfunc TestSubstitutor(t *testing.T) {\n\n\tConvey(\"A substitutors can be initialized\", t, func() {\n\n\t\tConvey(\"By default, a substitutors can be created\", func() {\n\t\t\tSo(&substitutors{}, ShouldNotBeNil)\n\t\t})\n\n\t\tConvey(\"A substitutors has an empty passthroughs array\", func() {\n\t\t\ts := substitutors{}\n\t\t\tSo(len(s.passthroughs), ShouldEqual, 0)\n\t\t})\n\t})\n\n\tConvey(\"A substitutors has subs type\", t, func() {\n\t\tSo(len(subs[sub.basic]), ShouldEqual, 1)\n\t\tSo(len(subs[sub.normal]), ShouldEqual, 6)\n\t\tSo(len(subs[sub.verbatim]), ShouldEqual, 2)\n\t\tSo(len(subs[sub.title]), ShouldEqual, 6)\n\t\tSo(len(subs[sub.header]), ShouldEqual, 2)\n\t\tSo(len(subs[sub.pass]), ShouldEqual, 0)\n\t\tSo(len(subs[sub.unknown]), ShouldEqual, 0)\n\t})\n\n\tConvey(\"A substitutors can apply substitutions\", t, func() {\n\n\t\tsource := \"test\"\n\t\ts := &substitutors{}\n\n\t\tConvey(\"By default, no substitution or a pass subs will return source unchanged\", func() {\n\t\t\tSo(s.ApplySubs(source, nil), ShouldEqual, source)\n\t\t\tSo(s.ApplySubs(source, subArray{sub.pass}), ShouldResemble, source)\n\t\t\tSo(len(s.ApplySubs(source, subArray{sub.unknown})), ShouldEqual, 0)\n\t\t\tSo(s.ApplySubs(source, subArray{sub.title}), ShouldEqual, \"test\")\n\t\t})\n\n\t\tConvey(\"A normal substition will use normal substitution modes\", func() {\n\t\t\ttestsub = \"test_ApplySubs_allsubs\"\n\t\t\tSo(s.ApplySubs(source, subArray{sub.normal}), ShouldEqual, \"[specialcharacters quotes attributes replacements macros post_replacements]\")\n\t\t\tSo(s.ApplySubs(source, subArray{sub.title}), ShouldEqual, \"[title]\")\n\t\t\ttestsub = \"\"\n\t\t})\n\t\tConvey(\"A macros substition will call extractPassthroughs\", func() {\n\t\t\ttestsub = \"test_ApplySubs_extractPassthroughs\"\n\t\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, \"test\")\n\t\t\ttestsub = \"\"\n\t\t})\n\n\t})\n\n\tConvey(\"A substitutors can Extract the passthrough text from the document for reinsertion without processing if escaped\", t, func() {\n\t\tsource := `test \\+++for\n\t\ta\n\t\tpassthrough+++ by test2 \\$$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 \\pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`\n\t\ts := &substitutors{}\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, `test +++for\n\t\ta\n\t\tpassthrough+++ by test2 $$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`)\n\t})\n\n\tConvey(\"A substitutors can Extract the passthrough text from the document for reinsertion after processing\", t, func() {\n\t\tsource := `test +++for\n\t\ta\n\t\tpassthrough+++ by test2 $$text\n\t\t\tmultiple\n\t\t\tline$$ for\n\t\t\ttest3 pass:quotes[text\n\t\t\tline2\n\t\t\tline3] end test4`\n\t\ts := &substitutors{}\n\n\t\tConvey(\"If no inline macros substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"test ++ nosub\", subArray{subValue.macros}), ShouldEqual, \"test ++ nosub\")\n\t\t})\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`test %s0%s by test2 %s1%s for\n\t\t\ttest3 %s2%s end test4`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, 
subPASS_START, subPASS_END))\n\t})\n\tConvey(\"A substitutors can unescape escaped branckets\", t, func() {\n\t\tSo(unescapeBrackets(\"\"), ShouldEqual, \"\")\n\t\tSo(unescapeBrackets(`a\\]b]c\\]`), ShouldEqual, `a]b]c]`)\n\t})\n\n\tConvey(\"A substitutors can Extract inline text\", t, func() {\n\t\tsource := \"`a few <\\\\{monospaced\\\\}> words`\" +\n\t\t\t\"[input]`A few <\\\\{monospaced\\\\}> words`\\n\" +\n\t\t\t\"\\\\[input]`a few words`\\n\" +\n\t\t\t\"\\\\[input]\\\\`a few words`\\n\" +\n\t\t\t\"`a few\\n<\\\\{monospaced\\\\}> words`\" +\n\t\t\t\"\\\\[input]`a few <monospaced> words`\\n\" +\n\t\t\t\"the text `asciimath:[x = y]` should be passed through as `literal` text\\n\" +\n\t\t\t\"`Here`s Johnny!\"\n\t\ts := &substitutors{}\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`%s0%s[input]%s1%s\n[input]%s2%s\n\\input`+\"`\"+`a few words`+\"`\"+` : \\`+\"`\"+`a few words`+\"`\"+`\n%s3%s[input]%s4%s\nthe text %s5%s should be passed through as %s6%s text\n`+\"`\"+`Here`+\"`\"+`s Johnny!`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\n\t\tConvey(\"If no literal text substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"test`nosub\", subArray{subValue.macros}), ShouldEqual, \"test`nosub\")\n\t\t})\n\t})\n\n\tConvey(\"A substitutors can Extract math inline text\", t, func() {\n\t\tsource := `math:[x != 0]\n \\math:[x != 0]\n asciimath:[x != 0]\n latexmath:abc[\\sqrt{4} = 2]`\n\t\ts := &substitutors{}\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`%s0%s\n math:[x != 0]\n %s1%s\n %s2%s`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\n\t\ts.document = &testSubstDocumentAble{}\n\n\t\tSo(s.ApplySubs(source, subArray{subValue.macros}), ShouldEqual, fmt.Sprintf(`%s3%s\n math:[x != 0]\n %s4%s\n %s5%s`, subPASS_START, subPASS_END, subPASS_START, subPASS_END, subPASS_START, subPASS_END))\n\n\t\tConvey(\"If no math literal substitution detected, return text unchanged\", func() {\n\t\t\tSo(s.ApplySubs(\"math:nosub\", subArray{subValue.macros}), ShouldEqual, \"math:nosub\")\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"package triggerbuild\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/concourse\/atc\/builder\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/rata\"\n\n\t\"github.com\/concourse\/atc\/config\"\n\t\"github.com\/concourse\/atc\/server\/routes\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\n\tjobs config.Jobs\n\n\tdb db.DB\n\tbuilder builder.Builder\n}\n\nfunc NewHandler(\n\tlogger lager.Logger,\n\tjobs config.Jobs,\n\tdb db.DB,\n\tbuilder builder.Builder,\n) http.Handler {\n\treturn &handler{\n\t\tlogger: logger,\n\n\t\tjobs: jobs,\n\n\t\tdb: db,\n\t\tbuilder: builder,\n\t}\n}\n\nfunc (handler *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tjob, found := handler.jobs.Lookup(r.FormValue(\":job\"))\n\tif !found {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tlog := handler.logger.Session(\"trigger-build\", lager.Data{\n\t\t\"job\": job.Name,\n\t})\n\n\tlog.Debug(\"triggering\")\n\n\tbuild, err := handler.db.CreateBuild(job.Name)\n\tif err != nil {\n\t\tlog.Error(\"failed-to-create-build\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = 
handler.builder.Build(build, job, nil)\n\tif err != nil {\n\t\tlog.Error(\"triggering-failed\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tredirectPath, err := routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{\n\t\t\"job\": job.Name,\n\t\t\"build\": fmt.Sprintf(\"%d\", build.ID),\n\t})\n\tif err != nil {\n\t\tlog.Fatal(\"failed-to-construct-redirect-uri\", err, lager.Data{\n\t\t\t\"build\": build.ID,\n\t\t})\n\t}\n\n\thttp.Redirect(w, r, redirectPath, 302)\n}\nwhen triggering, don't sidestep passed: configpackage triggerbuild\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/concourse\/atc\/builder\"\n\t\"github.com\/concourse\/atc\/builds\"\n\t\"github.com\/concourse\/atc\/db\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/rata\"\n\n\t\"github.com\/concourse\/atc\/config\"\n\t\"github.com\/concourse\/atc\/server\/routes\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\n\tjobs config.Jobs\n\n\tdb db.DB\n\tbuilder builder.Builder\n}\n\nfunc NewHandler(\n\tlogger lager.Logger,\n\tjobs config.Jobs,\n\tdb db.DB,\n\tbuilder builder.Builder,\n) http.Handler {\n\treturn &handler{\n\t\tlogger: logger,\n\n\t\tjobs: jobs,\n\n\t\tdb: db,\n\t\tbuilder: builder,\n\t}\n}\n\nfunc (handler *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tjob, found := handler.jobs.Lookup(r.FormValue(\":job\"))\n\tif !found {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tlog := handler.logger.Session(\"trigger-build\", lager.Data{\n\t\t\"job\": job.Name,\n\t})\n\n\tlog.Debug(\"triggering\")\n\n\tbuild, err := handler.db.CreateBuild(job.Name)\n\tif err != nil {\n\t\tlog.Error(\"failed-to-create-build\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tpassedInputs := []config.Input{}\n\tfor _, input := range job.Inputs {\n\t\tif len(input.Passed) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpassedInputs = append(passedInputs, input)\n\t}\n\n\tvar inputs builds.VersionedResources\n\n\tif len(passedInputs) > 0 {\n\t\tinputs, err = handler.db.GetLatestInputVersions(passedInputs)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed-to-get-build-inputs\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = handler.builder.Build(build, job, inputs)\n\tif err != nil {\n\t\tlog.Error(\"triggering-failed\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tredirectPath, err := routes.Routes.CreatePathForRoute(routes.GetBuild, rata.Params{\n\t\t\"job\": job.Name,\n\t\t\"build\": fmt.Sprintf(\"%d\", build.ID),\n\t})\n\tif err != nil {\n\t\tlog.Fatal(\"failed-to-construct-redirect-uri\", err, lager.Data{\n\t\t\t\"build\": build.ID,\n\t\t})\n\t}\n\n\thttp.Redirect(w, r, redirectPath, 302)\n}\n<|endoftext|>"} {"text":"package cache\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n)\n\nvar blobNameSHA256 = regexp.MustCompile(\"^\/?(ac\/|cas\/)?([a-f0-9]{64})$\")\n\n\/\/ HTTPCache ...\ntype HTTPCache interface {\n\tServe()\n}\n\ntype httpCache struct {\n\taddr string\n\tcache Cache\n\tensureSpacer EnsureSpacer\n\tongoingUploads map[string]*sync.Mutex\n\tongoingUploadsMux *sync.Mutex\n}\n\n\/\/ NewHTTPCache ...\nfunc NewHTTPCache(listenAddr string, cacheDir string, maxBytes int64, ensureSpacer EnsureSpacer) HTTPCache {\n\tensureCacheDir(cacheDir)\n\tcache := NewCache(cacheDir, 
maxBytes)\n\tloadFilesIntoCache(cache)\n\treturn &httpCache{listenAddr, cache, ensureSpacer, make(map[string]*sync.Mutex), &sync.Mutex{}}\n}\n\n\/\/ Serve ...\nfunc (h *httpCache) Serve() {\n\ts := &http.Server{\n\t\tAddr: h.addr,\n\t\tHandler: h,\n\t}\n\tlog.Fatal(s.ListenAndServe())\n}\n\nfunc ensureCacheDir(path string) {\n\td, err := os.Open(path)\n\tif err != nil {\n\t\terr := os.MkdirAll(path, os.FileMode(0644))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\td.Close()\n}\n\nfunc loadFilesIntoCache(cache Cache) {\n\tfilepath.Walk(cache.Dir(), func(name string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tcache.AddFile(filepath.Base(name), info.Size())\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (h *httpCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tparts, err := parseURL(r.URL.Path)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar hash string\n\tvar verifyHash bool\n\tif len(parts) == 1 {\n\t\t\/\/ For backwards compatibiliy with older Bazel version's that don't\n\t\t\/\/ support {cas,actioncache} prefixes.\n\t\tverifyHash = false\n\t\thash = parts[0]\n\t} else {\n\t\tverifyHash = parts[0] == \"cas\/\"\n\t\thash = parts[1]\n\t}\n\n\tswitch m := r.Method; m {\n\tcase http.MethodGet:\n\t\tif !h.cache.ContainsFile(hash) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\thttp.ServeFile(w, r, h.filePath(hash))\n\tcase http.MethodPut:\n\t\tif h.cache.ContainsFile(hash) {\n\t\t\th.discardUpload(w, r.Body)\n\t\t\treturn\n\t\t}\n\t\tuploadMux := h.startUpload(hash)\n\t\tuploadMux.Lock()\n\t\tdefer h.stopUpload(hash)\n\t\tdefer uploadMux.Unlock()\n\t\tif h.cache.ContainsFile(hash) {\n\t\t\th.discardUpload(w, r.Body)\n\t\t\treturn\n\t\t}\n\t\tif !h.ensureSpacer.EnsureSpace(h.cache, r.ContentLength) {\n\t\t\thttp.Error(w, \"The disk is full. File could not be uploaded.\",\n\t\t\t\thttp.StatusInsufficientStorage)\n\t\t\treturn\n\t\t}\n\t\twritten, err := h.saveToDisk(r.Body, hash, verifyHash)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\th.cache.AddFile(hash, written)\n\t\tw.WriteHeader(http.StatusOK)\n\tcase http.MethodHead:\n\t\tif !h.cache.ContainsFile(hash) {\n\t\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"Method '%s' not supported.\", m)\n\t\thttp.Error(w, msg, http.StatusMethodNotAllowed)\n\t}\n}\n\nfunc (h *httpCache) startUpload(hash string) *sync.Mutex {\n\th.ongoingUploadsMux.Lock()\n\tdefer h.ongoingUploadsMux.Unlock()\n\tmux, ok := h.ongoingUploads[hash]\n\tif !ok {\n\t\tmux = &sync.Mutex{}\n\t\th.ongoingUploads[hash] = mux\n\t\treturn mux\n\t}\n\treturn mux\n}\n\nfunc (h *httpCache) stopUpload(hash string) {\n\th.ongoingUploadsMux.Lock()\n\tdefer h.ongoingUploadsMux.Unlock()\n\tdelete(h.ongoingUploads, hash)\n}\n\nfunc (h *httpCache) discardUpload(w http.ResponseWriter, r io.Reader) {\n\tio.Copy(ioutil.Discard, r)\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc parseURL(url string) ([]string, error) {\n\tm := blobNameSHA256.FindStringSubmatch(url)\n\tif m == nil {\n\t\tmsg := fmt.Sprintf(\"Resource name must be a SHA256 hash in hex. 
\"+\n\t\t\t\"Got '%s'.\", url)\n\t\treturn nil, errors.New(msg)\n\t}\n\treturn m[1:], nil\n}\n\nfunc (h *httpCache) saveToDisk(content io.Reader, hash string, verifyHash bool) (written int64, err error) {\n\tf, err := ioutil.TempFile(h.cache.Dir(), \"upload\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttmpName := f.Name()\n\tif verifyHash {\n\t\thasher := sha256.New()\n\t\twritten, err = io.Copy(io.MultiWriter(f, hasher), content)\n\t\tactualHash := hex.EncodeToString(hasher.Sum(nil))\n\t\tif hash != actualHash {\n\t\t\tos.Remove(tmpName)\n\t\t\tmsg := fmt.Sprintf(\"Hashes don't match. Provided '%s', Actual '%s'.\",\n\t\t\t\thash, actualHash)\n\t\t\treturn 0, errors.New(msg)\n\t\t}\n\t} else {\n\t\twritten, err = io.Copy(f, content)\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = f.Sync()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf.Close()\n\terr2 := os.Rename(tmpName, h.filePath(hash))\n\tif err2 != nil {\n\t\treturn 0, err2\n\t}\n\treturn written, nil\n}\n\nfunc (h httpCache) filePath(hash string) string {\n\treturn fmt.Sprintf(\"%s%c%s\", h.cache.Dir(), os.PathSeparator, hash)\n}\ncache\/http: fix potential XSS vulnerabilitypackage cache\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n)\n\nvar blobNameSHA256 = regexp.MustCompile(\"^\/?(ac\/|cas\/)?([a-f0-9]{64})$\")\n\n\/\/ HTTPCache ...\ntype HTTPCache interface {\n\tServe()\n}\n\ntype httpCache struct {\n\taddr string\n\tcache Cache\n\tensureSpacer EnsureSpacer\n\tongoingUploads map[string]*sync.Mutex\n\tongoingUploadsMux *sync.Mutex\n}\n\n\/\/ NewHTTPCache ...\nfunc NewHTTPCache(listenAddr string, cacheDir string, maxBytes int64, ensureSpacer EnsureSpacer) HTTPCache {\n\tensureCacheDir(cacheDir)\n\tcache := NewCache(cacheDir, maxBytes)\n\tloadFilesIntoCache(cache)\n\treturn &httpCache{listenAddr, cache, ensureSpacer, make(map[string]*sync.Mutex), &sync.Mutex{}}\n}\n\n\/\/ Serve ...\nfunc (h *httpCache) Serve() {\n\ts := &http.Server{\n\t\tAddr: h.addr,\n\t\tHandler: h,\n\t}\n\tlog.Fatal(s.ListenAndServe())\n}\n\nfunc ensureCacheDir(path string) {\n\td, err := os.Open(path)\n\tif err != nil {\n\t\terr := os.MkdirAll(path, os.FileMode(0644))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\td.Close()\n}\n\nfunc loadFilesIntoCache(cache Cache) {\n\tfilepath.Walk(cache.Dir(), func(name string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tcache.AddFile(filepath.Base(name), info.Size())\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (h *httpCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tparts, err := parseURL(r.URL.Path)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar hash string\n\tvar verifyHash bool\n\tif len(parts) == 1 {\n\t\t\/\/ For backwards compatibiliy with older Bazel version's that don't\n\t\t\/\/ support {cas,actioncache} prefixes.\n\t\tverifyHash = false\n\t\thash = parts[0]\n\t} else {\n\t\tverifyHash = parts[0] == \"cas\/\"\n\t\thash = parts[1]\n\t}\n\n\tswitch m := r.Method; m {\n\tcase http.MethodGet:\n\t\tif !h.cache.ContainsFile(hash) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\thttp.ServeFile(w, r, h.filePath(hash))\n\tcase http.MethodPut:\n\t\tif h.cache.ContainsFile(hash) {\n\t\t\th.discardUpload(w, r.Body)\n\t\t\treturn\n\t\t}\n\t\tuploadMux := h.startUpload(hash)\n\t\tuploadMux.Lock()\n\t\tdefer h.stopUpload(hash)\n\t\tdefer 
uploadMux.Unlock()\n\t\tif h.cache.ContainsFile(hash) {\n\t\t\th.discardUpload(w, r.Body)\n\t\t\treturn\n\t\t}\n\t\tif !h.ensureSpacer.EnsureSpace(h.cache, r.ContentLength) {\n\t\t\thttp.Error(w, \"The disk is full. File could not be uploaded.\",\n\t\t\t\thttp.StatusInsufficientStorage)\n\t\t\treturn\n\t\t}\n\t\twritten, err := h.saveToDisk(r.Body, hash, verifyHash)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\th.cache.AddFile(hash, written)\n\t\tw.WriteHeader(http.StatusOK)\n\tcase http.MethodHead:\n\t\tif !h.cache.ContainsFile(hash) {\n\t\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"Method '%s' not supported.\", html.EscapeString(m))\n\t\thttp.Error(w, msg, http.StatusMethodNotAllowed)\n\t}\n}\n\nfunc (h *httpCache) startUpload(hash string) *sync.Mutex {\n\th.ongoingUploadsMux.Lock()\n\tdefer h.ongoingUploadsMux.Unlock()\n\tmux, ok := h.ongoingUploads[hash]\n\tif !ok {\n\t\tmux = &sync.Mutex{}\n\t\th.ongoingUploads[hash] = mux\n\t\treturn mux\n\t}\n\treturn mux\n}\n\nfunc (h *httpCache) stopUpload(hash string) {\n\th.ongoingUploadsMux.Lock()\n\tdefer h.ongoingUploadsMux.Unlock()\n\tdelete(h.ongoingUploads, hash)\n}\n\nfunc (h *httpCache) discardUpload(w http.ResponseWriter, r io.Reader) {\n\tio.Copy(ioutil.Discard, r)\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc parseURL(url string) ([]string, error) {\n\tm := blobNameSHA256.FindStringSubmatch(url)\n\tif m == nil {\n\t\tmsg := fmt.Sprintf(\"Resource name must be a SHA256 hash in hex. \"+\n\t\t\t\"Got '%s'.\", html.EscapeString(url))\n\t\treturn nil, errors.New(msg)\n\t}\n\treturn m[1:], nil\n}\n\nfunc (h *httpCache) saveToDisk(content io.Reader, hash string, verifyHash bool) (written int64, err error) {\n\tf, err := ioutil.TempFile(h.cache.Dir(), \"upload\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttmpName := f.Name()\n\tif verifyHash {\n\t\thasher := sha256.New()\n\t\twritten, err = io.Copy(io.MultiWriter(f, hasher), content)\n\t\tactualHash := hex.EncodeToString(hasher.Sum(nil))\n\t\tif hash != actualHash {\n\t\t\tos.Remove(tmpName)\n\t\t\tmsg := fmt.Sprintf(\"Hashes don't match. 
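// This second copy of the file carries the "cache/http: fix potential XSS
// vulnerability" change: the only difference from the first copy is that
// request-derived strings (the URL path, the HTTP method, and the echoed
// hash) are wrapped in html.EscapeString before being reflected in error
// responses. A small standalone illustration (the URL value is made up):

package main

import (
	"fmt"
	"html"
)

func main() {
	url := "/<script>alert(1)</script>"
	// Before: the attacker-controlled path is reflected verbatim.
	fmt.Printf("Resource name must be a SHA256 hash in hex. Got '%s'.\n", url)
	// After: metacharacters are entity-encoded, so the reflected text is
	// inert even if a client renders the error body as HTML.
	fmt.Printf("Resource name must be a SHA256 hash in hex. Got '%s'.\n",
		html.EscapeString(url))
}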
Provided '%s', Actual '%s'.\",\n\t\t\t\thash, html.EscapeString(actualHash))\n\t\t\treturn 0, errors.New(msg)\n\t\t}\n\t} else {\n\t\twritten, err = io.Copy(f, content)\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\terr = f.Sync()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tf.Close()\n\terr2 := os.Rename(tmpName, h.filePath(hash))\n\tif err2 != nil {\n\t\treturn 0, err2\n\t}\n\treturn written, nil\n}\n\nfunc (h httpCache) filePath(hash string) string {\n\treturn fmt.Sprintf(\"%s%c%s\", h.cache.Dir(), os.PathSeparator, hash)\n}\n<|endoftext|>"} {"text":"package nsf\n\ntype Apu struct {\n\tS1, S2 Square\n\tTriangle\n\n\tOdd bool\n\tFC byte\n\tFT byte\n\tIrqDisable bool\n}\n\ntype Triangle struct {\n\tLinear\n\tTimer\n\tLength\n\tSI int \/\/ sequence index\n\n\tEnable bool\n}\n\ntype Linear struct {\n\tReload byte\n\tHalt bool\n\tFlag bool\n\tCounter byte\n}\n\ntype Square struct {\n\tEnvelope\n\tTimer\n\tLength\n\tSweep\n\tDuty\n\n\tEnable bool\n}\n\ntype Duty struct {\n\tType byte\n\tCounter byte\n}\n\ntype Sweep struct {\n\tShift byte\n\tNegate bool\n\tPeriod byte\n\tEnable bool\n\tDivider byte\n\tReset bool\n\tNegOffset int\n}\n\ntype Envelope struct {\n\tVolume byte\n\tDivider byte\n\tCounter byte\n\tLoop bool\n\tConstant bool\n\tStart bool\n}\n\ntype Timer struct {\n\tTick uint16\n\tLength uint16\n}\n\ntype Length struct {\n\tHalt bool\n\tCounter byte\n}\n\nfunc (a *Apu) Init() {\n\ta.S1.Sweep.NegOffset = -1\n\tfor i := uint16(0x4000); i <= 0x400f; i++ {\n\t\ta.Write(i, 0)\n\t}\n\ta.Write(0x4010, 0x10)\n\ta.Write(0x4011, 0)\n\ta.Write(0x4012, 0)\n\ta.Write(0x4013, 0)\n\ta.Write(0x4015, 0xf)\n\ta.Write(0x4017, 0)\n}\n\nfunc (a *Apu) Write(v uint16, b byte) {\n\tswitch v & 0xff {\n\tcase 0x00:\n\t\ta.S1.Control1(b)\n\tcase 0x01:\n\t\ta.S1.Control2(b)\n\tcase 0x02:\n\t\ta.S1.Control3(b)\n\tcase 0x03:\n\t\ta.S1.Control4(b)\n\tcase 0x04:\n\t\ta.S2.Control1(b)\n\tcase 0x05:\n\t\ta.S2.Control2(b)\n\tcase 0x06:\n\t\ta.S2.Control3(b)\n\tcase 0x07:\n\t\ta.S2.Control4(b)\n\tcase 0x08:\n\t\ta.Triangle.Control1(b)\n\tcase 0x0a:\n\t\ta.Triangle.Control2(b)\n\tcase 0x0b:\n\t\ta.Triangle.Control3(b)\n\tcase 0x15:\n\t\ta.S1.Disable(b&0x1 == 0)\n\t\ta.S2.Disable(b&0x2 == 0)\n\t\ta.Triangle.Disable(b&0x4 == 0)\n\tcase 0x17:\n\t\ta.FT = 0\n\t\tif b&0x80 != 0 {\n\t\t\ta.FC = 5\n\t\t\ta.FrameStep()\n\t\t} else {\n\t\t\ta.FC = 4\n\t\t}\n\t\ta.IrqDisable = b&0x40 != 0\n\t}\n}\n\nfunc (t *Triangle) Control1(b byte) {\n\tt.Linear.Control(b)\n\tt.Length.Halt = b&0x80 != 0\n}\n\nfunc (l *Linear) Control(b byte) {\n\tl.Flag = b&0x80 != 0\n\tl.Reload = b & 0x7f\n}\n\nfunc (t *Triangle) Control2(b byte) {\n\tt.Timer.Length &= 0xff00\n\tt.Timer.Length |= uint16(b)\n}\n\nfunc (t *Triangle) Control3(b byte) {\n\tt.Timer.Length &= 0xff\n\tt.Timer.Length |= uint16(b&0x7) << 8\n\tt.Length.Set(b >> 3)\n\tt.Linear.Halt = true\n}\n\nfunc (s *Square) Control1(b byte) {\n\ts.Envelope.Control(b)\n\ts.Duty.Control(b)\n\ts.Length.Halt = b&0x20 != 0\n}\n\nfunc (s *Square) Control2(b byte) {\n\ts.Sweep.Control(b)\n}\n\nfunc (s *Square) Control3(b byte) {\n\ts.Timer.Length &= 0xff00\n\ts.Timer.Length |= uint16(b)\n}\n\nfunc (s *Square) Control4(b byte) {\n\ts.Timer.Length &= 0xff\n\ts.Timer.Length |= uint16(b&0x7) << 8\n\ts.Length.Set(b >> 3)\n\n\ts.Envelope.Start = true\n\ts.Duty.Counter = 0\n}\n\nfunc (d *Duty) Control(b byte) {\n\td.Type = b >> 6\n}\n\nfunc (s *Sweep) Control(b byte) {\n\ts.Shift = b & 0x7\n\ts.Negate = b&0x8 != 0\n\ts.Period = (b >> 4) & 0x7\n\ts.Enable = b&0x80 != 0\n\ts.Reset = true\n}\n\nfunc (e 
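// The Control2/Control3 (triangle) and Control3/Control4 (square) pairs
// above assemble each channel's 11-bit timer period from two register
// writes: one byte carries period bits 0-7, and bits 0-2 of the next byte
// carry period bits 8-10, while that byte's upper five bits index the
// length-counter table instead (t.Length.Set(b >> 3)). A standalone
// illustration with made-up register values:

package main

import "fmt"

func main() {
	var period uint16
	lo := byte(0xAB) // e.g. a write to $4002: period bits 0-7
	hi := byte(0x1D) // e.g. a write to $4003: bits 0-2 are period bits 8-10
	period = period&0xff00 | uint16(lo)
	period = period&0x00ff | uint16(hi&0x7)<<8
	fmt.Printf("period = %#x (%d)\n", period, period) // 0x5ab (1451)
	fmt.Printf("length table index = %d\n", hi>>3)    // upper 5 bits: 3
}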
*Envelope) Control(b byte) {\n\te.Volume = b & 0xf\n\te.Constant = b&0x10 != 0\n\te.Loop = b&0x20 != 0\n}\n\nfunc (l *Length) Set(b byte) {\n\tif !l.Halt {\n\t\tl.Counter = LenLookup[b]\n\t}\n}\n\nfunc (l *Length) Enabled() bool {\n\treturn l.Counter != 0\n}\n\nfunc (s *Square) Disable(b bool) {\n\ts.Enable = !b\n\tif b {\n\t\ts.Length.Counter = 0\n\t}\n}\n\nfunc (t *Triangle) Disable(b bool) {\n\tt.Enable = !b\n\tif b {\n\t\tt.Length.Counter = 0\n\t}\n}\n\nfunc (a *Apu) Read(v uint16) byte {\n\tvar b byte\n\tif v == 0x4015 {\n\t\tif a.S1.Length.Counter > 0 {\n\t\t\tb |= 0x1\n\t\t}\n\t\tif a.S2.Length.Counter > 0 {\n\t\t\tb |= 0x2\n\t\t}\n\t\tif a.Triangle.Length.Counter > 0 {\n\t\t\tb |= 0x4\n\t\t}\n\t}\n\treturn b\n}\n\nfunc (d *Duty) Clock() {\n\tif d.Counter == 0 {\n\t\td.Counter = 7\n\t} else {\n\t\td.Counter--\n\t}\n}\n\nfunc (s *Sweep) Clock() (r bool) {\n\tif s.Divider == 0 {\n\t\ts.Divider = s.Period\n\t\tr = true\n\t} else {\n\t\ts.Divider--\n\t}\n\tif s.Reset {\n\t\ts.Divider = 0\n\t\ts.Reset = false\n\t}\n\treturn\n}\n\nfunc (e *Envelope) Clock() {\n\tif e.Start {\n\t\te.Start = false\n\t\te.Counter = 15\n\t} else {\n\t\tif e.Divider == 0 {\n\t\t\te.Divider = e.Volume\n\t\t\tif e.Counter != 0 {\n\t\t\t\te.Counter--\n\t\t\t} else if e.Loop {\n\t\t\t\te.Counter = 15\n\t\t\t}\n\t\t} else {\n\t\t\te.Divider--\n\t\t}\n\t}\n}\n\nfunc (t *Timer) Clock() bool {\n\tif t.Tick == 0 {\n\t\tt.Tick = t.Length\n\t} else {\n\t\tt.Tick--\n\t}\n\treturn t.Tick == t.Length\n}\n\nfunc (s *Square) Clock() {\n\tif s.Timer.Clock() {\n\t\ts.Duty.Clock()\n\t}\n}\n\nfunc (t *Triangle) Clock() {\n\tif t.Timer.Clock() && t.Length.Counter > 0 && t.Linear.Counter > 0 {\n\t\tif t.SI == 31 {\n\t\t\tt.SI = 0\n\t\t} else {\n\t\t\tt.SI++\n\t\t}\n\t}\n}\n\nfunc (a *Apu) Step() {\n\tif a.Odd {\n\t\tif a.S1.Enable {\n\t\t\ta.S1.Clock()\n\t\t}\n\t\tif a.S2.Enable {\n\t\t\ta.S2.Clock()\n\t\t}\n\t}\n\ta.Odd = !a.Odd\n\tif a.Triangle.Enable {\n\t\ta.Triangle.Clock()\n\t}\n}\n\nfunc (a *Apu) FrameStep() {\n\ta.FT++\n\tif a.FT == a.FC {\n\t\ta.FT = 0\n\t}\n\tif a.FT <= 3 {\n\t\ta.S1.Envelope.Clock()\n\t\ta.Triangle.Linear.Clock()\n\t}\n\tif a.FT == 1 || a.FT == 3 {\n\t\ta.S1.FrameStep()\n\t\ta.S2.FrameStep()\n\t\ta.Triangle.Length.Clock()\n\t}\n\tif a.FC == 4 && a.FT == 3 && !a.IrqDisable {\n\t\t\/\/ todo: assert cpu irq line\n\t}\n}\n\nfunc (l *Linear) Clock() {\n\tif l.Halt {\n\t\tl.Counter = l.Reload\n\t} else if l.Counter != 0 {\n\t\tl.Counter--\n\t}\n\tif !l.Flag {\n\t\tl.Halt = false\n\t}\n}\n\nfunc (s *Square) FrameStep() {\n\ts.Length.Clock()\n\tif s.Sweep.Clock() && s.Sweep.Enable && s.Sweep.Shift > 0 {\n\t\tr := s.SweepResult()\n\t\tif r <= 0x7ff {\n\t\t\ts.Timer.Tick = r\n\t\t}\n\t}\n}\n\nfunc (l *Length) Clock() {\n\tif !l.Halt && l.Counter > 0 {\n\t\tl.Counter--\n\t}\n}\n\nfunc (a *Apu) Volume() float32 {\n\tp := PulseOut[a.S1.Volume()+a.S2.Volume()]\n\tt := TndOut[3*a.Triangle.Volume()]\n\treturn p + t\n}\n\nfunc (t *Triangle) Volume() uint8 {\n\tif t.Enable && t.Linear.Counter > 0 && t.Length.Counter > 0 {\n\t\treturn TriLookup[t.SI]\n\t}\n\treturn 0\n}\n\nfunc (s *Square) Volume() uint8 {\n\tif s.Enable && s.Duty.Enabled() && s.Length.Enabled() && s.Timer.Tick >= 8 && s.SweepResult() <= 0x7ff {\n\t\treturn s.Envelope.Output()\n\t}\n\treturn 0\n}\n\nfunc (e *Envelope) Output() byte {\n\tif e.Constant {\n\t\treturn e.Volume\n\t}\n\treturn e.Counter\n}\n\nfunc (s *Square) SweepResult() uint16 {\n\tr := int(s.Timer.Tick >> s.Sweep.Shift)\n\tif s.Sweep.Negate {\n\t\tr = -r\n\t}\n\tr += int(s.Timer.Tick)\n\tif 
r > 0x7ff {\n\t\tr = 0x800\n\t}\n\treturn uint16(r)\n}\n\nfunc (d *Duty) Enabled() bool {\n\treturn DutyCycle[d.Type][d.Counter] == 1\n}\n\nvar (\n\tPulseOut [31]float32\n\tTndOut [203]float32\n\tDutyCycle = [4][8]byte{\n\t\t{0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 1, 1, 0, 0, 0, 0, 0},\n\t\t{0, 1, 1, 1, 1, 0, 0, 0},\n\t\t{1, 0, 0, 1, 1, 1, 1, 1},\n\t}\n\tLenLookup = [...]byte{\n\t\t0x0a, 0xfe, 0x14, 0x02,\n\t\t0x28, 0x04, 0x50, 0x06,\n\t\t0xa0, 0x08, 0x3c, 0x0a,\n\t\t0x0e, 0x0c, 0x1a, 0x0e,\n\t\t0x0c, 0x10, 0x18, 0x12,\n\t\t0x30, 0x14, 0x60, 0x16,\n\t\t0xc0, 0x18, 0x48, 0x1a,\n\t\t0x10, 0x1c, 0x20, 0x1e,\n\t}\n\tTriLookup = [...]byte{\n\t\t0xF, 0xE, 0xD, 0xC,\n\t\t0xB, 0xA, 0x9, 0x8,\n\t\t0x7, 0x6, 0x5, 0x4,\n\t\t0x3, 0x2, 0x1, 0x0,\n\t\t0x0, 0x1, 0x2, 0x3,\n\t\t0x4, 0x5, 0x6, 0x7,\n\t\t0x8, 0x9, 0xA, 0xB,\n\t\t0xC, 0xD, 0xE, 0xF,\n\t}\n)\n\nfunc init() {\n\tfor i := range PulseOut {\n\t\tPulseOut[i] = 95.88 \/ (8128\/float32(i) + 100)\n\t}\n\tfor i := range TndOut {\n\t\tTndOut[i] = 163.67 \/ (24329\/float32(i) + 100)\n\t}\n}\nAllow setting of length counter when haltedpackage nsf\n\ntype Apu struct {\n\tS1, S2 Square\n\tTriangle\n\n\tOdd bool\n\tFC byte\n\tFT byte\n\tIrqDisable bool\n}\n\ntype Triangle struct {\n\tLinear\n\tTimer\n\tLength\n\tSI int \/\/ sequence index\n\n\tEnable bool\n}\n\ntype Linear struct {\n\tReload byte\n\tHalt bool\n\tFlag bool\n\tCounter byte\n}\n\ntype Square struct {\n\tEnvelope\n\tTimer\n\tLength\n\tSweep\n\tDuty\n\n\tEnable bool\n}\n\ntype Duty struct {\n\tType byte\n\tCounter byte\n}\n\ntype Sweep struct {\n\tShift byte\n\tNegate bool\n\tPeriod byte\n\tEnable bool\n\tDivider byte\n\tReset bool\n\tNegOffset int\n}\n\ntype Envelope struct {\n\tVolume byte\n\tDivider byte\n\tCounter byte\n\tLoop bool\n\tConstant bool\n\tStart bool\n}\n\ntype Timer struct {\n\tTick uint16\n\tLength uint16\n}\n\ntype Length struct {\n\tHalt bool\n\tCounter byte\n}\n\nfunc (a *Apu) Init() {\n\ta.S1.Sweep.NegOffset = -1\n\tfor i := uint16(0x4000); i <= 0x400f; i++ {\n\t\ta.Write(i, 0)\n\t}\n\ta.Write(0x4010, 0x10)\n\ta.Write(0x4011, 0)\n\ta.Write(0x4012, 0)\n\ta.Write(0x4013, 0)\n\ta.Write(0x4015, 0xf)\n\ta.Write(0x4017, 0)\n}\n\nfunc (a *Apu) Write(v uint16, b byte) {\n\tswitch v & 0xff {\n\tcase 0x00:\n\t\ta.S1.Control1(b)\n\tcase 0x01:\n\t\ta.S1.Control2(b)\n\tcase 0x02:\n\t\ta.S1.Control3(b)\n\tcase 0x03:\n\t\ta.S1.Control4(b)\n\tcase 0x04:\n\t\ta.S2.Control1(b)\n\tcase 0x05:\n\t\ta.S2.Control2(b)\n\tcase 0x06:\n\t\ta.S2.Control3(b)\n\tcase 0x07:\n\t\ta.S2.Control4(b)\n\tcase 0x08:\n\t\ta.Triangle.Control1(b)\n\tcase 0x0a:\n\t\ta.Triangle.Control2(b)\n\tcase 0x0b:\n\t\ta.Triangle.Control3(b)\n\tcase 0x15:\n\t\ta.S1.Disable(b&0x1 == 0)\n\t\ta.S2.Disable(b&0x2 == 0)\n\t\ta.Triangle.Disable(b&0x4 == 0)\n\tcase 0x17:\n\t\ta.FT = 0\n\t\tif b&0x80 != 0 {\n\t\t\ta.FC = 5\n\t\t\ta.FrameStep()\n\t\t} else {\n\t\t\ta.FC = 4\n\t\t}\n\t\ta.IrqDisable = b&0x40 != 0\n\t}\n}\n\nfunc (t *Triangle) Control1(b byte) {\n\tt.Linear.Control(b)\n\tt.Length.Halt = b&0x80 != 0\n}\n\nfunc (l *Linear) Control(b byte) {\n\tl.Flag = b&0x80 != 0\n\tl.Reload = b & 0x7f\n}\n\nfunc (t *Triangle) Control2(b byte) {\n\tt.Timer.Length &= 0xff00\n\tt.Timer.Length |= uint16(b)\n}\n\nfunc (t *Triangle) Control3(b byte) {\n\tt.Timer.Length &= 0xff\n\tt.Timer.Length |= uint16(b&0x7) << 8\n\tt.Length.Set(b >> 3)\n\tt.Linear.Halt = true\n}\n\nfunc (s *Square) Control1(b byte) {\n\ts.Envelope.Control(b)\n\ts.Duty.Control(b)\n\ts.Length.Halt = b&0x20 != 0\n}\n\nfunc (s *Square) Control2(b byte) 
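// The commit between the two copies of this file ("Allow setting of length
// counter when halted") drops the !l.Halt guard in Length.Set, so a length
// write always reloads the counter. Separately, the PulseOut and TndOut
// tables filled in by init() above are the usual NES APU mixer
// approximations (95.88/(8128/n + 100) for the two pulse channels,
// 163.67/(24329/n + 100) for triangle/noise/DMC). The n == 0 entries are
// well defined in Go: float division by zero yields +Inf, so the whole
// expression collapses to 95.88/+Inf == 0, i.e. silence. A standalone
// check of that property:

package main

import "fmt"

func main() {
	var pulseOut [31]float32
	for i := range pulseOut {
		pulseOut[i] = 95.88 / (8128/float32(i) + 100)
	}
	fmt.Println(pulseOut[0])  // 0 (from 95.88 / +Inf)
	fmt.Println(pulseOut[30]) // ≈0.258, the loudest two-pulse sum
}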
{\n\ts.Sweep.Control(b)\n}\n\nfunc (s *Square) Control3(b byte) {\n\ts.Timer.Length &= 0xff00\n\ts.Timer.Length |= uint16(b)\n}\n\nfunc (s *Square) Control4(b byte) {\n\ts.Timer.Length &= 0xff\n\ts.Timer.Length |= uint16(b&0x7) << 8\n\ts.Length.Set(b >> 3)\n\n\ts.Envelope.Start = true\n\ts.Duty.Counter = 0\n}\n\nfunc (d *Duty) Control(b byte) {\n\td.Type = b >> 6\n}\n\nfunc (s *Sweep) Control(b byte) {\n\ts.Shift = b & 0x7\n\ts.Negate = b&0x8 != 0\n\ts.Period = (b >> 4) & 0x7\n\ts.Enable = b&0x80 != 0\n\ts.Reset = true\n}\n\nfunc (e *Envelope) Control(b byte) {\n\te.Volume = b & 0xf\n\te.Constant = b&0x10 != 0\n\te.Loop = b&0x20 != 0\n}\n\nfunc (l *Length) Set(b byte) {\n\tl.Counter = LenLookup[b]\n}\n\nfunc (l *Length) Enabled() bool {\n\treturn l.Counter != 0\n}\n\nfunc (s *Square) Disable(b bool) {\n\ts.Enable = !b\n\tif b {\n\t\ts.Length.Counter = 0\n\t}\n}\n\nfunc (t *Triangle) Disable(b bool) {\n\tt.Enable = !b\n\tif b {\n\t\tt.Length.Counter = 0\n\t}\n}\n\nfunc (a *Apu) Read(v uint16) byte {\n\tvar b byte\n\tif v == 0x4015 {\n\t\tif a.S1.Length.Counter > 0 {\n\t\t\tb |= 0x1\n\t\t}\n\t\tif a.S2.Length.Counter > 0 {\n\t\t\tb |= 0x2\n\t\t}\n\t\tif a.Triangle.Length.Counter > 0 {\n\t\t\tb |= 0x4\n\t\t}\n\t}\n\treturn b\n}\n\nfunc (d *Duty) Clock() {\n\tif d.Counter == 0 {\n\t\td.Counter = 7\n\t} else {\n\t\td.Counter--\n\t}\n}\n\nfunc (s *Sweep) Clock() (r bool) {\n\tif s.Divider == 0 {\n\t\ts.Divider = s.Period\n\t\tr = true\n\t} else {\n\t\ts.Divider--\n\t}\n\tif s.Reset {\n\t\ts.Divider = 0\n\t\ts.Reset = false\n\t}\n\treturn\n}\n\nfunc (e *Envelope) Clock() {\n\tif e.Start {\n\t\te.Start = false\n\t\te.Counter = 15\n\t} else {\n\t\tif e.Divider == 0 {\n\t\t\te.Divider = e.Volume\n\t\t\tif e.Counter != 0 {\n\t\t\t\te.Counter--\n\t\t\t} else if e.Loop {\n\t\t\t\te.Counter = 15\n\t\t\t}\n\t\t} else {\n\t\t\te.Divider--\n\t\t}\n\t}\n}\n\nfunc (t *Timer) Clock() bool {\n\tif t.Tick == 0 {\n\t\tt.Tick = t.Length\n\t} else {\n\t\tt.Tick--\n\t}\n\treturn t.Tick == t.Length\n}\n\nfunc (s *Square) Clock() {\n\tif s.Timer.Clock() {\n\t\ts.Duty.Clock()\n\t}\n}\n\nfunc (t *Triangle) Clock() {\n\tif t.Timer.Clock() && t.Length.Counter > 0 && t.Linear.Counter > 0 {\n\t\tif t.SI == 31 {\n\t\t\tt.SI = 0\n\t\t} else {\n\t\t\tt.SI++\n\t\t}\n\t}\n}\n\nfunc (a *Apu) Step() {\n\tif a.Odd {\n\t\tif a.S1.Enable {\n\t\t\ta.S1.Clock()\n\t\t}\n\t\tif a.S2.Enable {\n\t\t\ta.S2.Clock()\n\t\t}\n\t}\n\ta.Odd = !a.Odd\n\tif a.Triangle.Enable {\n\t\ta.Triangle.Clock()\n\t}\n}\n\nfunc (a *Apu) FrameStep() {\n\ta.FT++\n\tif a.FT == a.FC {\n\t\ta.FT = 0\n\t}\n\tif a.FT <= 3 {\n\t\ta.S1.Envelope.Clock()\n\t\ta.Triangle.Linear.Clock()\n\t}\n\tif a.FT == 1 || a.FT == 3 {\n\t\ta.S1.FrameStep()\n\t\ta.S2.FrameStep()\n\t\ta.Triangle.Length.Clock()\n\t}\n\tif a.FC == 4 && a.FT == 3 && !a.IrqDisable {\n\t\t\/\/ todo: assert cpu irq line\n\t}\n}\n\nfunc (l *Linear) Clock() {\n\tif l.Halt {\n\t\tl.Counter = l.Reload\n\t} else if l.Counter != 0 {\n\t\tl.Counter--\n\t}\n\tif !l.Flag {\n\t\tl.Halt = false\n\t}\n}\n\nfunc (s *Square) FrameStep() {\n\ts.Length.Clock()\n\tif s.Sweep.Clock() && s.Sweep.Enable && s.Sweep.Shift > 0 {\n\t\tr := s.SweepResult()\n\t\tif r <= 0x7ff {\n\t\t\ts.Timer.Tick = r\n\t\t}\n\t}\n}\n\nfunc (l *Length) Clock() {\n\tif !l.Halt && l.Counter > 0 {\n\t\tl.Counter--\n\t}\n}\n\nfunc (a *Apu) Volume() float32 {\n\tp := PulseOut[a.S1.Volume()+a.S2.Volume()]\n\tt := TndOut[3*a.Triangle.Volume()]\n\treturn p + t\n}\n\nfunc (t *Triangle) Volume() uint8 {\n\tif t.Enable && t.Linear.Counter > 0 && t.Length.Counter 
> 0 {\n\t\treturn TriLookup[t.SI]\n\t}\n\treturn 0\n}\n\nfunc (s *Square) Volume() uint8 {\n\tif s.Enable && s.Duty.Enabled() && s.Length.Enabled() && s.Timer.Tick >= 8 && s.SweepResult() <= 0x7ff {\n\t\treturn s.Envelope.Output()\n\t}\n\treturn 0\n}\n\nfunc (e *Envelope) Output() byte {\n\tif e.Constant {\n\t\treturn e.Volume\n\t}\n\treturn e.Counter\n}\n\nfunc (s *Square) SweepResult() uint16 {\n\tr := int(s.Timer.Tick >> s.Sweep.Shift)\n\tif s.Sweep.Negate {\n\t\tr = -r\n\t}\n\tr += int(s.Timer.Tick)\n\tif r > 0x7ff {\n\t\tr = 0x800\n\t}\n\treturn uint16(r)\n}\n\nfunc (d *Duty) Enabled() bool {\n\treturn DutyCycle[d.Type][d.Counter] == 1\n}\n\nvar (\n\tPulseOut [31]float32\n\tTndOut [203]float32\n\tDutyCycle = [4][8]byte{\n\t\t{0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 1, 1, 0, 0, 0, 0, 0},\n\t\t{0, 1, 1, 1, 1, 0, 0, 0},\n\t\t{1, 0, 0, 1, 1, 1, 1, 1},\n\t}\n\tLenLookup = [...]byte{\n\t\t0x0a, 0xfe, 0x14, 0x02,\n\t\t0x28, 0x04, 0x50, 0x06,\n\t\t0xa0, 0x08, 0x3c, 0x0a,\n\t\t0x0e, 0x0c, 0x1a, 0x0e,\n\t\t0x0c, 0x10, 0x18, 0x12,\n\t\t0x30, 0x14, 0x60, 0x16,\n\t\t0xc0, 0x18, 0x48, 0x1a,\n\t\t0x10, 0x1c, 0x20, 0x1e,\n\t}\n\tTriLookup = [...]byte{\n\t\t0xF, 0xE, 0xD, 0xC,\n\t\t0xB, 0xA, 0x9, 0x8,\n\t\t0x7, 0x6, 0x5, 0x4,\n\t\t0x3, 0x2, 0x1, 0x0,\n\t\t0x0, 0x1, 0x2, 0x3,\n\t\t0x4, 0x5, 0x6, 0x7,\n\t\t0x8, 0x9, 0xA, 0xB,\n\t\t0xC, 0xD, 0xE, 0xF,\n\t}\n)\n\nfunc init() {\n\tfor i := range PulseOut {\n\t\tPulseOut[i] = 95.88 \/ (8128\/float32(i) + 100)\n\t}\n\tfor i := range TndOut {\n\t\tTndOut[i] = 163.67 \/ (24329\/float32(i) + 100)\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/api\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/framework\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/state\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/util\"\n)\n\nconst (\n\tnamePlaceholder = \"Name\"\n\tindexPlaceholder = \"Index\"\n)\n\ntype simpleTestExecutor struct{}\n\nfunc createSimpleTestExecutor() TestExecutor {\n\treturn &simpleTestExecutor{}\n}\n\n\/\/ ExecuteTest executes test based on provided configuration.\nfunc (ste *simpleTestExecutor) ExecuteTest(ctx Context, conf *api.Config) *util.ErrorList {\n\tdefer cleanupResources(ctx)\n\tctx.GetTickerFactory().Init(conf.TuningSets)\n\tautomanagedNamespacesList, err := ctx.GetFramework().ListAutomanagedNamespaces()\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"automanaged namespaces listing failed: %v\", err))\n\t}\n\tif len(automanagedNamespacesList) > 0 {\n\t\treturn util.NewErrorList(fmt.Errorf(\"pre-existing automanaged namespaces found\"))\n\t}\n\terr = ctx.GetFramework().CreateAutomanagedNamespaces(int(conf.AutomanagedNamespaces))\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"automanaged 
namespaces creation failed: %v\", err))\n\t}\n\n\terrList := util.NewErrorList()\n\tfor i := range conf.Steps {\n\t\tif stepErrList := ste.ExecuteStep(ctx, &conf.Steps[i]); !stepErrList.IsEmpty() {\n\t\t\terrList.Concat(stepErrList)\n\t\t\tif isErrsCritical(stepErrList) {\n\t\t\t\treturn errList\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, summary := range ctx.GetMeasurementManager().GetSummaries() {\n\t\tsummaryText, err := summary.PrintSummary()\n\t\tif err != nil {\n\t\t\terrList.Append(fmt.Errorf(\"printing summary %s error: %v\", summary.SummaryName(), err))\n\t\t\tcontinue\n\t\t}\n\t\tif ctx.GetClusterLoaderConfig().ReportDir == \"\" {\n\t\t\tglog.Infof(\"%v: %v\", summary.SummaryName(), summaryText)\n\t\t} else {\n\t\t\t\/\/ TODO(krzysied): Remeber to keep original filename style for backward compatibility.\n\t\t\tfilePath := path.Join(ctx.GetClusterLoaderConfig().ReportDir, summary.SummaryName()+\"_\"+conf.Name+\"_\"+time.Now().Format(time.RFC3339)+\".txt\")\n\t\t\tif err := ioutil.WriteFile(filePath, []byte(summaryText), 0644); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"writing to file %v error: %v\", filePath, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\treturn errList\n}\n\n\/\/ ExecuteStep executes single test step based on provided step configuration.\nfunc (ste *simpleTestExecutor) ExecuteStep(ctx Context, step *api.Step) *util.ErrorList {\n\tvar wg wait.Group\n\t\/\/ TODO(krzysied): Consider moving lock and errList to separate structure.\n\terrList := util.NewErrorList()\n\tif len(step.Measurements) > 0 {\n\t\tfor i := range step.Measurements {\n\t\t\t\/\/ index is created to make i value unchangeable during thread execution.\n\t\t\tindex := i\n\t\t\twg.Start(func() {\n\t\t\t\terr := ctx.GetMeasurementManager().Execute(step.Measurements[index].Method, step.Measurements[index].Identifier, step.Measurements[index].Params)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrList.Append(fmt.Errorf(\"measurement call %s - %s error: %v\", step.Measurements[index].Method, step.Measurements[index].Identifier, err))\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t} else {\n\t\tfor i := range step.Phases {\n\t\t\tphase := &step.Phases[i]\n\t\t\twg.Start(func() {\n\t\t\t\tif phaseErrList := ste.ExecutePhase(ctx, phase); !phaseErrList.IsEmpty() {\n\t\t\t\t\terrList.Concat(phaseErrList)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\twg.Wait()\n\treturn errList\n}\n\n\/\/ ExecutePhase executes single test phase based on provided phase configuration.\nfunc (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *util.ErrorList {\n\t\/\/ TODO: add tuning set\n\terrList := util.NewErrorList()\n\tnsList := createNamespacesList(phase.NamespaceRange)\n\tticker, err := ctx.GetTickerFactory().CreateTicker(phase.TuningSet)\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"ticker creation error: %v\", err))\n\t}\n\tdefer ticker.Stop()\n\tfor _, nsName := range nsList {\n\t\tinstancesStates := make([]*state.InstancesState, 0)\n\t\t\/\/ Updating state (DesiredReplicaCount) of every object in object bundle.\n\t\tfor j := range phase.ObjectBundle {\n\t\t\tid, err := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\tif err != nil {\n\t\t\t\terrList.Append(err)\n\t\t\t\treturn errList\n\t\t\t}\n\t\t\tinstances, exists := ctx.GetState().Get(nsName, id)\n\t\t\tif !exists {\n\t\t\t\tinstances = &state.InstancesState{\n\t\t\t\t\tDesiredReplicaCount: 0,\n\t\t\t\t\tCurrentReplicaCount: 0,\n\t\t\t\t\tObject: phase.ObjectBundle[j],\n\t\t\t\t}\n\t\t\t}\n\t\t\tinstances.DesiredReplicaCount = 
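// ExecutePhase paces every create/patch/delete below by receiving from
// ticker.C first, so the tuning set's ticker sets the object-operation
// rate. A minimal sketch of that pacing idiom (the 100ms interval is
// illustrative, not taken from any real tuning set):

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 5; i++ {
		<-ticker.C // block until the next allowed slot
		fmt.Println("operation", i)
	}
}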
phase.ReplicasPerNamespace\n\t\t\tctx.GetState().Set(nsName, id, instances)\n\t\t\tinstancesStates = append(instancesStates, instances)\n\t\t}\n\n\t\t\/\/ Calculating maximal replica count of objects from object bundle.\n\t\tvar maxCurrentReplicaCount int32\n\t\tfor j := range instancesStates {\n\t\t\tif instancesStates[j].CurrentReplicaCount > maxCurrentReplicaCount {\n\t\t\t\tmaxCurrentReplicaCount = instancesStates[j].CurrentReplicaCount\n\t\t\t}\n\t\t}\n\t\t\/\/ Deleting objects with index greater or equal requested replicas per namespace number.\n\t\t\/\/ Objects will be deleted in reversed order.\n\t\tfor replicaIndex := phase.ReplicasPerNamespace; replicaIndex < maxCurrentReplicaCount; replicaIndex++ {\n\t\t\tfor j := len(phase.ObjectBundle) - 1; j >= 0; j-- {\n\t\t\t\tif replicaIndex < instancesStates[j].CurrentReplicaCount {\n\t\t\t\t\t<-ticker.C\n\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, DELETE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\tif isErrsCritical(objectErrList) {\n\t\t\t\t\t\t\treturn errList\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Handling for update\/create objects.\n\t\tfor replicaIndex := int32(0); replicaIndex < phase.ReplicasPerNamespace; replicaIndex++ {\n\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\tif instancesStates[j].CurrentReplicaCount == phase.ReplicasPerNamespace {\n\t\t\t\t\t<-ticker.C\n\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, PATCH_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\tif isErrsCritical(objectErrList) {\n\t\t\t\t\t\t\treturn errList\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\/\/ If error then skip this bundle\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else if replicaIndex >= instancesStates[j].CurrentReplicaCount {\n\t\t\t\t\t<-ticker.C\n\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, CREATE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\tif isErrsCritical(objectErrList) {\n\t\t\t\t\t\t\treturn errList\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\/\/ If error then skip this bundle\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Updating state (CurrentReplicaCount) of every object in object bundle.\n\t\tfor j := range phase.ObjectBundle {\n\t\t\tid, _ := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\tinstancesStates[j].CurrentReplicaCount = instancesStates[j].DesiredReplicaCount\n\t\t\tctx.GetState().Set(nsName, id, instancesStates[j])\n\t\t}\n\t}\n\treturn errList\n}\n\n\/\/ ExecuteObject executes single test object operation based on provided object configuration.\nfunc (ste *simpleTestExecutor) ExecuteObject(ctx Context, object *api.Object, namespace string, replicaIndex int32, operation OperationType) *util.ErrorList {\n\tobjName := fmt.Sprintf(\"%v-%d\", object.Basename, replicaIndex)\n\tvar err error\n\tvar obj *unstructured.Unstructured\n\tswitch operation {\n\tcase CREATE_OBJECT, PATCH_OBJECT:\n\t\tvar mapping map[string]interface{}\n\t\tif object.TemplateFillMap == nil {\n\t\t\tmapping = make(map[string]interface{})\n\t\t} else {\n\t\t\tmapping = object.TemplateFillMap\n\t\t}\n\t\tmapping[namePlaceholder] = objName\n\t\tmapping[indexPlaceholder] = replicaIndex\n\t\tobj, err = ctx.GetTemplateProvider().TemplateToObject(object.ObjectTemplatePath, mapping)\n\t\tif err != nil {\n\t\t\treturn 
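// ExecuteObject always seeds the template fill map with "Name" and "Index"
// (namePlaceholder/indexPlaceholder above) before rendering
// ObjectTemplatePath. A hedged sketch of that fill step using
// text/template; the real TemplateProvider and the template contents may
// differ:

package main

import (
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("obj").Parse(
		"metadata:\n  name: {{.Name}}\n  labels:\n    index: \"{{.Index}}\"\n"))
	mapping := map[string]interface{}{"Name": "pod-7", "Index": 7}
	if err := tmpl.Execute(os.Stdout, mapping); err != nil {
		panic(err)
	}
}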
util.NewErrorList(fmt.Errorf(\"reading template (%v) error: %v\", object.ObjectTemplatePath, err))\n\t\t}\n\tcase DELETE_OBJECT:\n\t\tobj, err = ctx.GetTemplateProvider().RawToObject(object.ObjectTemplatePath)\n\t\tif err != nil {\n\t\t\treturn util.NewErrorList(fmt.Errorf(\"reading template (%v) for deletion error: %v\", object.ObjectTemplatePath, err))\n\t\t}\n\tdefault:\n\t\treturn util.NewErrorList(fmt.Errorf(\"unsupported operation %v for namespace %v object %v\", operation, namespace, objName))\n\t}\n\tgvk := obj.GroupVersionKind()\n\n\terrList := util.NewErrorList()\n\tif namespace == \"\" {\n\t\t\/\/ TODO: handle cluster level object\n\t} else {\n\t\tswitch operation {\n\t\tcase CREATE_OBJECT:\n\t\t\tif err := ctx.GetFramework().CreateObject(namespace, objName, obj); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v creation error: %v\", namespace, objName, err))\n\t\t\t}\n\t\tcase PATCH_OBJECT:\n\t\t\tif err := ctx.GetFramework().PatchObject(namespace, objName, obj); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v updating error: %v\", namespace, objName, err))\n\t\t\t}\n\t\tcase DELETE_OBJECT:\n\t\t\tif err := ctx.GetFramework().DeleteObject(gvk, namespace, objName); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v deletion error: %v\", namespace, objName, err))\n\t\t\t}\n\t\t}\n\t}\n\treturn errList\n}\n\nfunc getIdentifier(ctx Context, object *api.Object) (state.InstancesIdentifier, error) {\n\tobjName := fmt.Sprintf(\"%v-%d\", object.Basename, 0)\n\tvar mapping map[string]interface{}\n\tif object.TemplateFillMap == nil {\n\t\tmapping = make(map[string]interface{})\n\t} else {\n\t\tmapping = object.TemplateFillMap\n\t}\n\tmapping[namePlaceholder] = objName\n\tmapping[indexPlaceholder] = 0\n\tobj, err := ctx.GetTemplateProvider().RawToObject(object.ObjectTemplatePath)\n\tif err != nil {\n\t\treturn state.InstancesIdentifier{}, fmt.Errorf(\"reading template (%v) for identifier error: %v\", object.ObjectTemplatePath, err)\n\t}\n\tgvk := obj.GroupVersionKind()\n\treturn state.InstancesIdentifier{\n\t\tBasename: object.Basename,\n\t\tObjectKind: gvk.Kind,\n\t\tApiGroup: gvk.Group,\n\t}, nil\n}\n\nfunc createNamespacesList(namespaceRange *api.NamespaceRange) []string {\n\tif namespaceRange == nil {\n\t\treturn []string{\"\"}\n\t}\n\n\tnsList := make([]string, 0)\n\tnsBasename := framework.AutomanagedNamespaceName\n\tif namespaceRange.Basename != nil {\n\t\tnsBasename = *namespaceRange.Basename\n\t}\n\n\tfor i := namespaceRange.Min; i <= namespaceRange.Max; i++ {\n\t\tnsList = append(nsList, fmt.Sprintf(\"%v-%d\", nsBasename, i))\n\t}\n\treturn nsList\n}\n\nfunc isErrsCritical(*util.ErrorList) bool {\n\t\/\/ TODO: define critical errors\n\treturn false\n}\n\nfunc cleanupResources(ctx Context) {\n\tcleanupStartTime := time.Now()\n\tif errList := ctx.GetFramework().DeleteAutomanagedNamespaces(); !errList.IsEmpty() {\n\t\tglog.Errorf(\"Resource cleanup error: %v\", errList.String())\n\t\treturn\n\t}\n\tglog.Infof(\"Resources cleanup time: %v\", time.Since(cleanupStartTime))\n}\nadding parallelism to test executor\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" 
BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/api\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/framework\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/state\"\n\t\"k8s.io\/perf-tests\/clusterloader2\/pkg\/util\"\n)\n\nconst (\n\tnamePlaceholder = \"Name\"\n\tindexPlaceholder = \"Index\"\n)\n\ntype simpleTestExecutor struct{}\n\nfunc createSimpleTestExecutor() TestExecutor {\n\treturn &simpleTestExecutor{}\n}\n\n\/\/ ExecuteTest executes test based on provided configuration.\nfunc (ste *simpleTestExecutor) ExecuteTest(ctx Context, conf *api.Config) *util.ErrorList {\n\tdefer cleanupResources(ctx)\n\tctx.GetTickerFactory().Init(conf.TuningSets)\n\tautomanagedNamespacesList, err := ctx.GetFramework().ListAutomanagedNamespaces()\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"automanaged namespaces listing failed: %v\", err))\n\t}\n\tif len(automanagedNamespacesList) > 0 {\n\t\treturn util.NewErrorList(fmt.Errorf(\"pre-existing automanaged namespaces found\"))\n\t}\n\terr = ctx.GetFramework().CreateAutomanagedNamespaces(int(conf.AutomanagedNamespaces))\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"automanaged namespaces creation failed: %v\", err))\n\t}\n\n\terrList := util.NewErrorList()\n\tfor i := range conf.Steps {\n\t\tif stepErrList := ste.ExecuteStep(ctx, &conf.Steps[i]); !stepErrList.IsEmpty() {\n\t\t\terrList.Concat(stepErrList)\n\t\t\tif isErrsCritical(stepErrList) {\n\t\t\t\treturn errList\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, summary := range ctx.GetMeasurementManager().GetSummaries() {\n\t\tsummaryText, err := summary.PrintSummary()\n\t\tif err != nil {\n\t\t\terrList.Append(fmt.Errorf(\"printing summary %s error: %v\", summary.SummaryName(), err))\n\t\t\tcontinue\n\t\t}\n\t\tif ctx.GetClusterLoaderConfig().ReportDir == \"\" {\n\t\t\tglog.Infof(\"%v: %v\", summary.SummaryName(), summaryText)\n\t\t} else {\n\t\t\t\/\/ TODO(krzysied): Remeber to keep original filename style for backward compatibility.\n\t\t\tfilePath := path.Join(ctx.GetClusterLoaderConfig().ReportDir, summary.SummaryName()+\"_\"+conf.Name+\"_\"+time.Now().Format(time.RFC3339)+\".txt\")\n\t\t\tif err := ioutil.WriteFile(filePath, []byte(summaryText), 0644); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"writing to file %v error: %v\", filePath, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\treturn errList\n}\n\n\/\/ ExecuteStep executes single test step based on provided step configuration.\nfunc (ste *simpleTestExecutor) ExecuteStep(ctx Context, step *api.Step) *util.ErrorList {\n\tvar wg wait.Group\n\t\/\/ TODO(krzysied): Consider moving lock and errList to separate structure.\n\terrList := util.NewErrorList()\n\tif len(step.Measurements) > 0 {\n\t\tfor i := range step.Measurements {\n\t\t\t\/\/ index is created to make i value unchangeable during thread execution.\n\t\t\tindex := i\n\t\t\twg.Start(func() {\n\t\t\t\terr := ctx.GetMeasurementManager().Execute(step.Measurements[index].Method, step.Measurements[index].Identifier, step.Measurements[index].Params)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrList.Append(fmt.Errorf(\"measurement call %s - %s error: %v\", 
step.Measurements[index].Method, step.Measurements[index].Identifier, err))\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t} else {\n\t\tfor i := range step.Phases {\n\t\t\tphase := &step.Phases[i]\n\t\t\twg.Start(func() {\n\t\t\t\tif phaseErrList := ste.ExecutePhase(ctx, phase); !phaseErrList.IsEmpty() {\n\t\t\t\t\terrList.Concat(phaseErrList)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\twg.Wait()\n\treturn errList\n}\n\n\/\/ ExecutePhase executes single test phase based on provided phase configuration.\nfunc (ste *simpleTestExecutor) ExecutePhase(ctx Context, phase *api.Phase) *util.ErrorList {\n\t\/\/ TODO: add tuning set\n\terrList := util.NewErrorList()\n\tnsList := createNamespacesList(phase.NamespaceRange)\n\tticker, err := ctx.GetTickerFactory().CreateTicker(phase.TuningSet)\n\tif err != nil {\n\t\treturn util.NewErrorList(fmt.Errorf(\"ticker creation error: %v\", err))\n\t}\n\tdefer ticker.Stop()\n\tvar wg wait.Group\n\tfor namespaceIndex := range nsList {\n\t\tnsName := nsList[namespaceIndex]\n\t\twg.Start(func() {\n\t\t\tinstancesStates := make([]*state.InstancesState, 0)\n\t\t\t\/\/ Updating state (DesiredReplicaCount) of every object in object bundle.\n\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\tid, err := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrList.Append(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tinstances, exists := ctx.GetState().Get(nsName, id)\n\t\t\t\tif !exists {\n\t\t\t\t\tinstances = &state.InstancesState{\n\t\t\t\t\t\tDesiredReplicaCount: 0,\n\t\t\t\t\t\tCurrentReplicaCount: 0,\n\t\t\t\t\t\tObject: phase.ObjectBundle[j],\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tinstances.DesiredReplicaCount = phase.ReplicasPerNamespace\n\t\t\t\tctx.GetState().Set(nsName, id, instances)\n\t\t\t\tinstancesStates = append(instancesStates, instances)\n\t\t\t}\n\n\t\t\t\/\/ Calculating maximal replica count of objects from object bundle.\n\t\t\tvar maxCurrentReplicaCount int32\n\t\t\tfor j := range instancesStates {\n\t\t\t\tif instancesStates[j].CurrentReplicaCount > maxCurrentReplicaCount {\n\t\t\t\t\tmaxCurrentReplicaCount = instancesStates[j].CurrentReplicaCount\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar namespaceWG wait.Group\n\t\t\t\/\/ Deleting objects with index greater or equal requested replicas per namespace number.\n\t\t\t\/\/ Objects will be deleted in reversed order.\n\t\t\tfor replicaCounter := phase.ReplicasPerNamespace; replicaCounter < maxCurrentReplicaCount; replicaCounter++ {\n\t\t\t\treplicaIndex := replicaCounter\n\t\t\t\tnamespaceWG.Start(func() {\n\t\t\t\t\tfor j := len(phase.ObjectBundle) - 1; j >= 0; j-- {\n\t\t\t\t\t\tif replicaIndex < instancesStates[j].CurrentReplicaCount {\n\t\t\t\t\t\t\t<-ticker.C\n\t\t\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, DELETE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t\t\/\/ Handling for update\/create objects.\n\t\t\tfor replicaCounter := int32(0); replicaCounter < phase.ReplicasPerNamespace; replicaCounter++ {\n\t\t\t\treplicaIndex := replicaCounter\n\t\t\t\tnamespaceWG.Start(func() {\n\t\t\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\t\t\tif instancesStates[j].CurrentReplicaCount == phase.ReplicasPerNamespace {\n\t\t\t\t\t\t\t<-ticker.C\n\t\t\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, PATCH_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t\t\t\/\/ If 
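// The parallelized version below copies the loop variable before each
// closure (nsName := nsList[namespaceIndex], replicaIndex :=
// replicaCounter); before Go 1.22 all iterations shared one loop variable,
// so goroutines started without the copy would observe its final value.
// It also relies on util.ErrorList being safe to Append/Concat from
// several goroutines, which the TODO about moving the lock acknowledges.
// The capture idiom in isolation:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i // per-iteration copy, as replicaIndex := replicaCounter does
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("worker", i) // sees its own copy of i
		}()
	}
	wg.Wait()
}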
error then skip this bundle\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if replicaIndex >= instancesStates[j].CurrentReplicaCount {\n\t\t\t\t\t\t\t<-ticker.C\n\t\t\t\t\t\t\tif objectErrList := ste.ExecuteObject(ctx, &phase.ObjectBundle[j], nsName, replicaIndex, CREATE_OBJECT); !objectErrList.IsEmpty() {\n\t\t\t\t\t\t\t\terrList.Concat(objectErrList)\n\t\t\t\t\t\t\t\t\/\/ If error then skip this bundle\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t\tnamespaceWG.Wait()\n\t\t\t\/\/ Updating state (CurrentReplicaCount) of every object in object bundle.\n\t\t\tfor j := range phase.ObjectBundle {\n\t\t\t\tid, _ := getIdentifier(ctx, &phase.ObjectBundle[j])\n\t\t\t\tinstancesStates[j].CurrentReplicaCount = instancesStates[j].DesiredReplicaCount\n\t\t\t\tctx.GetState().Set(nsName, id, instancesStates[j])\n\t\t\t}\n\t\t})\n\t}\n\twg.Wait()\n\treturn errList\n}\n\n\/\/ ExecuteObject executes single test object operation based on provided object configuration.\nfunc (ste *simpleTestExecutor) ExecuteObject(ctx Context, object *api.Object, namespace string, replicaIndex int32, operation OperationType) *util.ErrorList {\n\tobjName := fmt.Sprintf(\"%v-%d\", object.Basename, replicaIndex)\n\tvar err error\n\tvar obj *unstructured.Unstructured\n\tswitch operation {\n\tcase CREATE_OBJECT, PATCH_OBJECT:\n\t\tvar mapping map[string]interface{}\n\t\tif object.TemplateFillMap == nil {\n\t\t\tmapping = make(map[string]interface{})\n\t\t} else {\n\t\t\tmapping = object.TemplateFillMap\n\t\t}\n\t\tmapping[namePlaceholder] = objName\n\t\tmapping[indexPlaceholder] = replicaIndex\n\t\tobj, err = ctx.GetTemplateProvider().TemplateToObject(object.ObjectTemplatePath, mapping)\n\t\tif err != nil {\n\t\t\treturn util.NewErrorList(fmt.Errorf(\"reading template (%v) error: %v\", object.ObjectTemplatePath, err))\n\t\t}\n\tcase DELETE_OBJECT:\n\t\tobj, err = ctx.GetTemplateProvider().RawToObject(object.ObjectTemplatePath)\n\t\tif err != nil {\n\t\t\treturn util.NewErrorList(fmt.Errorf(\"reading template (%v) for deletion error: %v\", object.ObjectTemplatePath, err))\n\t\t}\n\tdefault:\n\t\treturn util.NewErrorList(fmt.Errorf(\"unsupported operation %v for namespace %v object %v\", operation, namespace, objName))\n\t}\n\tgvk := obj.GroupVersionKind()\n\n\terrList := util.NewErrorList()\n\tif namespace == \"\" {\n\t\t\/\/ TODO: handle cluster level object\n\t} else {\n\t\tswitch operation {\n\t\tcase CREATE_OBJECT:\n\t\t\tif err := ctx.GetFramework().CreateObject(namespace, objName, obj); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v creation error: %v\", namespace, objName, err))\n\t\t\t}\n\t\tcase PATCH_OBJECT:\n\t\t\tif err := ctx.GetFramework().PatchObject(namespace, objName, obj); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v updating error: %v\", namespace, objName, err))\n\t\t\t}\n\t\tcase DELETE_OBJECT:\n\t\t\tif err := ctx.GetFramework().DeleteObject(gvk, namespace, objName); err != nil {\n\t\t\t\terrList.Append(fmt.Errorf(\"namespace %v object %v deletion error: %v\", namespace, objName, err))\n\t\t\t}\n\t\t}\n\t}\n\treturn errList\n}\n\nfunc getIdentifier(ctx Context, object *api.Object) (state.InstancesIdentifier, error) {\n\tobjName := fmt.Sprintf(\"%v-%d\", object.Basename, 0)\n\tvar mapping map[string]interface{}\n\tif object.TemplateFillMap == nil {\n\t\tmapping = make(map[string]interface{})\n\t} else {\n\t\tmapping = object.TemplateFillMap\n\t}\n\tmapping[namePlaceholder] = 
objName\n\tmapping[indexPlaceholder] = 0\n\tobj, err := ctx.GetTemplateProvider().RawToObject(object.ObjectTemplatePath)\n\tif err != nil {\n\t\treturn state.InstancesIdentifier{}, fmt.Errorf(\"reading template (%v) for identifier error: %v\", object.ObjectTemplatePath, err)\n\t}\n\tgvk := obj.GroupVersionKind()\n\treturn state.InstancesIdentifier{\n\t\tBasename: object.Basename,\n\t\tObjectKind: gvk.Kind,\n\t\tApiGroup: gvk.Group,\n\t}, nil\n}\n\nfunc createNamespacesList(namespaceRange *api.NamespaceRange) []string {\n\tif namespaceRange == nil {\n\t\treturn []string{\"\"}\n\t}\n\n\tnsList := make([]string, 0)\n\tnsBasename := framework.AutomanagedNamespaceName\n\tif namespaceRange.Basename != nil {\n\t\tnsBasename = *namespaceRange.Basename\n\t}\n\n\tfor i := namespaceRange.Min; i <= namespaceRange.Max; i++ {\n\t\tnsList = append(nsList, fmt.Sprintf(\"%v-%d\", nsBasename, i))\n\t}\n\treturn nsList\n}\n\nfunc isErrsCritical(*util.ErrorList) bool {\n\t\/\/ TODO: define critical errors\n\treturn false\n}\n\nfunc cleanupResources(ctx Context) {\n\tcleanupStartTime := time.Now()\n\tif errList := ctx.GetFramework().DeleteAutomanagedNamespaces(); !errList.IsEmpty() {\n\t\tglog.Errorf(\"Resource cleanup error: %v\", errList.String())\n\t\treturn\n\t}\n\tglog.Infof(\"Resources cleanup time: %v\", time.Since(cleanupStartTime))\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2009 The Ninep Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage srv\n\nimport (\n\t\"log\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/lionkov\/ninep\"\n)\n\nfunc (srv *Srv) version(req *Req) {\n\ttc := req.Tc\n\tconn := req.Conn\n\n\tif tc.Msize < ninep.IOHDRSZ {\n\t\treq.RespondError(&ninep.Error{\"msize too small\", ninep.EINVAL})\n\t\treturn\n\t}\n\n\tif tc.Msize < conn.Msize {\n\t\tconn.Msize = tc.Msize\n\t}\n\n\tconn.Dotu = tc.Version == \"9P2000.u\" && srv.Dotu\n\tver := \"9P2000\"\n\tif conn.Dotu {\n\t\tver = \"9P2000.u\"\n\t}\n\n\t\/* make sure that the responses of all current requests will be ignored *\/\n\tconn.Lock()\n\tfor tag, r := range conn.Reqs {\n\t\tif tag == ninep.NOTAG {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor rr := r; rr != nil; rr = rr.next {\n\t\t\trr.Lock()\n\t\t\trr.status |= reqFlush\n\t\t\trr.Unlock()\n\t\t}\n\t}\n\tconn.Unlock()\n\n\tatomic.AddUint32(&srv.Versioned, 1)\n\treq.RespondRversion(conn.Msize, ver)\n}\n\nfunc (srv *Srv) auth(req *Req) {\n\ttc := req.Tc\n\tconn := req.Conn\n\tif tc.Afid == ninep.NOFID {\n\t\treq.RespondError(Eunknownfid)\n\t\treturn\n\t}\n\n\treq.Afid = conn.FidNew(tc.Afid)\n\tif req.Afid == nil {\n\t\tlog.Printf(\"in auth(): Fid %v in use?\", tc.Afid)\n\t\treq.RespondError(Einuse)\n\t\treturn\n\t}\n\n\tvar user ninep.User = nil\n\tif tc.Unamenum != ninep.NOUID || conn.Dotu {\n\t\tuser = srv.Upool.Uid2User(int(tc.Unamenum))\n\t} else if tc.Uname != \"\" {\n\t\tuser = srv.Upool.Uname2User(tc.Uname)\n\t}\n\n\tif user == nil {\n\t\treq.RespondError(Enouser)\n\t\treturn\n\t}\n\n\treq.Afid.User = user\n\treq.Afid.Type = ninep.QTAUTH\n\tif aop, ok := (srv.ops).(AuthOps); ok {\n\t\taqid, err := aop.AuthInit(req.Afid, tc.Aname)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t} else {\n\t\t\taqid.Type |= ninep.QTAUTH \/\/ just in case\n\t\t\treq.RespondRauth(aqid)\n\t\t}\n\t} else {\n\t\treq.RespondError(Enoauth)\n\t}\n\n}\n\nfunc (srv *Srv) authPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rauth {\n\t\treq.Afid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) attach(req *Req) {\n\ttc 
:= req.Tc\n\tconn := req.Conn\n\tif tc.Fid == ninep.NOFID {\n\t\treq.RespondError(Eunknownfid)\n\t\treturn\n\t}\n\n\treq.Fid = conn.FidNew(tc.Fid)\n\tif req.Fid == nil {\n\t\tlog.Printf(\"attach: Fid %v in use? \", tc.Fid)\n\t\treq.RespondError(Einuse)\n\t\treturn\n\t}\n\n\tif tc.Afid != ninep.NOFID {\n\t\treq.Afid = conn.FidGet(tc.Afid)\n\t\tif req.Afid == nil {\n\t\t\treq.RespondError(Eunknownfid)\n\t\t}\n\t}\n\n\tvar user ninep.User = nil\n\tif tc.Unamenum != ninep.NOUID || conn.Dotu {\n\t\tuser = srv.Upool.Uid2User(int(tc.Unamenum))\n\t} else if tc.Uname != \"\" {\n\t\tuser = srv.Upool.Uname2User(tc.Uname)\n\t}\n\n\tif user == nil {\n\t\treq.RespondError(Enouser)\n\t\treturn\n\t}\n\n\treq.Fid.User = user\n\tif aop, ok := (srv.ops).(AuthOps); ok {\n\t\terr := aop.AuthCheck(req.Fid, req.Afid, tc.Aname)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t(srv.ops).(ReqOps).Attach(req)\n}\n\nfunc (srv *Srv) attachPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rattach {\n\t\treq.Fid.Type = req.Rc.Qid.Type\n\t\treq.Fid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) flush(req *Req) {\n\tconn := req.Conn\n\ttag := req.Tc.Oldtag\n\tninep.PackRflush(req.Rc)\n\tconn.Lock()\n\tr := conn.Reqs[tag]\n\tif r != nil {\n\t\treq.flushreq = r.flushreq\n\t\tr.flushreq = req\n\t}\n\tconn.Unlock()\n\n\tif r == nil {\n\t\t\/\/ there are no requests with that tag\n\t\treq.Respond()\n\t\treturn\n\t}\n\n\tr.Lock()\n\tstatus := r.status\n\tif (status & (reqWork | reqSaved)) == 0 {\n\t\t\/* the request is not worked on yet *\/\n\t\tr.status |= reqFlush\n\t}\n\tr.Unlock()\n\n\tif (status & (reqWork | reqSaved)) == 0 {\n\t\tr.Respond()\n\t} else {\n\t\tif op, ok := (srv.ops).(FlushOp); ok {\n\t\t\top.Flush(r)\n\t\t}\n\t}\n}\n\nfunc (srv *Srv) walk(req *Req) {\n\tconn := req.Conn\n\ttc := req.Tc\n\tfid := req.Fid\n\n\t\/* we can't walk regular files, only clone them *\/\n\tif len(tc.Wname) > 0 && (fid.Type&ninep.QTDIR) == 0 {\n\t\treq.RespondError(Enotdir)\n\t\treturn\n\t}\n\n\t\/* we can't walk open files *\/\n\tif fid.opened {\n\t\treq.RespondError(Ebaduse)\n\t\treturn\n\t}\n\n\tif tc.Fid != tc.Newfid {\n\t\treq.Newfid = conn.FidNew(tc.Newfid)\n\t\tif req.Newfid == nil {\n\t\t\tlog.Printf(\"walk: fid %v in use? 
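// Note on the flush handler above: Tflush chains itself onto the target
// request's flushreq list, then marks the request reqFlush only if no
// worker has picked it up yet (neither reqWork nor reqSaved is set); in
// that case the old request is answered immediately and its eventual
// response is suppressed. If the request is already being worked on, the
// server can only delegate to the provider's FlushOp, when implemented.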
\", tc.Newfid)\n\t\t\treq.RespondError(Einuse)\n\t\t\treturn\n\t\t}\n\n\t\treq.Newfid.User = fid.User\n\t\treq.Newfid.Type = fid.Type\n\t} else {\n\t\treq.Newfid = req.Fid\n\t\treq.Newfid.IncRef()\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Walk(req)\n}\n\nfunc (srv *Srv) walkPost(req *Req) {\n\trc := req.Rc\n\tif rc == nil || rc.Type != ninep.Rwalk || req.Newfid == nil {\n\t\treturn\n\t}\n\n\tn := len(rc.Wqid)\n\tif n > 0 {\n\t\treq.Newfid.Type = rc.Wqid[n-1].Type\n\t} else {\n\t\treq.Newfid.Type = req.Fid.Type\n\t}\n\n\t\/\/ Don't retain the fid if only a partial walk succeeded\n\tif n != len(req.Tc.Wname) {\n\t\treturn\n\t}\n\n\tif req.Newfid.fid != req.Fid.fid {\n\t\treq.Newfid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) open(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif fid.opened {\n\t\treq.RespondError(Eopen)\n\t\treturn\n\t}\n\n\tif (fid.Type&ninep.QTDIR) != 0 && tc.Mode != ninep.OREAD {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\tfid.Omode = tc.Mode\n\t(req.Conn.Srv.ops).(ReqOps).Open(req)\n}\n\nfunc (srv *Srv) openPost(req *Req) {\n\tif req.Fid != nil {\n\t\treq.Fid.opened = req.Rc != nil && req.Rc.Type == ninep.Ropen\n\t}\n}\n\nfunc (srv *Srv) create(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif fid.opened {\n\t\treq.RespondError(Eopen)\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTDIR) == 0 {\n\t\treq.RespondError(Enotdir)\n\t\treturn\n\t}\n\n\t\/* can't open directories for other than reading *\/\n\tif (tc.Perm&ninep.DMDIR) != 0 && tc.Mode != ninep.OREAD {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\t\/* can't create special files if not 9P2000.u *\/\n\tif (tc.Perm&(ninep.DMNAMEDPIPE|ninep.DMSYMLINK|ninep.DMLINK|ninep.DMDEVICE|ninep.DMSOCKET)) != 0 && !req.Conn.Dotu {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\tfid.Omode = tc.Mode\n\t(req.Conn.Srv.ops).(ReqOps).Create(req)\n}\n\nfunc (srv *Srv) createPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rcreate && req.Fid != nil {\n\t\treq.Fid.Type = req.Rc.Qid.Type\n\t\treq.Fid.opened = true\n\t}\n}\n\nfunc (srv *Srv) read(req *Req) {\n\ttc := req.Tc\n\tfid := req.Fid\n\tif tc.Count+ninep.IOHDRSZ > req.Conn.Msize {\n\t\treq.RespondError(Etoolarge)\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\tvar n int\n\n\t\trc := req.Rc\n\t\terr := ninep.InitRread(rc, tc.Count)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t\treturn\n\t\t}\n\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\tn, err = op.AuthRead(fid, tc.Offset, rc.Data)\n\t\t\tif err != nil {\n\t\t\t\treq.RespondError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tninep.SetRreadCount(rc, uint32(n))\n\t\t\treq.Respond()\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTDIR) != 0 {\n\t\tfid.Lock()\n\t\tif tc.Offset == 0 {\n\t\t\tfid.Diroffset = 0\n\t\t} else if tc.Offset != fid.Diroffset {\n\t\t\t\/\/ This used to be an error, at this\n\t\t\t\/\/ level. But maybe the provider can handle\n\t\t\t\/\/ offsets that change. In one version of 9p\n\t\t\t\/\/ we were able to support arbitrary\n\t\t\t\/\/ offsets. 
At the least, we're going to let\n\t\t\t\/\/ the provider decide if this is an error.\n\t\t\tfid.Diroffset = tc.Offset\n\t\t}\n\t\tfid.Unlock()\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Read(req)\n}\n\nfunc (srv *Srv) readPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rread && (req.Fid.Type&ninep.QTDIR) != 0 {\n\t\treq.Fid.Lock()\n\t\treq.Fid.Diroffset += uint64(req.Rc.Count)\n\t\treq.Fid.Unlock()\n\t}\n}\n\nfunc (srv *Srv) write(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\ttc := req.Tc\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\tn, err := op.AuthWrite(req.Fid, tc.Offset, tc.Data)\n\t\t\tif err != nil {\n\t\t\t\treq.RespondError(err)\n\t\t\t} else {\n\t\t\t\treq.RespondRwrite(uint32(n))\n\t\t\t}\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif !fid.opened || (fid.Type&ninep.QTDIR) != 0 || (fid.Omode&3) == ninep.OREAD {\n\t\treq.RespondError(Ebaduse)\n\t\treturn\n\t}\n\n\tif tc.Count+ninep.IOHDRSZ > req.Conn.Msize {\n\t\treq.RespondError(Etoolarge)\n\t\treturn\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Write(req)\n}\n\nfunc (srv *Srv) clunk(req *Req) {\n\tfid := req.Fid\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\top.AuthDestroy(fid)\n\t\t\treq.RespondRclunk()\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Clunk(req)\n}\n\nfunc (srv *Srv) clunkPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rclunk && req.Fid != nil {\n\t\treq.Fid.DecRef()\n\t}\n}\n\nfunc (srv *Srv) remove(req *Req) { (req.Conn.Srv.ops).(ReqOps).Remove(req) }\n\nfunc (srv *Srv) removePost(req *Req) {\n\tif req.Rc != nil && req.Fid != nil {\n\t\treq.Fid.DecRef()\n\t}\n}\n\nfunc (srv *Srv) stat(req *Req) { (req.Conn.Srv.ops).(ReqOps).Stat(req) }\n\nfunc (srv *Srv) wstat(req *Req) {\n\t\/*\n\t\tfid := req.Fid\n\t\td := &req.Tc.Dir\n\t\tif d.Type != uint16(0xFFFF) || d.Dev != uint32(0xFFFFFFFF) || d.Version != uint32(0xFFFFFFFF) ||\n\t\t\td.Path != uint64(0xFFFFFFFFFFFFFFFF) {\n\t\t\treq.RespondError(Eperm)\n\t\t\treturn\n\t\t}\n\n\t\tif (d.Mode != 0xFFFFFFFF) && (((fid.Type&ninep.QTDIR) != 0 && (d.Mode&ninep.DMDIR) == 0) ||\n\t\t\t((d.Type&ninep.QTDIR) == 0 && (d.Mode&ninep.DMDIR) != 0)) {\n\t\t\treq.RespondError(Edirchange)\n\t\t\treturn\n\t\t}\n\t*\/\n\n\t(req.Conn.Srv.ops).(ReqOps).Wstat(req)\n}\nRestage \"Check that a fid has been opened in read\"\/\/ Copyright 2009 The Ninep Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage srv\n\nimport (\n\t\"log\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/lionkov\/ninep\"\n)\n\nfunc (srv *Srv) version(req *Req) {\n\ttc := req.Tc\n\tconn := req.Conn\n\n\tif tc.Msize < ninep.IOHDRSZ {\n\t\treq.RespondError(&ninep.Error{\"msize too small\", ninep.EINVAL})\n\t\treturn\n\t}\n\n\tif tc.Msize < conn.Msize {\n\t\tconn.Msize = tc.Msize\n\t}\n\n\tconn.Dotu = tc.Version == \"9P2000.u\" && srv.Dotu\n\tver := \"9P2000\"\n\tif conn.Dotu {\n\t\tver = \"9P2000.u\"\n\t}\n\n\t\/* make sure that the responses of all current requests will be ignored *\/\n\tconn.Lock()\n\tfor tag, r := range conn.Reqs {\n\t\tif tag == ninep.NOTAG {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor rr := r; rr != nil; rr = rr.next {\n\t\t\trr.Lock()\n\t\t\trr.status |= reqFlush\n\t\t\trr.Unlock()\n\t\t}\n\t}\n\tconn.Unlock()\n\n\tatomic.AddUint32(&srv.Versioned, 1)\n\treq.RespondRversion(conn.Msize, ver)\n}\n\nfunc (srv *Srv) auth(req *Req) {\n\ttc := req.Tc\n\tconn := req.Conn\n\tif tc.Afid == ninep.NOFID {\n\t\treq.RespondError(Eunknownfid)\n\t\treturn\n\t}\n\n\treq.Afid = conn.FidNew(tc.Afid)\n\tif req.Afid == nil {\n\t\tlog.Printf(\"in auth(): Fid %v in use?\", tc.Afid)\n\t\treq.RespondError(Einuse)\n\t\treturn\n\t}\n\n\tvar user ninep.User = nil\n\tif tc.Unamenum != ninep.NOUID || conn.Dotu {\n\t\tuser = srv.Upool.Uid2User(int(tc.Unamenum))\n\t} else if tc.Uname != \"\" {\n\t\tuser = srv.Upool.Uname2User(tc.Uname)\n\t}\n\n\tif user == nil {\n\t\treq.RespondError(Enouser)\n\t\treturn\n\t}\n\n\treq.Afid.User = user\n\treq.Afid.Type = ninep.QTAUTH\n\tif aop, ok := (srv.ops).(AuthOps); ok {\n\t\taqid, err := aop.AuthInit(req.Afid, tc.Aname)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t} else {\n\t\t\taqid.Type |= ninep.QTAUTH \/\/ just in case\n\t\t\treq.RespondRauth(aqid)\n\t\t}\n\t} else {\n\t\treq.RespondError(Enoauth)\n\t}\n\n}\n\nfunc (srv *Srv) authPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rauth {\n\t\treq.Afid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) attach(req *Req) {\n\ttc := req.Tc\n\tconn := req.Conn\n\tif tc.Fid == ninep.NOFID {\n\t\treq.RespondError(Eunknownfid)\n\t\treturn\n\t}\n\n\treq.Fid = conn.FidNew(tc.Fid)\n\tif req.Fid == nil {\n\t\tlog.Printf(\"attach: Fid %v in use? 
\", tc.Fid)\n\t\treq.RespondError(Einuse)\n\t\treturn\n\t}\n\n\tif tc.Afid != ninep.NOFID {\n\t\treq.Afid = conn.FidGet(tc.Afid)\n\t\tif req.Afid == nil {\n\t\t\treq.RespondError(Eunknownfid)\n\t\t}\n\t}\n\n\tvar user ninep.User = nil\n\tif tc.Unamenum != ninep.NOUID || conn.Dotu {\n\t\tuser = srv.Upool.Uid2User(int(tc.Unamenum))\n\t} else if tc.Uname != \"\" {\n\t\tuser = srv.Upool.Uname2User(tc.Uname)\n\t}\n\n\tif user == nil {\n\t\treq.RespondError(Enouser)\n\t\treturn\n\t}\n\n\treq.Fid.User = user\n\tif aop, ok := (srv.ops).(AuthOps); ok {\n\t\terr := aop.AuthCheck(req.Fid, req.Afid, tc.Aname)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t(srv.ops).(ReqOps).Attach(req)\n}\n\nfunc (srv *Srv) attachPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rattach {\n\t\treq.Fid.Type = req.Rc.Qid.Type\n\t\treq.Fid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) flush(req *Req) {\n\tconn := req.Conn\n\ttag := req.Tc.Oldtag\n\tninep.PackRflush(req.Rc)\n\tconn.Lock()\n\tr := conn.Reqs[tag]\n\tif r != nil {\n\t\treq.flushreq = r.flushreq\n\t\tr.flushreq = req\n\t}\n\tconn.Unlock()\n\n\tif r == nil {\n\t\t\/\/ there are no requests with that tag\n\t\treq.Respond()\n\t\treturn\n\t}\n\n\tr.Lock()\n\tstatus := r.status\n\tif (status & (reqWork | reqSaved)) == 0 {\n\t\t\/* the request is not worked on yet *\/\n\t\tr.status |= reqFlush\n\t}\n\tr.Unlock()\n\n\tif (status & (reqWork | reqSaved)) == 0 {\n\t\tr.Respond()\n\t} else {\n\t\tif op, ok := (srv.ops).(FlushOp); ok {\n\t\t\top.Flush(r)\n\t\t}\n\t}\n}\n\nfunc (srv *Srv) walk(req *Req) {\n\tconn := req.Conn\n\ttc := req.Tc\n\tfid := req.Fid\n\n\t\/* we can't walk regular files, only clone them *\/\n\tif len(tc.Wname) > 0 && (fid.Type&ninep.QTDIR) == 0 {\n\t\treq.RespondError(Enotdir)\n\t\treturn\n\t}\n\n\t\/* we can't walk open files *\/\n\tif fid.opened {\n\t\treq.RespondError(Ebaduse)\n\t\treturn\n\t}\n\n\tif tc.Fid != tc.Newfid {\n\t\treq.Newfid = conn.FidNew(tc.Newfid)\n\t\tif req.Newfid == nil {\n\t\t\tlog.Printf(\"walk: fid %v in use? 
\", tc.Newfid)\n\t\t\treq.RespondError(Einuse)\n\t\t\treturn\n\t\t}\n\n\t\treq.Newfid.User = fid.User\n\t\treq.Newfid.Type = fid.Type\n\t} else {\n\t\treq.Newfid = req.Fid\n\t\treq.Newfid.IncRef()\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Walk(req)\n}\n\nfunc (srv *Srv) walkPost(req *Req) {\n\trc := req.Rc\n\tif rc == nil || rc.Type != ninep.Rwalk || req.Newfid == nil {\n\t\treturn\n\t}\n\n\tn := len(rc.Wqid)\n\tif n > 0 {\n\t\treq.Newfid.Type = rc.Wqid[n-1].Type\n\t} else {\n\t\treq.Newfid.Type = req.Fid.Type\n\t}\n\n\t\/\/ Don't retain the fid if only a partial walk succeeded\n\tif n != len(req.Tc.Wname) {\n\t\treturn\n\t}\n\n\tif req.Newfid.fid != req.Fid.fid {\n\t\treq.Newfid.IncRef()\n\t}\n}\n\nfunc (srv *Srv) open(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif fid.opened {\n\t\treq.RespondError(Eopen)\n\t\treturn\n\t}\n\n\tif (fid.Type&ninep.QTDIR) != 0 && tc.Mode != ninep.OREAD {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\tfid.Omode = tc.Mode\n\t(req.Conn.Srv.ops).(ReqOps).Open(req)\n}\n\nfunc (srv *Srv) openPost(req *Req) {\n\tif req.Fid != nil {\n\t\treq.Fid.opened = req.Rc != nil && req.Rc.Type == ninep.Ropen\n\t}\n}\n\nfunc (srv *Srv) create(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif fid.opened {\n\t\treq.RespondError(Eopen)\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTDIR) == 0 {\n\t\treq.RespondError(Enotdir)\n\t\treturn\n\t}\n\n\t\/* can't open directories for other than reading *\/\n\tif (tc.Perm&ninep.DMDIR) != 0 && tc.Mode != ninep.OREAD {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\t\/* can't create special files if not 9P2000.u *\/\n\tif (tc.Perm&(ninep.DMNAMEDPIPE|ninep.DMSYMLINK|ninep.DMLINK|ninep.DMDEVICE|ninep.DMSOCKET)) != 0 && !req.Conn.Dotu {\n\t\treq.RespondError(Eperm)\n\t\treturn\n\t}\n\n\tfid.Omode = tc.Mode\n\t(req.Conn.Srv.ops).(ReqOps).Create(req)\n}\n\nfunc (srv *Srv) createPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rcreate && req.Fid != nil {\n\t\treq.Fid.Type = req.Rc.Qid.Type\n\t\treq.Fid.opened = true\n\t}\n}\n\nfunc (srv *Srv) read(req *Req) {\n\ttc := req.Tc\n\tfid := req.Fid\n\tif tc.Count+ninep.IOHDRSZ > req.Conn.Msize {\n\t\treq.RespondError(Etoolarge)\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\tvar n int\n\n\t\trc := req.Rc\n\t\terr := ninep.InitRread(rc, tc.Count)\n\t\tif err != nil {\n\t\t\treq.RespondError(err)\n\t\t\treturn\n\t\t}\n\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\tn, err = op.AuthRead(fid, tc.Offset, rc.Data)\n\t\t\tif err != nil {\n\t\t\t\treq.RespondError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tninep.SetRreadCount(rc, uint32(n))\n\t\t\treq.Respond()\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif !fid.opened || (fid.Omode&3) == ninep.OWRITE {\n\t\treq.RespondError(Ebaduse)\n\t\treturn\n\t}\n\n\tif (fid.Type & ninep.QTDIR) != 0 {\n\t\tfid.Lock()\n\t\tif tc.Offset == 0 {\n\t\t\tfid.Diroffset = 0\n\t\t} else if tc.Offset != fid.Diroffset {\n\t\t\t\/\/ This used to be an error, at this\n\t\t\t\/\/ level. But maybe the provider can handle\n\t\t\t\/\/ offsets that change. In one version of 9p\n\t\t\t\/\/ we were able to support arbitrary\n\t\t\t\/\/ offsets. 
At the least, we're going to let\n\t\t\t\/\/ the provider decide if this is an error.\n\t\t\tfid.Diroffset = tc.Offset\n\t\t}\n\t\tfid.Unlock()\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Read(req)\n}\n\nfunc (srv *Srv) readPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rread && (req.Fid.Type&ninep.QTDIR) != 0 {\n\t\treq.Fid.Lock()\n\t\treq.Fid.Diroffset += uint64(req.Rc.Count)\n\t\treq.Fid.Unlock()\n\t}\n}\n\nfunc (srv *Srv) write(req *Req) {\n\tfid := req.Fid\n\ttc := req.Tc\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\ttc := req.Tc\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\tn, err := op.AuthWrite(req.Fid, tc.Offset, tc.Data)\n\t\t\tif err != nil {\n\t\t\t\treq.RespondError(err)\n\t\t\t} else {\n\t\t\t\treq.RespondRwrite(uint32(n))\n\t\t\t}\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\tif !fid.opened || (fid.Type&ninep.QTDIR) != 0 || (fid.Omode&3) == ninep.OREAD {\n\t\treq.RespondError(Ebaduse)\n\t\treturn\n\t}\n\n\tif tc.Count+ninep.IOHDRSZ > req.Conn.Msize {\n\t\treq.RespondError(Etoolarge)\n\t\treturn\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Write(req)\n}\n\nfunc (srv *Srv) clunk(req *Req) {\n\tfid := req.Fid\n\tif (fid.Type & ninep.QTAUTH) != 0 {\n\t\tif op, ok := (req.Conn.Srv.ops).(AuthOps); ok {\n\t\t\top.AuthDestroy(fid)\n\t\t\treq.RespondRclunk()\n\t\t} else {\n\t\t\treq.RespondError(Enotimpl)\n\t\t}\n\n\t\treturn\n\t}\n\n\t(req.Conn.Srv.ops).(ReqOps).Clunk(req)\n}\n\nfunc (srv *Srv) clunkPost(req *Req) {\n\tif req.Rc != nil && req.Rc.Type == ninep.Rclunk && req.Fid != nil {\n\t\treq.Fid.DecRef()\n\t}\n}\n\nfunc (srv *Srv) remove(req *Req) { (req.Conn.Srv.ops).(ReqOps).Remove(req) }\n\nfunc (srv *Srv) removePost(req *Req) {\n\tif req.Rc != nil && req.Fid != nil {\n\t\treq.Fid.DecRef()\n\t}\n}\n\nfunc (srv *Srv) stat(req *Req) { (req.Conn.Srv.ops).(ReqOps).Stat(req) }\n\nfunc (srv *Srv) wstat(req *Req) {\n\t\/*\n\t\tfid := req.Fid\n\t\td := &req.Tc.Dir\n\t\tif d.Type != uint16(0xFFFF) || d.Dev != uint32(0xFFFFFFFF) || d.Version != uint32(0xFFFFFFFF) ||\n\t\t\td.Path != uint64(0xFFFFFFFFFFFFFFFF) {\n\t\t\treq.RespondError(Eperm)\n\t\t\treturn\n\t\t}\n\n\t\tif (d.Mode != 0xFFFFFFFF) && (((fid.Type&ninep.QTDIR) != 0 && (d.Mode&ninep.DMDIR) == 0) ||\n\t\t\t((d.Type&ninep.QTDIR) == 0 && (d.Mode&ninep.DMDIR) != 0)) {\n\t\t\treq.RespondError(Edirchange)\n\t\t\treturn\n\t\t}\n\t*\/\n\n\t(req.Conn.Srv.ops).(ReqOps).Wstat(req)\n}\n<|endoftext|>"} {"text":"package main\n\nimport \"fmt\"\n\nvar allocs int\n\nfunc my_append(slice []int, items ...int) []int {\n\ttotal := len(slice)+len(items)\n\n\tif total > cap(slice) {\n\t\tnewSlice := make([]int, len(slice), total*2+1)\n\t\tcopy(newSlice, slice)\n\t\tslice = newSlice\n\t\tallocs++\n\t}\n\n\tn := len(slice)\n\tslice = slice[:total]\n\tcopy(slice[n:], items)\n\treturn slice\n}\n\nfunc main() {\n\tvar s []int\n\n\tfor i := 0; i < 20; i++ {\n\t\tfmt.Println(\"i =\", i, \"s =\", s)\n\t\ts = my_append(s, i)\n\t}\n\n\tfmt.Println(\"Finished, s =\", s)\n\ts = my_append(s, s...)\n\tfmt.Println(\"After appending to itself:\", s)\n\tfmt.Println(\"Total allocations:\", allocs)\n}\nUpdated my_append() example description\/* A small example demonstrating how append() is implemented\n *\n * Based on the ideas exposed in http:\/\/blog.golang.org\/slices\n *\/\n\npackage main\n\nimport \"fmt\"\n\nvar allocs int\n\nfunc my_append(slice []int, items ...int) []int {\n\ttotal := len(slice)+len(items)\n\n\tif total > cap(slice) {\n\t\tnewSlice := make([]int, len(slice), total*2+1)\n\t\tcopy(newSlice, 
slice)\n\t\tslice = newSlice\n\t\tallocs++\n\t}\n\n\tn := len(slice)\n\tslice = slice[:total]\n\tcopy(slice[n:], items)\n\treturn slice\n}\n\nfunc main() {\n\tvar s []int\n\n\tfor i := 0; i < 20; i++ {\n\t\tfmt.Println(\"i =\", i, \"s =\", s)\n\t\ts = my_append(s, i)\n\t}\n\n\tfmt.Println(\"Finished, s =\", s)\n\ts = my_append(s, s...)\n\tfmt.Println(\"After appending to itself:\", s)\n\tfmt.Println(\"Total allocations:\", allocs)\n}\n<|endoftext|>"} {"text":"package services\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\n\t\"github.com\/coreos\/go-oidc\/jose\"\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ OIDCConnector specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. google in some organisation\ntype OIDCConnector interface {\n\t\/\/ Name is a provider name, 'e.g.' google, used internally\n\tGetName() string\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. https:\/\/accounts.google.com\n\tGetIssuerURL() string\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tGetClientID() string\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tGetClientSecret() string\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tGetRedirectURL() string\n\t\/\/ Display - Friendly name for this provider.\n\tGetDisplay() string\n\t\/\/ Scope is additional scopes set by provder\n\tGetScope() []string\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tGetClaimsToRoles() []ClaimMapping\n\t\/\/ GetClaims returns list of claims expected by mappings\n\tGetClaims() []string\n\t\/\/ MapClaims maps claims to roles\n\tMapClaims(claims jose.Claims) []string\n\t\/\/ Check checks OIDC connector for errors\n\tCheck() error\n\t\/\/ SetClientSecret sets client secret to some value\n\tSetClientSecret(secret string)\n}\n\nvar connectorMarshaler OIDCConnectorMarshaler = &TeleportOIDCConnectorMarshaler{}\n\n\/\/ SetOIDCConnectorMarshaler sets global user marshaler\nfunc SetOIDCConnectorMarshaler(m OIDCConnectorMarshaler) {\n\tmarshalerMutex.Lock()\n\tdefer marshalerMutex.Unlock()\n\tconnectorMarshaler = m\n}\n\n\/\/ GetOIDCConnectorMarshaler returns currently set user marshaler\nfunc GetOIDCConnectorMarshaler() OIDCConnectorMarshaler {\n\tmarshalerMutex.RLock()\n\tdefer marshalerMutex.RUnlock()\n\treturn connectorMarshaler\n}\n\n\/\/ OIDCConnectorMarshaler implements marshal\/unmarshal of User implementations\n\/\/ mostly adds support for extended versions\ntype OIDCConnectorMarshaler interface {\n\t\/\/ UnmarshalOIDCConnector unmarshals connector from binary representation\n\tUnmarshalOIDCConnector(bytes []byte) (OIDCConnector, error)\n\t\/\/ MarshalOIDCConnector marshals connector to binary representation\n\tMarshalOIDCConnector(c OIDCConnector, opts ...MarshalOption) ([]byte, error)\n}\n\n\/\/ GetOIDCConnectorSchema returns schema for OIDCConnector\nfunc GetOIDCConnectorSchema() string {\n\treturn fmt.Sprintf(OIDCConnectorV2SchemaTemplate, MetadataSchema, OIDCConnectorSpecV2Schema)\n}\n\ntype TeleportOIDCConnectorMarshaler struct{}\n\n\/\/ UnmarshalOIDCConnector unmarshals connector from\nfunc (*TeleportOIDCConnectorMarshaler) UnmarshalOIDCConnector(bytes []byte) (OIDCConnector, error) {\n\tvar h ResourceHeader\n\terr := json.Unmarshal(bytes, &h)\n\tif err 
!= nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tswitch h.Version {\n\tcase \"\":\n\t\tvar c OIDCConnectorV1\n\t\terr := json.Unmarshal(bytes, &c)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn c.V2(), nil\n\tcase V2:\n\t\tvar c OIDCConnectorV2\n\t\tif err := utils.UnmarshalWithSchema(GetOIDCConnectorSchema(), &c, bytes); err != nil {\n\t\t\treturn nil, trace.BadParameter(err.Error())\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\treturn nil, trace.BadParameter(\"OIDC connector resource version %v is not supported\", h.Version)\n}\n\n\/\/ MarshalOIDCConnector marshals OIDC connector into JSON\nfunc (*TeleportOIDCConnectorMarshaler) MarshalOIDCConnector(c OIDCConnector, opts ...MarshalOption) ([]byte, error) {\n\tcfg, err := collectOptions(opts)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\ttype connv1 interface {\n\t\tV1() *OIDCConnectorV1\n\t}\n\n\ttype connv2 interface {\n\t\tV2() *OIDCConnectorV2\n\t}\n\tversion := cfg.GetVersion()\n\tswitch version {\n\tcase V1:\n\t\tv, ok := c.(connv1)\n\t\tif !ok {\n\t\t\treturn nil, trace.BadParameter(\"don't know how to marshal %v\", V1)\n\t\t}\n\t\treturn json.Marshal(v.V1())\n\tcase V2:\n\t\tv, ok := c.(connv2)\n\t\tif !ok {\n\t\t\treturn nil, trace.BadParameter(\"don't know how to marshal %v\", V2)\n\t\t}\n\t\treturn json.Marshal(v.V2())\n\tdefault:\n\t\treturn nil, trace.BadParameter(\"version %v is not supported\", version)\n\t}\n}\n\n\/\/ OIDCConnectorV2 is version 2 resource spec for OIDC connector\ntype OIDCConnectorV2 struct {\n\t\/\/ Kind is a resource kind\n\tKind string `json:\"kind\"`\n\t\/\/ Version is version\n\tVersion string `json:\"version\"`\n\t\/\/ Metadata is connector metadata\n\tMetadata Metadata `json:\"metadata\"`\n\t\/\/ Spec contains connector specification\n\tSpec OIDCConnectorSpecV2 `json:\"spec\"`\n}\n\n\/\/ V2 returns V2 version of the resource\nfunc (o *OIDCConnectorV2) V2() *OIDCConnectorV2 {\n\treturn o\n}\n\n\/\/ V1 converts OIDCConnectorV2 to OIDCConnectorV1 format\nfunc (o *OIDCConnectorV2) V1() *OIDCConnectorV1 {\n\treturn &OIDCConnectorV1{\n\t\tID: o.Metadata.Name,\n\t\tIssuerURL: o.Spec.IssuerURL,\n\t\tClientID: o.Spec.ClientID,\n\t\tClientSecret: o.Spec.ClientSecret,\n\t\tRedirectURL: o.Spec.RedirectURL,\n\t\tDisplay: o.Spec.Display,\n\t\tScope: o.Spec.Scope,\n\t\tClaimsToRoles: o.Spec.ClaimsToRoles,\n\t}\n}\n\n\/\/ SetClientSecret sets client secret to some value\nfunc (o *OIDCConnectorV2) SetClientSecret(secret string) {\n\to.Spec.ClientSecret = secret\n}\n\n\/\/ GetName returns a provider id, e.g. google, used internally\nfunc (o *OIDCConnectorV2) GetName() string {\n\treturn o.Metadata.Name\n}\n\n\/\/ GetIssuerURL returns the endpoint of the provider, e.g. 
https:\/\/accounts.google.com\nfunc (o *OIDCConnectorV2) GetIssuerURL() string {\n\treturn o.Spec.IssuerURL\n}\n\n\/\/ ClientID is id for authentication client (in our case it's our Auth server)\nfunc (o *OIDCConnectorV2) GetClientID() string {\n\treturn o.Spec.ClientID\n}\n\n\/\/ ClientSecret is used to authenticate our client and should not\n\/\/ be visible to end user\nfunc (o *OIDCConnectorV2) GetClientSecret() string {\n\treturn o.Spec.ClientSecret\n}\n\n\/\/ RedirectURL - Identity provider will use this URL to redirect\n\/\/ client's browser back to it after successfull authentication\n\/\/ Should match the URL on Provider's side\nfunc (o *OIDCConnectorV2) GetRedirectURL() string {\n\treturn o.Spec.RedirectURL\n}\n\n\/\/ Display - Friendly name for this provider.\nfunc (o *OIDCConnectorV2) GetDisplay() string {\n\tif o.Spec.Display != \"\" {\n\t\treturn o.Spec.Display\n\t}\n\treturn o.GetName()\n}\n\n\/\/ Scope is additional scopes set by provder\nfunc (o *OIDCConnectorV2) GetScope() []string {\n\treturn o.Spec.Scope\n}\n\n\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\nfunc (o *OIDCConnectorV2) GetClaimsToRoles() []ClaimMapping {\n\treturn o.Spec.ClaimsToRoles\n}\n\n\/\/ GetClaims returns list of claims expected by mappings\nfunc (o *OIDCConnectorV2) GetClaims() []string {\n\tvar out []string\n\tfor _, mapping := range o.Spec.ClaimsToRoles {\n\t\tout = append(out, mapping.Claim)\n\t}\n\treturn utils.Deduplicate(out)\n}\n\n\/\/ MapClaims maps claims to roles\nfunc (o *OIDCConnectorV2) MapClaims(claims jose.Claims) []string {\n\tvar roles []string\n\tfor _, mapping := range o.Spec.ClaimsToRoles {\n\t\tfor claimName := range claims {\n\t\t\tif claimName != mapping.Claim {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclaimValue, ok, _ := claims.StringClaim(claimName)\n\t\t\tif ok && claimValue == mapping.Value {\n\t\t\t\troles = append(roles, mapping.Roles...)\n\t\t\t}\n\t\t\tclaimValues, ok, _ := claims.StringsClaim(claimName)\n\t\t\tif ok {\n\t\t\t\tfor _, claimValue := range claimValues {\n\t\t\t\t\tif claimValue == mapping.Value {\n\t\t\t\t\t\troles = append(roles, mapping.Roles...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn utils.Deduplicate(roles)\n}\n\n\/\/ Check returns nil if all parameters are great, err otherwise\nfunc (o *OIDCConnectorV2) Check() error {\n\tif o.Metadata.Name == \"\" {\n\t\treturn trace.BadParameter(\"ID: missing connector name\")\n\t}\n\tif _, err := url.Parse(o.Spec.IssuerURL); err != nil {\n\t\treturn trace.BadParameter(\"IssuerURL: bad url: '%v'\", o.Spec.IssuerURL)\n\t}\n\tif _, err := url.Parse(o.Spec.RedirectURL); err != nil {\n\t\treturn trace.BadParameter(\"RedirectURL: bad url: '%v'\", o.Spec.RedirectURL)\n\t}\n\tif o.Spec.ClientID == \"\" {\n\t\treturn trace.BadParameter(\"ClientID: missing client id\")\n\t}\n\tif o.Spec.ClientSecret == \"\" {\n\t\treturn trace.BadParameter(\"ClientSecret: missing client secret\")\n\t}\n\treturn nil\n}\n\n\/\/ OIDCConnectorV2SchemaTemplate is a template JSON Schema for user\nconst OIDCConnectorV2SchemaTemplate = `{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"kind\", \"spec\", \"metadata\", \"version\"],\n \"properties\": {\n \"kind\": {\"type\": \"string\"},\n \"version\": {\"type\": \"string\", \"default\": \"v1\"},\n \"metadata\": %v,\n \"spec\": %v\n }\n}`\n\n\/\/ OIDCConnectorSpecV2 specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. 
google in some organisation\ntype OIDCConnectorSpecV2 struct {\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. https:\/\/accounts.google.com\n\tIssuerURL string `json:\"issuer_url\"`\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tClientID string `json:\"client_id\"`\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tClientSecret string `json:\"client_secret\"`\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tRedirectURL string `json:\"redirect_url\"`\n\t\/\/ Display - Friendly name for this provider.\n\tDisplay string `json:\"display,omitempty\"`\n\t\/\/ Scope is additional scopes set by provder\n\tScope []string `json:\"scope,omitempty\"`\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tClaimsToRoles []ClaimMapping `json:\"claims_to_roles,omitempty\"`\n}\n\n\/\/ OIDCConnectorSpecV2Schema is a JSON Schema for OIDC Connector\nvar OIDCConnectorSpecV2Schema = fmt.Sprintf(`{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"issuer_url\", \"client_id\", \"client_secret\", \"redirect_url\"],\n \"properties\": {\n \"issuer_url\": {\"type\": \"string\"},\n \"client_id\": {\"type\": \"string\"},\n \"client_secret\": {\"type\": \"string\"},\n \"redirect_url\": {\"type\": \"string\"},\n \"scope\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n },\n \"claims_to_roles\": {\n \"type\": \"array\",\n \"items\": %v\n }\n }\n}`, ClaimMappingSchema)\n\n\/\/ GetClaimNames returns a list of claim names from the claim values\nfunc GetClaimNames(claims jose.Claims) []string {\n\tvar out []string\n\tfor claim := range claims {\n\t\tout = append(out, claim)\n\t}\n\treturn out\n}\n\n\/\/ ClaimMapping is OIDC claim mapping that maps\n\/\/ claim name to teleport roles\ntype ClaimMapping struct {\n\t\/\/ Claim is OIDC claim name\n\tClaim string `json:\"claim\"`\n\t\/\/ Value is claim value to match\n\tValue string `json:\"value\"`\n\t\/\/ Roles is a list of teleport roles to match\n\tRoles []string `json:\"roles\"`\n}\n\n\/\/ ClaimMappingSchema is JSON schema for claim mapping\nconst ClaimMappingSchema = `{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"claim\", \"value\", \"roles\"],\n \"properties\": {\n \"claim\": {\"type\": \"string\"}, \n \"value\": {\"type\": \"string\"},\n \"roles\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n}`\n\n\/\/ OIDCConnectorV1 specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. google in some organisation\ntype OIDCConnectorV1 struct {\n\t\/\/ ID is a provider id, 'e.g.' google, used internally\n\tID string `json:\"id\"`\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. 
https:\/\/accounts.google.com\n\tIssuerURL string `json:\"issuer_url\"`\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tClientID string `json:\"client_id\"`\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tClientSecret string `json:\"client_secret\"`\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tRedirectURL string `json:\"redirect_url\"`\n\t\/\/ Display - Friendly name for this provider.\n\tDisplay string `json:\"display\"`\n\t\/\/ Scope is additional scopes set by provder\n\tScope []string `json:\"scope\"`\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tClaimsToRoles []ClaimMapping `json:\"claims_to_roles\"`\n}\n\n\/\/ V1 returns V1 version of the resource\nfunc (o *OIDCConnectorV1) V1() *OIDCConnectorV1 {\n\treturn o\n}\n\n\/\/ V2 returns V2 version of the connector\nfunc (o *OIDCConnectorV1) V2() *OIDCConnectorV2 {\n\treturn &OIDCConnectorV2{\n\t\tKind: KindOIDCConnector,\n\t\tVersion: V2,\n\t\tMetadata: Metadata{\n\t\t\tName: o.ID,\n\t\t},\n\t\tSpec: OIDCConnectorSpecV2{\n\t\t\tIssuerURL: o.IssuerURL,\n\t\t\tClientID: o.ClientID,\n\t\t\tClientSecret: o.ClientSecret,\n\t\t\tRedirectURL: o.RedirectURL,\n\t\t\tDisplay: o.Display,\n\t\t\tScope: o.Scope,\n\t\t\tClaimsToRoles: o.ClaimsToRoles,\n\t\t},\n\t}\n}\nadd interfacespackage services\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/gravitational\/teleport\/lib\/utils\"\n\n\t\"github.com\/coreos\/go-oidc\/jose\"\n\t\"github.com\/gravitational\/trace\"\n)\n\n\/\/ OIDCConnector specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. google in some organisation\ntype OIDCConnector interface {\n\t\/\/ Name is a provider name, 'e.g.' google, used internally\n\tGetName() string\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. 
https:\/\/accounts.google.com\n\tGetIssuerURL() string\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tGetClientID() string\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tGetClientSecret() string\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tGetRedirectURL() string\n\t\/\/ Display - Friendly name for this provider.\n\tGetDisplay() string\n\t\/\/ Scope is additional scopes set by provder\n\tGetScope() []string\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tGetClaimsToRoles() []ClaimMapping\n\t\/\/ GetClaims returns list of claims expected by mappings\n\tGetClaims() []string\n\t\/\/ MapClaims maps claims to roles\n\tMapClaims(claims jose.Claims) []string\n\t\/\/ Check checks OIDC connector for errors\n\tCheck() error\n\t\/\/ SetClientSecret sets client secret to some value\n\tSetClientSecret(secret string)\n\t\/\/ SetClientID sets id for authentication client (in our case it's our Auth server)\n\tSetClientID(string)\n\t\/\/ SetName sets a provider name\n\tSetName(string)\n\t\/\/ SetIssuerURL sets the endpoint of the provider\n\tSetIssuerURL(string)\n\t\/\/ SetRedirectURL sets RedirectURL\n\tSetRedirectURL(string)\n\t\/\/ SetScope sets additional scopes set by provider\n\tSetScope([]string)\n\t\/\/ SetClaimsToRoles sets dynamic mapping from claims to roles\n\tSetClaimsToRoles([]ClaimMapping)\n\t\/\/ SetDisplay sets friendly name for this provider.\n\tSetDisplay(string)\n}\n\nvar connectorMarshaler OIDCConnectorMarshaler = &TeleportOIDCConnectorMarshaler{}\n\n\/\/ SetOIDCConnectorMarshaler sets global user marshaler\nfunc SetOIDCConnectorMarshaler(m OIDCConnectorMarshaler) {\n\tmarshalerMutex.Lock()\n\tdefer marshalerMutex.Unlock()\n\tconnectorMarshaler = m\n}\n\n\/\/ GetOIDCConnectorMarshaler returns currently set user marshaler\nfunc GetOIDCConnectorMarshaler() OIDCConnectorMarshaler {\n\tmarshalerMutex.RLock()\n\tdefer marshalerMutex.RUnlock()\n\treturn connectorMarshaler\n}\n\n\/\/ OIDCConnectorMarshaler implements marshal\/unmarshal of User implementations\n\/\/ mostly adds support for extended versions\ntype OIDCConnectorMarshaler interface {\n\t\/\/ UnmarshalOIDCConnector unmarshals connector from binary representation\n\tUnmarshalOIDCConnector(bytes []byte) (OIDCConnector, error)\n\t\/\/ MarshalOIDCConnector marshals connector to binary representation\n\tMarshalOIDCConnector(c OIDCConnector, opts ...MarshalOption) ([]byte, error)\n}\n\n\/\/ GetOIDCConnectorSchema returns schema for OIDCConnector\nfunc GetOIDCConnectorSchema() string {\n\treturn fmt.Sprintf(OIDCConnectorV2SchemaTemplate, MetadataSchema, OIDCConnectorSpecV2Schema)\n}\n\ntype TeleportOIDCConnectorMarshaler struct{}\n\n\/\/ UnmarshalOIDCConnector unmarshals connector from\nfunc (*TeleportOIDCConnectorMarshaler) UnmarshalOIDCConnector(bytes []byte) (OIDCConnector, error) {\n\tvar h ResourceHeader\n\terr := json.Unmarshal(bytes, &h)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tswitch h.Version {\n\tcase \"\":\n\t\tvar c OIDCConnectorV1\n\t\terr := json.Unmarshal(bytes, &c)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\treturn c.V2(), nil\n\tcase V2:\n\t\tvar c OIDCConnectorV2\n\t\tif err := utils.UnmarshalWithSchema(GetOIDCConnectorSchema(), &c, bytes); err != nil {\n\t\t\treturn nil, 
trace.BadParameter(err.Error())\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\treturn nil, trace.BadParameter(\"OIDC connector resource version %v is not supported\", h.Version)\n}\n\n\/\/ MarshalOIDCConnector marshals OIDC connector into JSON\nfunc (*TeleportOIDCConnectorMarshaler) MarshalOIDCConnector(c OIDCConnector, opts ...MarshalOption) ([]byte, error) {\n\tcfg, err := collectOptions(opts)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\ttype connv1 interface {\n\t\tV1() *OIDCConnectorV1\n\t}\n\n\ttype connv2 interface {\n\t\tV2() *OIDCConnectorV2\n\t}\n\tversion := cfg.GetVersion()\n\tswitch version {\n\tcase V1:\n\t\tv, ok := c.(connv1)\n\t\tif !ok {\n\t\t\treturn nil, trace.BadParameter(\"don't know how to marshal %v\", V1)\n\t\t}\n\t\treturn json.Marshal(v.V1())\n\tcase V2:\n\t\tv, ok := c.(connv2)\n\t\tif !ok {\n\t\t\treturn nil, trace.BadParameter(\"don't know how to marshal %v\", V2)\n\t\t}\n\t\treturn json.Marshal(v.V2())\n\tdefault:\n\t\treturn nil, trace.BadParameter(\"version %v is not supported\", version)\n\t}\n}\n\n\/\/ OIDCConnectorV2 is version 2 resource spec for OIDC connector\ntype OIDCConnectorV2 struct {\n\t\/\/ Kind is a resource kind\n\tKind string `json:\"kind\"`\n\t\/\/ Version is version\n\tVersion string `json:\"version\"`\n\t\/\/ Metadata is connector metadata\n\tMetadata Metadata `json:\"metadata\"`\n\t\/\/ Spec contains connector specification\n\tSpec OIDCConnectorSpecV2 `json:\"spec\"`\n}\n\n\/\/ V2 returns V2 version of the resource\nfunc (o *OIDCConnectorV2) V2() *OIDCConnectorV2 {\n\treturn o\n}\n\n\/\/ V1 converts OIDCConnectorV2 to OIDCConnectorV1 format\nfunc (o *OIDCConnectorV2) V1() *OIDCConnectorV1 {\n\treturn &OIDCConnectorV1{\n\t\tID: o.Metadata.Name,\n\t\tIssuerURL: o.Spec.IssuerURL,\n\t\tClientID: o.Spec.ClientID,\n\t\tClientSecret: o.Spec.ClientSecret,\n\t\tRedirectURL: o.Spec.RedirectURL,\n\t\tDisplay: o.Spec.Display,\n\t\tScope: o.Spec.Scope,\n\t\tClaimsToRoles: o.Spec.ClaimsToRoles,\n\t}\n}\n\n\/\/ SetDisplay sets friendly name for this provider.\nfunc (o *OIDCConnectorV2) SetDisplay(display string) {\n\to.Spec.Display = display\n}\n\n\/\/ SetName sets the provider name\nfunc (o *OIDCConnectorV2) SetName(name string) {\n\to.Metadata.Name = name\n}\n\n\/\/ SetIssuerURL sets the endpoint of the provider\nfunc (o *OIDCConnectorV2) SetIssuerURL(issuerURL string) {\n\to.Spec.IssuerURL = issuerURL\n}\n\n\/\/ SetRedirectURL sets the URL the provider redirects the client's browser back to\nfunc (o *OIDCConnectorV2) SetRedirectURL(redirectURL string) {\n\to.Spec.RedirectURL = redirectURL\n}\n\n\/\/ SetScope sets additional scopes set by provider\nfunc (o *OIDCConnectorV2) SetScope(scope []string) {\n\to.Spec.Scope = scope\n}\n\n\/\/ SetClaimsToRoles sets dynamic mapping from claims to roles\nfunc (o *OIDCConnectorV2) SetClaimsToRoles(claims []ClaimMapping) {\n\to.Spec.ClaimsToRoles = claims\n}\n\n\/\/ SetClientID sets id for authentication client (in our case it's our Auth server)\nfunc (o *OIDCConnectorV2) SetClientID(clientID string) {\n\to.Spec.ClientID = clientID\n}\n\n\/\/ SetClientSecret sets client secret to some value\nfunc (o *OIDCConnectorV2) SetClientSecret(secret string) {\n\to.Spec.ClientSecret = secret\n}\n\n\/\/ GetName returns a provider id, e.g. google, used internally\nfunc (o *OIDCConnectorV2) GetName() string {\n\treturn o.Metadata.Name\n}\n\n\/\/ GetIssuerURL returns the endpoint of the provider, e.g. 
https:\/\/accounts.google.com\nfunc (o *OIDCConnectorV2) GetIssuerURL() string {\n\treturn o.Spec.IssuerURL\n}\n\n\/\/ ClientID is id for authentication client (in our case it's our Auth server)\nfunc (o *OIDCConnectorV2) GetClientID() string {\n\treturn o.Spec.ClientID\n}\n\n\/\/ ClientSecret is used to authenticate our client and should not\n\/\/ be visible to end user\nfunc (o *OIDCConnectorV2) GetClientSecret() string {\n\treturn o.Spec.ClientSecret\n}\n\n\/\/ RedirectURL - Identity provider will use this URL to redirect\n\/\/ client's browser back to it after successfull authentication\n\/\/ Should match the URL on Provider's side\nfunc (o *OIDCConnectorV2) GetRedirectURL() string {\n\treturn o.Spec.RedirectURL\n}\n\n\/\/ Display - Friendly name for this provider.\nfunc (o *OIDCConnectorV2) GetDisplay() string {\n\tif o.Spec.Display != \"\" {\n\t\treturn o.Spec.Display\n\t}\n\treturn o.GetName()\n}\n\n\/\/ Scope is additional scopes set by provder\nfunc (o *OIDCConnectorV2) GetScope() []string {\n\treturn o.Spec.Scope\n}\n\n\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\nfunc (o *OIDCConnectorV2) GetClaimsToRoles() []ClaimMapping {\n\treturn o.Spec.ClaimsToRoles\n}\n\n\/\/ GetClaims returns list of claims expected by mappings\nfunc (o *OIDCConnectorV2) GetClaims() []string {\n\tvar out []string\n\tfor _, mapping := range o.Spec.ClaimsToRoles {\n\t\tout = append(out, mapping.Claim)\n\t}\n\treturn utils.Deduplicate(out)\n}\n\n\/\/ MapClaims maps claims to roles\nfunc (o *OIDCConnectorV2) MapClaims(claims jose.Claims) []string {\n\tvar roles []string\n\tfor _, mapping := range o.Spec.ClaimsToRoles {\n\t\tfor claimName := range claims {\n\t\t\tif claimName != mapping.Claim {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclaimValue, ok, _ := claims.StringClaim(claimName)\n\t\t\tif ok && claimValue == mapping.Value {\n\t\t\t\troles = append(roles, mapping.Roles...)\n\t\t\t}\n\t\t\tclaimValues, ok, _ := claims.StringsClaim(claimName)\n\t\t\tif ok {\n\t\t\t\tfor _, claimValue := range claimValues {\n\t\t\t\t\tif claimValue == mapping.Value {\n\t\t\t\t\t\troles = append(roles, mapping.Roles...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn utils.Deduplicate(roles)\n}\n\n\/\/ Check returns nil if all parameters are great, err otherwise\nfunc (o *OIDCConnectorV2) Check() error {\n\tif o.Metadata.Name == \"\" {\n\t\treturn trace.BadParameter(\"ID: missing connector name\")\n\t}\n\tif _, err := url.Parse(o.Spec.IssuerURL); err != nil {\n\t\treturn trace.BadParameter(\"IssuerURL: bad url: '%v'\", o.Spec.IssuerURL)\n\t}\n\tif _, err := url.Parse(o.Spec.RedirectURL); err != nil {\n\t\treturn trace.BadParameter(\"RedirectURL: bad url: '%v'\", o.Spec.RedirectURL)\n\t}\n\tif o.Spec.ClientID == \"\" {\n\t\treturn trace.BadParameter(\"ClientID: missing client id\")\n\t}\n\tif o.Spec.ClientSecret == \"\" {\n\t\treturn trace.BadParameter(\"ClientSecret: missing client secret\")\n\t}\n\treturn nil\n}\n\n\/\/ OIDCConnectorV2SchemaTemplate is a template JSON Schema for user\nconst OIDCConnectorV2SchemaTemplate = `{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"kind\", \"spec\", \"metadata\", \"version\"],\n \"properties\": {\n \"kind\": {\"type\": \"string\"},\n \"version\": {\"type\": \"string\", \"default\": \"v1\"},\n \"metadata\": %v,\n \"spec\": %v\n }\n}`\n\n\/\/ OIDCConnectorSpecV2 specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. 
google in some organisation\ntype OIDCConnectorSpecV2 struct {\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. https:\/\/accounts.google.com\n\tIssuerURL string `json:\"issuer_url\"`\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tClientID string `json:\"client_id\"`\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tClientSecret string `json:\"client_secret\"`\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tRedirectURL string `json:\"redirect_url\"`\n\t\/\/ Display - Friendly name for this provider.\n\tDisplay string `json:\"display,omitempty\"`\n\t\/\/ Scope is additional scopes set by provder\n\tScope []string `json:\"scope,omitempty\"`\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tClaimsToRoles []ClaimMapping `json:\"claims_to_roles,omitempty\"`\n}\n\n\/\/ OIDCConnectorSpecV2Schema is a JSON Schema for OIDC Connector\nvar OIDCConnectorSpecV2Schema = fmt.Sprintf(`{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"issuer_url\", \"client_id\", \"client_secret\", \"redirect_url\"],\n \"properties\": {\n \"issuer_url\": {\"type\": \"string\"},\n \"client_id\": {\"type\": \"string\"},\n \"client_secret\": {\"type\": \"string\"},\n \"redirect_url\": {\"type\": \"string\"},\n \"scope\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n },\n \"claims_to_roles\": {\n \"type\": \"array\",\n \"items\": %v\n }\n }\n}`, ClaimMappingSchema)\n\n\/\/ GetClaimNames returns a list of claim names from the claim values\nfunc GetClaimNames(claims jose.Claims) []string {\n\tvar out []string\n\tfor claim := range claims {\n\t\tout = append(out, claim)\n\t}\n\treturn out\n}\n\n\/\/ ClaimMapping is OIDC claim mapping that maps\n\/\/ claim name to teleport roles\ntype ClaimMapping struct {\n\t\/\/ Claim is OIDC claim name\n\tClaim string `json:\"claim\"`\n\t\/\/ Value is claim value to match\n\tValue string `json:\"value\"`\n\t\/\/ Roles is a list of teleport roles to match\n\tRoles []string `json:\"roles\"`\n}\n\n\/\/ ClaimMappingSchema is JSON schema for claim mapping\nconst ClaimMappingSchema = `{\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"required\": [\"claim\", \"value\", \"roles\"],\n \"properties\": {\n \"claim\": {\"type\": \"string\"}, \n \"value\": {\"type\": \"string\"},\n \"roles\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n}`\n\n\/\/ OIDCConnectorV1 specifies configuration for Open ID Connect compatible external\n\/\/ identity provider, e.g. google in some organisation\ntype OIDCConnectorV1 struct {\n\t\/\/ ID is a provider id, 'e.g.' google, used internally\n\tID string `json:\"id\"`\n\t\/\/ Issuer URL is the endpoint of the provider, e.g. 
https:\/\/accounts.google.com\n\tIssuerURL string `json:\"issuer_url\"`\n\t\/\/ ClientID is id for authentication client (in our case it's our Auth server)\n\tClientID string `json:\"client_id\"`\n\t\/\/ ClientSecret is used to authenticate our client and should not\n\t\/\/ be visible to end user\n\tClientSecret string `json:\"client_secret\"`\n\t\/\/ RedirectURL - Identity provider will use this URL to redirect\n\t\/\/ client's browser back to it after successfull authentication\n\t\/\/ Should match the URL on Provider's side\n\tRedirectURL string `json:\"redirect_url\"`\n\t\/\/ Display - Friendly name for this provider.\n\tDisplay string `json:\"display\"`\n\t\/\/ Scope is additional scopes set by provder\n\tScope []string `json:\"scope\"`\n\t\/\/ ClaimsToRoles specifies dynamic mapping from claims to roles\n\tClaimsToRoles []ClaimMapping `json:\"claims_to_roles\"`\n}\n\n\/\/ V1 returns V1 version of the resource\nfunc (o *OIDCConnectorV1) V1() *OIDCConnectorV1 {\n\treturn o\n}\n\n\/\/ V2 returns V2 version of the connector\nfunc (o *OIDCConnectorV1) V2() *OIDCConnectorV2 {\n\treturn &OIDCConnectorV2{\n\t\tKind: KindOIDCConnector,\n\t\tVersion: V2,\n\t\tMetadata: Metadata{\n\t\t\tName: o.ID,\n\t\t},\n\t\tSpec: OIDCConnectorSpecV2{\n\t\t\tIssuerURL: o.IssuerURL,\n\t\t\tClientID: o.ClientID,\n\t\t\tClientSecret: o.ClientSecret,\n\t\t\tRedirectURL: o.RedirectURL,\n\t\t\tDisplay: o.Display,\n\t\t\tScope: o.Scope,\n\t\t\tClaimsToRoles: o.ClaimsToRoles,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"package wats\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n)\n\nvar _ = Describe(\"An application printing a bunch of output\", func() {\n\tvar appName string\n\n\tBeforeEach(func() {\n\t\tappName = generator.RandomName()\n\n\t\tEventually(pushNora(appName), CF_PUSH_TIMEOUT).Should(Succeed())\n\t\tenableDiego(appName)\n\t\tdisableSsh(appName)\n\t\tEventually(runCf(\"start\", appName), CF_PUSH_TIMEOUT).Should(Succeed())\n\t})\n\n\tAfterEach(func() {\n\t\tEventually(cf.Cf(\"logs\", appName, \"--recent\")).Should(Exit())\n\t\tEventually(cf.Cf(\"delete\", appName, \"-f\")).Should(Exit(0))\n\t})\n\n\tXIt(\"doesn't die when printing 32MB\", func() {\n\t\tbeforeId := helpers.CurlApp(appName, \"\/id\")\n\n\t\tExpect(helpers.CurlAppWithTimeout(appName, \"\/logspew\/32000\", DEFAULT_TIMEOUT)).\n\t\t\tTo(ContainSubstring(\"Just wrote 32000 kbytes to the log\"))\n\n\t\tConsistently(func() string {\n\t\t\treturn helpers.CurlApp(appName, \"\/id\")\n\t\t}, \"10s\").Should(Equal(beforeId))\n\t})\n})\nEnable the output volume testpackage wats\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. 
\"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n)\n\nvar _ = Describe(\"An application printing a bunch of output\", func() {\n\tvar appName string\n\n\tBeforeEach(func() {\n\t\tappName = generator.RandomName()\n\n\t\tEventually(pushNora(appName), CF_PUSH_TIMEOUT).Should(Succeed())\n\t\tenableDiego(appName)\n\t\tdisableSsh(appName)\n\t\tEventually(runCf(\"start\", appName), CF_PUSH_TIMEOUT).Should(Succeed())\n\t})\n\n\tAfterEach(func() {\n\t\tEventually(cf.Cf(\"logs\", appName, \"--recent\")).Should(Exit())\n\t\tEventually(cf.Cf(\"delete\", appName, \"-f\")).Should(Exit(0))\n\t})\n\n\tIt(\"doesn't die when printing 32MB\", func() {\n\t\tbeforeId := helpers.CurlApp(appName, \"\/id\")\n\n\t\tExpect(helpers.CurlAppWithTimeout(appName, \"\/logspew\/32000\", DEFAULT_TIMEOUT)).\n\t\t\tTo(ContainSubstring(\"Just wrote 32000 kbytes to the log\"))\n\n\t\tConsistently(func() string {\n\t\t\treturn helpers.CurlApp(appName, \"\/id\")\n\t\t}, \"10s\").Should(Equal(beforeId))\n\t})\n})\n<|endoftext|>"} {"text":"package models\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sai-lab\/mouryou\/lib\/apache\"\n\t\"github.com\/sai-lab\/mouryou\/lib\/logger\"\n)\n\ntype VirtualMachine struct {\n\tId int `json:\"id\"`\n\tName string `json:\"name\"`\n\tHost string `json:\"host\"`\n\t\/\/ スループットの平均値\n\tAverage int `json:\"average\"`\n\t\/\/ 基準の重さ\n\tBasicWeight int `json:\"basic_weight\"`\n\t\/\/ 現在の重さ\n\tWeight int `json:\"weight\"`\n\t\/\/ is start machine\n\tIsStartMachine bool `json:\"is_start_machine\"`\n\t\/\/ ハイパーバイザ\n\tHypervisor *HypervisorStruct `json:\"-\"`\n\t\/\/ ベンダー\n\tVendor *VendorStruct `json:\"-\"`\n}\n\n\/\/ ServerState はapache.Scoreboardから負荷状況を受け取り返却します。\nfunc (machine VirtualMachine) ServerStatus() apache.ServerStatus {\n\tvar status apache.ServerStatus\n\n\tboard, err := apache.Scoreboard(machine.Host)\n\tif err != nil {\n\t\t\/\/ errがあった場合、timeoutしていると判断します。\n\t\tstatus.HostName = machine.Name\n\t\tstatus.Other = \"Connection is timeout.\"\n\t} else {\n\t\terr = json.Unmarshal(board, &status)\n\t\tif err != nil {\n\t\t\tlogger.PrintPlace(fmt.Sprint(err))\n\t\t}\n\t}\n\tstatus.Id = machine.Id\n\n\treturn status\n}\n\n\/\/ Bootup はVMの起動処理を行います。\n\/\/ 現在は実際に起動停止は行わないため起動にかかる時間分sleepします。\nfunc (machine VirtualMachine) Bootup(sleep time.Duration) string {\n\t\/\/ connection, err := machine.Hypervisor.Connect()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ defer connection.CloseConnection()\n\n\t\/\/ domain, err := connection.LookupDomainByName(machine.Name)\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\t\/\/ err = domain.Create()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\ttime.Sleep(sleep * time.Second)\n\n\treturn \"booted up\"\n}\n\n\/\/ Bootup はVMの起動処理を行います。\n\/\/ 現在は実際に起動停止は行わないため停止にかかる時間分sleepします。\nfunc (machine VirtualMachine) Shutdown(sleep time.Duration) string {\n\t\/\/ connection, err := machine.Hypervisor.Connect() \/\/ here?\n\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ defer connection.CloseConnection()\n\n\t\/\/ domain, err := connection.LookupDomainByName(machine.Name)\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\t\/\/ time.Sleep(sleep * 
time.Second)\n\t\/\/ err = domain.Shutdown()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \tlogger.PrintPlace(fmt.Sprint(err.Error))\n\t\/\/ \treturn\n\t\/\/ }\n\n\ttime.Sleep(sleep * time.Second)\n\n\treturn \"shut down\"\n}\nAdd Operation to VirtualMachinepackage models\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sai-lab\/mouryou\/lib\/apache\"\n\t\"github.com\/sai-lab\/mouryou\/lib\/logger\"\n)\n\ntype VirtualMachine struct {\n\tId int `json:\"id\"`\n\tName string `json:\"name\"`\n\tHost string `json:\"host\"`\n\tOperation string `json:\"operation\"`\n\t\/\/ Average throughput\n\tAverage int `json:\"average\"`\n\t\/\/ Base weight\n\tBasicWeight int `json:\"basic_weight\"`\n\t\/\/ Current weight\n\tWeight int `json:\"weight\"`\n\t\/\/ is start machine\n\tIsStartMachine bool `json:\"is_start_machine\"`\n\t\/\/ Hypervisor\n\tHypervisor *HypervisorStruct `json:\"-\"`\n\t\/\/ Vendor\n\tVendor *VendorStruct `json:\"-\"`\n}\n\n\/\/ ServerStatus receives the load status from apache.Scoreboard and returns it.\nfunc (machine VirtualMachine) ServerStatus() apache.ServerStatus {\n\tvar status apache.ServerStatus\n\n\tboard, err := apache.Scoreboard(machine.Host)\n\tif err != nil {\n\t\t\/\/ If there is an error, the connection is judged to have timed out.\n\t\tstatus.HostName = machine.Name\n\t\tstatus.Other = \"Connection is timeout.\"\n\t} else {\n\t\terr = json.Unmarshal(board, &status)\n\t\tif err != nil {\n\t\t\tlogger.PrintPlace(fmt.Sprint(err))\n\t\t}\n\t}\n\tstatus.Id = machine.Id\n\n\treturn status\n}\n\n\/\/ Bootup boots the VM.\n\/\/ It does not actually start or stop anything yet, so it just sleeps for the time a boot takes.\nfunc (machine VirtualMachine) Bootup(sleep time.Duration) string {\n\t\/\/ connection, err := machine.Hypervisor.Connect()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ defer connection.CloseConnection()\n\n\t\/\/ domain, err := connection.LookupDomainByName(machine.Name)\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\t\/\/ err = domain.Create()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\ttime.Sleep(sleep * time.Second)\n\n\treturn \"booted up\"\n}\n\n\/\/ Shutdown shuts the VM down.\n\/\/ It does not actually start or stop anything yet, so it just sleeps for the time a shutdown takes.\nfunc (machine VirtualMachine) Shutdown(sleep time.Duration) string {\n\t\/\/ connection, err := machine.Hypervisor.Connect() \/\/ here?\n\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\t\/\/ defer connection.CloseConnection()\n\n\t\/\/ domain, err := connection.LookupDomainByName(machine.Name)\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \treturn\n\t\/\/ }\n\n\t\/\/ time.Sleep(sleep * time.Second)\n\t\/\/ err = domain.Shutdown()\n\t\/\/ if err != nil {\n\t\/\/ \tpower <- err.Error()\n\t\/\/ \tlogger.PrintPlace(fmt.Sprint(err.Error))\n\t\/\/ \treturn\n\t\/\/ }\n\n\ttime.Sleep(sleep * time.Second)\n\n\treturn \"shut down\"\n}\n<|endoftext|>"} {"text":"package pelican\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"regexp\"\n)\n\nvar validIPv4addr = regexp.MustCompile(`^[0-9]+[.][0-9]+[.][0-9]+[.][0-9]+$`)\n\nvar privateIPv4addr = regexp.MustCompile(`(^127\\.0\\.0\\.1)|(^10\\.)|(^172\\.1[6-9]\\.)|(^172\\.2[0-9]\\.)|(^172\\.3[0-1]\\.)|(^192\\.168\\.)`)\n\nfunc IsRoutableIPv4(ip string) bool {\n\tmatch := privateIPv4addr.FindStringSubmatch(ip)\n\tif match != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc GetExternalIP() string {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvalid := []string{}\n\n\tfor _, a := range addrs {\n\t\tif ipnet, 
ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\taddr := ipnet.IP.String()\n\t\t\tmatch := validIPv4addr.FindStringSubmatch(addr)\n\t\t\tif match != nil {\n\t\t\t\tif addr != \"127.0.0.1\" {\n\t\t\t\t\tvalid = append(valid, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tswitch len(valid) {\n\tcase 0:\n\t\treturn \"127.0.0.1\"\n\tcase 1:\n\t\treturn valid[0]\n\tdefault:\n\t\t\/\/ try to get a routable ip if possible.\n\t\tfor _, ip := range valid {\n\t\t\tif IsRoutableIPv4(ip) {\n\t\t\t\treturn ip\n\t\t\t}\n\t\t}\n\t\t\/\/ give up, just return the first.\n\t\treturn valid[0]\n\t}\n}\n\nfunc GetExternalIPAsInt() int {\n\ts := GetExternalIP()\n\tip := net.ParseIP(s).To4()\n\tif ip == nil {\n\t\treturn 0\n\t}\n\tsum := 0\n\tfor i := 0; i < 4; i++ {\n\t\tmult := 1 << (8 * uint64(3-i))\n\t\t\/\/fmt.Printf(\"mult = %d\\n\", mult)\n\t\tsum += int(mult) * int(ip[i])\n\t\t\/\/fmt.Printf(\"sum = %d\\n\", sum)\n\t}\n\t\/\/fmt.Printf(\"GetExternalIPAsInt() returns %d\\n\", sum)\n\treturn sum\n}\n\n\/\/ sure there's a race here, but should be okay.\n\/\/ :0 asks the OS to give us a free port.\nfunc GetAvailPort() int {\n\tl, _ := net.Listen(\"tcp\", \":0\")\n\tr := l.Addr()\n\tl.Close()\n\treturn r.(*net.TCPAddr).Port\n}\n\nfunc GenAddress() string {\n\tport := GetAvailPort()\n\tip := GetExternalIP()\n\ts := fmt.Sprintf(\"tcp:\/\/%s:%d\", ip, port)\n\t\/\/fmt.Printf(\"GenAddress returning '%s'\\n\", s)\n\treturn s\n}\n\n\/\/ reduce `tcp:\/\/blah:port` to `blah:port`\nvar validSplitOffProto = regexp.MustCompile(`^[^:]*:\/\/(.*)$`)\n\nfunc StripNanomsgAddressPrefix(nanomsgAddr string) (suffix string, err error) {\n\n\tmatch := validSplitOffProto.FindStringSubmatch(nanomsgAddr)\n\tif match == nil || len(match) != 2 {\n\t\treturn \"\", fmt.Errorf(\"could not strip prefix tcp:\/\/ from nanomsg address '%s'\", nanomsgAddr)\n\t}\n\treturn match[1], nil\n}\ndocs++package pelican\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"regexp\"\n)\n\nvar validIPv4addr = regexp.MustCompile(`^[0-9]+[.][0-9]+[.][0-9]+[.][0-9]+$`)\n\nvar privateIPv4addr = regexp.MustCompile(`(^127\\.0\\.0\\.1)|(^10\\.)|(^172\\.1[6-9]\\.)|(^172\\.2[0-9]\\.)|(^172\\.3[0-1]\\.)|(^192\\.168\\.)`)\n\n\/\/ IsRoutableIPv4 returns true if the string in ip represents an IPv4 address that is not\n\/\/ private. See http:\/\/en.wikipedia.org\/wiki\/Private_network#Private_IPv4_address_spaces\n\/\/ for the numeric ranges that are private. 
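The regular expression above matches the loopback address and the RFC 1918 private blocks (10\/8, 172.16\/12, and 192.168\/16). 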
127.0.0.1, 192.168.0.1, and 172.16.0.1 are\n\/\/ examples of non-routable IP addresses.\nfunc IsRoutableIPv4(ip string) bool {\n\tmatch := privateIPv4addr.FindStringSubmatch(ip)\n\tif match != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ GetExternalIP tries to determine the external IP address\n\/\/ used on this host.\nfunc GetExternalIP() string {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvalid := []string{}\n\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\taddr := ipnet.IP.String()\n\t\t\tmatch := validIPv4addr.FindStringSubmatch(addr)\n\t\t\tif match != nil {\n\t\t\t\tif addr != \"127.0.0.1\" {\n\t\t\t\t\tvalid = append(valid, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tswitch len(valid) {\n\tcase 0:\n\t\treturn \"127.0.0.1\"\n\tcase 1:\n\t\treturn valid[0]\n\tdefault:\n\t\t\/\/ try to get a routable ip if possible.\n\t\tfor _, ip := range valid {\n\t\t\tif IsRoutableIPv4(ip) {\n\t\t\t\treturn ip\n\t\t\t}\n\t\t}\n\t\t\/\/ give up, just return the first.\n\t\treturn valid[0]\n\t}\n}\n\n\/\/ GetExternalIPAsInt calls GetExternalIP() and then converts\n\/\/ the resulting IPv4 string into an integer.\nfunc GetExternalIPAsInt() int {\n\ts := GetExternalIP()\n\tip := net.ParseIP(s).To4()\n\tif ip == nil {\n\t\treturn 0\n\t}\n\tsum := 0\n\tfor i := 0; i < 4; i++ {\n\t\tmult := 1 << (8 * uint64(3-i))\n\t\t\/\/fmt.Printf(\"mult = %d\\n\", mult)\n\t\tsum += int(mult) * int(ip[i])\n\t\t\/\/fmt.Printf(\"sum = %d\\n\", sum)\n\t}\n\t\/\/fmt.Printf(\"GetExternalIPAsInt() returns %d\\n\", sum)\n\treturn sum\n}\n\n\/\/ GetAvailPort asks the OS for an unused port.\n\/\/ There's a race here, where the port could be grabbed by someone else\n\/\/ before the caller gets to Listen on it, but in practice such races\n\/\/ are rare. 
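Callers that must not lose the port can instead keep the Listener open and hand it to the consumer directly. 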
Uses net.Listen(\"tcp\", \":0\") to determine a free port, then\n\/\/ releases it back to the OS with Listener.Close().\nfunc GetAvailPort() int {\n\tl, _ := net.Listen(\"tcp\", \":0\")\n\tr := l.Addr()\n\tl.Close()\n\treturn r.(*net.TCPAddr).Port\n}\n\n\/\/ GenAddress generates a local address by calling GetAvailPort() and\n\/\/ GetExternalIP(), then prefixing them with 'tcp:\/\/'.\nfunc GenAddress() string {\n\tport := GetAvailPort()\n\tip := GetExternalIP()\n\ts := fmt.Sprintf(\"tcp:\/\/%s:%d\", ip, port)\n\t\/\/fmt.Printf(\"GenAddress returning '%s'\\n\", s)\n\treturn s\n}\n\n\/\/ reduce `tcp:\/\/blah:port` to `blah:port`\nvar validSplitOffProto = regexp.MustCompile(`^[^:]*:\/\/(.*)$`)\n\n\/\/ StripNanomsgAddressPrefix removes the 'tcp:\/\/' prefix from\n\/\/ nanomsgAddr.\nfunc StripNanomsgAddressPrefix(nanomsgAddr string) (suffix string, err error) {\n\n\tmatch := validSplitOffProto.FindStringSubmatch(nanomsgAddr)\n\tif match == nil || len(match) != 2 {\n\t\treturn \"\", fmt.Errorf(\"could not strip prefix tcp:\/\/ from nanomsg address '%s'\", nanomsgAddr)\n\t}\n\treturn match[1], nil\n}\n<|endoftext|>"} {"text":"package concat\n\nfunc canFormArray(arr []int, pcs [][]int) bool {\n\treturn false\n}\nsolve 1640 use hashmappackage concat\n\nfunc canFormArray(arr []int, pcs [][]int) bool {\n\treturn useHashmap(arr, pcs)\n}\n\n\/\/ useHashmap time complexity O(N) where (N is the length of arr), space compelxity O(N)\nfunc useHashmap(arr []int, pcs [][]int) bool {\n\tn := len(arr)\n\tset := make(map[int]int, n)\n\tfor i := range arr {\n\t\tset[arr[i]] = i\n\t}\n\tfor i := range pcs {\n\t\tcur := pcs[i]\n\t\tif len(cur) == 1 {\n\t\t\tif _, exists := set[cur[0]]; !exists {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tstart := -1\n\t\tcj := 0\n\t\tfor j := range cur {\n\t\t\tif p, exists := set[cur[j]]; exists {\n\t\t\t\tif start == -1 {\n\t\t\t\t\tstart = p\n\t\t\t\t} else {\n\t\t\t\t\tif j-cj != p-start {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"package acceptance_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\t\"github.com\/sclevine\/agouti\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t. 
\"github.com\/sclevine\/agouti\/matchers\"\n\n\t\"github.com\/cloudfoundry\/gunk\/urljoiner\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n)\n\nvar _ = Describe(\"Resource Pausing\", func() {\n\tvar atcProcess ifrit.Process\n\tvar dbListener *pq.Listener\n\tvar atcPort uint16\n\n\tBeforeEach(func() {\n\t\tatcBin, err := gexec.Build(\"github.com\/concourse\/atc\/cmd\/atc\")\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\n\t\tdbLogger := lagertest.NewTestLogger(\"test\")\n\t\tpostgresRunner.CreateTestDB()\n\t\tdbConn = postgresRunner.Open()\n\t\tdbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)\n\t\tbus := db.NewNotificationsBus(dbListener)\n\t\tsqlDB = db.NewSQL(dbLogger, dbConn, bus)\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\t\tatcProcess, atcPort = startATC(atcBin, 1)\n\t})\n\n\tAfterEach(func() {\n\t\tginkgomon.Interrupt(atcProcess)\n\n\t\tΩ(dbConn.Close()).Should(Succeed())\n\t\tΩ(dbListener.Close()).Should(Succeed())\n\n\t\tpostgresRunner.DropTestDB()\n\t})\n\n\tDescribe(\"pausing a resource\", func() {\n\t\tvar page *agouti.Page\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tpage, err = agoutiDriver.NewPage()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(page.Destroy()).To(Succeed())\n\t\t})\n\n\t\thomepage := func() string {\n\t\t\treturn fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/pipelines\/%s\", atcPort, atc.DefaultPipelineName)\n\t\t}\n\n\t\twithPath := func(path string) string {\n\t\t\treturn urljoiner.Join(homepage(), path)\n\t\t}\n\n\t\tContext(\"with a resource in the configuration\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\t\/\/ job build data\n\t\t\t\tΩ(sqlDB.SaveConfig(atc.DefaultPipelineName, atc.Config{\n\t\t\t\t\tJobs: atc.JobConfigs{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"job-name\",\n\t\t\t\t\t\t\tPlan: atc.PlanSequence{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGet: \"resource-name\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResources: atc.ResourceConfigs{\n\t\t\t\t\t\t{Name: \"resource-name\"},\n\t\t\t\t\t},\n\t\t\t\t}, db.ConfigVersion(1))).Should(Succeed())\n\t\t\t})\n\n\t\t\tIt(\"can view the resource\", func() {\n\t\t\t\t\/\/ homepage -> resource detail\n\t\t\t\tExpect(page.Navigate(homepage())).To(Succeed())\n\t\t\t\tEventually(page.FindByLink(\"resource-name\")).Should(BeFound())\n\t\t\t\tExpect(page.FindByLink(\"resource-name\").Click()).To(Succeed())\n\n\t\t\t\t\/\/ resource detail -> paused resource detail\n\t\t\t\tExpect(page).Should(HaveURL(withPath(\"\/resources\/resource-name\")))\n\t\t\t\tExpect(page.Find(\"h1\")).To(HaveText(\"resource-name\"))\n\n\t\t\t\tAuthenticate(page, \"admin\", \"password\")\n\n\t\t\t\tExpect(page.Find(\".js-pauseUnpause\").Click()).To(Succeed())\n\t\t\t\tEventually(page.Find(\".header h3\")).Should(HaveText(\"checking paused\"))\n\n\t\t\t\tpage.Refresh()\n\n\t\t\t\tEventually(page.Find(\".header h3\")).Should(HaveText(\"checking paused\"))\n\t\t\t})\n\t\t})\n\t})\n})\nqualify the pause button by js-resourcepackage acceptance_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\t\"github.com\/sclevine\/agouti\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t. 
\"github.com\/sclevine\/agouti\/matchers\"\n\n\t\"github.com\/cloudfoundry\/gunk\/urljoiner\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/atc\/db\"\n)\n\nvar _ = Describe(\"Resource Pausing\", func() {\n\tvar atcProcess ifrit.Process\n\tvar dbListener *pq.Listener\n\tvar atcPort uint16\n\n\tBeforeEach(func() {\n\t\tatcBin, err := gexec.Build(\"github.com\/concourse\/atc\/cmd\/atc\")\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\n\t\tdbLogger := lagertest.NewTestLogger(\"test\")\n\t\tpostgresRunner.CreateTestDB()\n\t\tdbConn = postgresRunner.Open()\n\t\tdbListener = pq.NewListener(postgresRunner.DataSourceName(), time.Second, time.Minute, nil)\n\t\tbus := db.NewNotificationsBus(dbListener)\n\t\tsqlDB = db.NewSQL(dbLogger, dbConn, bus)\n\t\tΩ(err).ShouldNot(HaveOccurred())\n\t\tatcProcess, atcPort = startATC(atcBin, 1)\n\t})\n\n\tAfterEach(func() {\n\t\tginkgomon.Interrupt(atcProcess)\n\n\t\tΩ(dbConn.Close()).Should(Succeed())\n\t\tΩ(dbListener.Close()).Should(Succeed())\n\n\t\tpostgresRunner.DropTestDB()\n\t})\n\n\tDescribe(\"pausing a resource\", func() {\n\t\tvar page *agouti.Page\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\tpage, err = agoutiDriver.NewPage()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(page.Destroy()).To(Succeed())\n\t\t})\n\n\t\thomepage := func() string {\n\t\t\treturn fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/pipelines\/%s\", atcPort, atc.DefaultPipelineName)\n\t\t}\n\n\t\twithPath := func(path string) string {\n\t\t\treturn urljoiner.Join(homepage(), path)\n\t\t}\n\n\t\tContext(\"with a resource in the configuration\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\t\/\/ job build data\n\t\t\t\tΩ(sqlDB.SaveConfig(atc.DefaultPipelineName, atc.Config{\n\t\t\t\t\tJobs: atc.JobConfigs{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"job-name\",\n\t\t\t\t\t\t\tPlan: atc.PlanSequence{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tGet: \"resource-name\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResources: atc.ResourceConfigs{\n\t\t\t\t\t\t{Name: \"resource-name\"},\n\t\t\t\t\t},\n\t\t\t\t}, db.ConfigVersion(1))).Should(Succeed())\n\t\t\t})\n\n\t\t\tIt(\"can view the resource\", func() {\n\t\t\t\t\/\/ homepage -> resource detail\n\t\t\t\tExpect(page.Navigate(homepage())).To(Succeed())\n\t\t\t\tEventually(page.FindByLink(\"resource-name\")).Should(BeFound())\n\t\t\t\tExpect(page.FindByLink(\"resource-name\").Click()).To(Succeed())\n\n\t\t\t\t\/\/ resource detail -> paused resource detail\n\t\t\t\tExpect(page).Should(HaveURL(withPath(\"\/resources\/resource-name\")))\n\t\t\t\tExpect(page.Find(\"h1\")).To(HaveText(\"resource-name\"))\n\n\t\t\t\tAuthenticate(page, \"admin\", \"password\")\n\n\t\t\t\tExpect(page.Find(\".js-resource .js-pauseUnpause\").Click()).To(Succeed())\n\t\t\t\tEventually(page.Find(\".header h3\")).Should(HaveText(\"checking paused\"))\n\n\t\t\t\tpage.Refresh()\n\n\t\t\t\tEventually(page.Find(\".header h3\")).Should(HaveText(\"checking paused\"))\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n)\n\n\n\/\/ start when there is no config should report that fact right away\nfunc TestStartWithoutConfig(t *testing.T) {\n\tdone := start(\"\/not\/notaconfig.conf\")\n\tmessage := <- done\n\tif message != \"Could not read file at \/not\/notaconfig.conf\" {\n\t\tt.Errorf(\"did not report the file that was missing\")\n\t}\n}\n\n\/\/ start when 
there is a bad config should report that fact right away\nfunc TestStartWithBadConfig(t *testing.T) {\n\tdone := start(\".\/test_bad.conf\")\n\tmessage := <- done\n\tif message != \"Could not decode config\" {\n\t\tt.Errorf(\"incorrect bad config message\\n\\\"%s\\\"\", message)\n\t}\n}\n\n\/\/ start with a valid file should not be done right away\nfunc TestStartWithGoodConfig(t *testing.T) {\n\tdone := start(\".\/test_good.conf\")\n\tselect {\n\tcase message := <-done:\n\t\tt.Errorf(\"done with message\\n \\\"%s\\\"\", message)\n\tcase <-time.After(time.Millisecond * 50):\n\t\tfmt.Print(\"Stayed up with good config\\n\")\n\t}\n}\n\n\/\/ Stabilizer.ServeHTTP should return the first good response\nfunc TestStabilizerReturnsFirstResponse(t *testing.T) {\n\t\/\/ mock handler that first errors, then takes a long time then returns a\n\t\/\/ a good response\n\treqCount := 0\n\tmockHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\treqCount++\n\t\tif reqCount % 3 == 1 {\n\t\t\thttp.Error(w, \"test error\", 1234567890)\n\t\t}\n\t\tif reqCount % 3 == 2 {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tfmt.Fprintf(w, \"slow response\")\n\t\t}\n\t\tif reqCount % 3 == 0 {\n\t\t\tfmt.Fprintf(w, \"fast response\")\n\t\t}\n\t}\n\n\tmockUnstableBackend := httptest.NewServer(http.HandlerFunc(mockHandler))\n\tdefer mockUnstableBackend.Close()\n\tu, err := url.Parse(mockUnstableBackend.URL)\n\tif err != nil { t.Errorf(\"error parsing backend url test broken\") }\n\ttestStabilizer := &Stabilizer{u, 4}\n\ttestStableServer := httptest.NewServer(\n\t\thttp.TimeoutHandler(testStabilizer, 5 * time.Second, \"timeout\"),\n\t)\n\tdefer testStableServer.Close()\n\n\t\/\/ make many requests and make sure they are all the fast response\n\tfor i := 0; i < 10; i++ {\n\t\tres, err := http.Get(testStableServer.URL)\n\t\tif err != nil { t.Errorf(\"error response from stable server\") }\n\n\t\tmessage, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil { t.Errorf(\"error reading response body from stable server\") }\n\t\tres.Body.Close()\n\n\t\t\/\/ ensure that all responses are the fast response\n\t\tif string(message) != \"fast response\" {\n\t\t\tt.Errorf(string(message))\n\t\t}\n\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n}\n\n\/\/ TestCanPass proves that tests are running\nfunc TestCanPass(t *testing.T) {\n\tif true != true {\n\t\tt.Errorf(\"true is not true,\\ncheck your premises,\\n consider clojure?\")\n\t}\n}\ntests are gentler on the system nowpackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/url\"\n\t\"testing\"\n\t\"time\"\n)\n\n\n\/\/ start when there is no config should report that fact right away\nfunc TestStartWithoutConfig(t *testing.T) {\n\tdone := start(\"\/not\/notaconfig.conf\")\n\tmessage := <- done\n\tif message != \"Could not read file at \/not\/notaconfig.conf\" {\n\t\tt.Errorf(\"did not report the file that was missing\")\n\t}\n}\n\n\/\/ start when there is a bad config should report that fact right away\nfunc TestStartWithBadConfig(t *testing.T) {\n\tdone := start(\".\/test_bad.conf\")\n\tmessage := <- done\n\tif message != \"Could not decode config\" {\n\t\tt.Errorf(\"incorrect bad config message\\n\\\"%s\\\"\", message)\n\t}\n}\n\n\/\/ start with a valid file should not be done right away\nfunc TestStartWithGoodConfig(t *testing.T) {\n\tdone := start(\".\/test_good.conf\")\n\tselect {\n\tcase message := <-done:\n\t\tt.Errorf(\"done with message\\n \\\"%s\\\"\", message)\n\tcase <-time.After(time.Millisecond * 
50):\n\t\tfmt.Print(\"Stayed up with good config\\n\")\n\t}\n}\n\n\/\/ Stabilizer.ServeHTTP should return the first good response\nfunc TestStabilizerReturnsFirstResponse(t *testing.T) {\n\t\/\/ mock handler that first errors, then takes a long time then returns a\n\t\/\/ a good response\n\treqCount := 0\n\tmockHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\treqCount++\n\t\tif reqCount % 3 == 1 {\n\t\t\thttp.Error(w, \"test error\", 1234567890)\n\t\t}\n\t\tif reqCount % 3 == 2 {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tfmt.Fprintf(w, \"slow response\")\n\t\t}\n\t\tif reqCount % 3 == 0 {\n\t\t\tfmt.Fprintf(w, \"fast response\")\n\t\t}\n\t}\n\n\tmockUnstableBackend := httptest.NewServer(http.HandlerFunc(mockHandler))\n\tdefer mockUnstableBackend.Close()\n\tu, err := url.Parse(mockUnstableBackend.URL)\n\tif err != nil { t.Errorf(\"error parsing backend url test broken\") }\n\ttestStabilizer := &Stabilizer{u, 4}\n\ttestStableServer := httptest.NewServer(\n\t\thttp.TimeoutHandler(testStabilizer, 5 * time.Second, \"timeout\"),\n\t)\n\tdefer testStableServer.Close()\n\n\t\/\/ make many requests and make sure they are all the fast response\n\tfor i := 0; i < 3; i++ {\n\t\tres, err := http.Get(testStableServer.URL)\n\t\tif err != nil { t.Errorf(\"error response from stable server\") }\n\n\t\tmessage, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil { t.Errorf(\"error reading response body from stable server\") }\n\t\tres.Body.Close()\n\n\t\t\/\/ ensure that all responses are the fast response\n\t\tif string(message) != \"fast response\" {\n\t\t\tt.Errorf(string(message))\n\t\t}\n\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}\n\n\/\/ TestCanPass proves that tests are running\nfunc TestCanPass(t *testing.T) {\n\tif true != true {\n\t\tt.Errorf(\"true is not true,\\ncheck your premises,\\n consider clojure?\")\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2022 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build go1.18\n\/\/ +build go1.18\n\npackage vulncheck\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/google\/go-cmp\/cmp\/cmpopts\"\n\t\"golang.org\/x\/tools\/go\/packages\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/cache\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/fake\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/tests\"\n\t\"golang.org\/x\/vuln\/client\"\n\t\"golang.org\/x\/vuln\/osv\"\n)\n\nfunc TestCmd_Run(t *testing.T) {\n\trunTest(t, workspace1, proxy1, func(ctx context.Context, snapshot source.Snapshot) {\n\t\tcmd := &cmd{Client: testClient1}\n\t\tcfg := packagesCfg(ctx, snapshot)\n\t\tresult, err := cmd.Run(ctx, cfg, \".\/...\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ Check that we find the right number of vulnerabilities.\n\t\t\/\/ There should be three entries as there are three vulnerable\n\t\t\/\/ symbols in the two import-reachable OSVs.\n\t\tvar got []report\n\t\tfor _, v := range result {\n\t\t\tgot = append(got, toReport(v))\n\t\t}\n\t\t\/\/ drop the workspace root directory path included in the summary.\n\t\tcwd := cfg.Dir\n\t\tfor _, g := range got {\n\t\t\tfor i, summary := range g.CallStackSummaries {\n\t\t\t\tg.CallStackSummaries[i] = strings.ReplaceAll(summary, cwd, \".\")\n\t\t\t}\n\t\t}\n\n\t\tvar want = []report{\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-01\",\n\t\t\t\t\tSymbol: \"VulnData.Vuln1\",\n\t\t\t\t\tPkgPath: \"golang.org\/amod\/avuln\",\n\t\t\t\t\tModPath: \"golang.org\/amod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-01\",\n\t\t\t\t\tCurrentVersion: \"v1.1.3\",\n\t\t\t\t\tFixedVersion: \"v1.0.4\",\n\t\t\t\t\tCallStackSummaries: []string{\n\t\t\t\t\t\t\"golang.org\/entry\/x.X calls golang.org\/amod\/avuln.VulnData.Vuln1\",\n\t\t\t\t\t\t\"golang.org\/entry\/x.X calls golang.org\/cmod\/c.C1, which eventually calls golang.org\/amod\/avuln.VulnData.Vuln2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCallStacksStr: []string{\n\t\t\t\t\t\"golang.org\/entry\/x.X [approx.] (x.go:8)\\n\" +\n\t\t\t\t\t\t\"golang.org\/amod\/avuln.VulnData.Vuln1 (avuln.go:3)\\n\",\n\t\t\t\t\t\"golang.org\/entry\/x.X (x.go:8)\\n\" +\n\t\t\t\t\t\t\"golang.org\/cmod\/c.C1 (c.go:13)\\n\" +\n\t\t\t\t\t\t\"golang.org\/amod\/avuln.VulnData.Vuln2 (avuln.go:4)\\n\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-02\",\n\t\t\t\t\tSymbol: \"Vuln\",\n\t\t\t\t\tPkgPath: \"golang.org\/bmod\/bvuln\",\n\t\t\t\t\tModPath: \"golang.org\/bmod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-02\",\n\t\t\t\t\tCurrentVersion: \"v0.5.0\",\n\t\t\t\t\tCallStackSummaries: []string{\"golang.org\/entry\/y.Y calls golang.org\/bmod\/bvuln.Vuln\"},\n\t\t\t\t},\n\t\t\t\tCallStacksStr: []string{\n\t\t\t\t\t\"golang.org\/entry\/y.Y [approx.] 
(y.go:5)\\n\" +\n\t\t\t\t\t\t\"golang.org\/bmod\/bvuln.Vuln (bvuln.go:2)\\n\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-03\",\n\t\t\t\t\tDetails: \"unaffecting vulnerability\",\n\t\t\t\t\tModPath: \"golang.org\/amod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-03\",\n\t\t\t\t\tFixedVersion: \"v1.0.4\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t\/\/ sort reports for stability before comparison.\n\t\tfor _, rpts := range [][]report{got, want} {\n\t\t\tsort.Slice(rpts, func(i, j int) bool {\n\t\t\t\ta, b := rpts[i], rpts[j]\n\t\t\t\tif a.ID != b.ID {\n\t\t\t\t\treturn a.ID < b.ID\n\t\t\t\t}\n\t\t\t\tif a.PkgPath != b.PkgPath {\n\t\t\t\t\treturn a.PkgPath < b.PkgPath\n\t\t\t\t}\n\t\t\t\treturn a.Symbol < b.Symbol\n\t\t\t})\n\t\t}\n\t\tif diff := cmp.Diff(want, got, cmpopts.IgnoreFields(report{}, \"Vuln.CallStacks\")); diff != \"\" {\n\t\t\tt.Error(diff)\n\t\t}\n\n\t})\n}\n\ntype report struct {\n\tVuln\n\t\/\/ Trace is stringified Vuln.CallStacks\n\tCallStacksStr []string\n}\n\nfunc toReport(v Vuln) report {\n\tvar r = report{Vuln: v}\n\tfor _, s := range v.CallStacks {\n\t\tr.CallStacksStr = append(r.CallStacksStr, CallStackString(s))\n\t}\n\treturn r\n}\n\nfunc CallStackString(callstack CallStack) string {\n\tvar b bytes.Buffer\n\tfor _, entry := range callstack {\n\t\tfname := filepath.Base(entry.URI.SpanURI().Filename())\n\t\tfmt.Fprintf(&b, \"%v (%v:%d)\\n\", entry.Name, fname, entry.Pos.Line)\n\t}\n\treturn b.String()\n}\n\nconst workspace1 = `\n-- go.mod --\nmodule golang.org\/entry\n\nrequire (\n\tgolang.org\/cmod v1.1.3\n)\ngo 1.18\n-- x\/x.go --\npackage x\n\nimport \t(\n \"golang.org\/cmod\/c\"\n \"golang.org\/entry\/y\"\n)\n\nfunc X() {\n\tc.C1().Vuln1() \/\/ vuln use: X -> Vuln1\n}\n\nfunc CallY() {\n\ty.Y() \/\/ vuln use: CallY -> y.Y -> bvuln.Vuln \n}\n\n-- y\/y.go --\npackage y\n\nimport \"golang.org\/cmod\/c\"\n\nfunc Y() {\n\tc.C2()() \/\/ vuln use: Y -> bvuln.Vuln\n}\n`\n\nconst proxy1 = `\n-- golang.org\/cmod@v1.1.3\/go.mod --\nmodule golang.org\/cmod\n\ngo 1.12\n-- golang.org\/cmod@v1.1.3\/c\/c.go --\npackage c\n\nimport (\n\t\"golang.org\/amod\/avuln\"\n\t\"golang.org\/bmod\/bvuln\"\n)\n\ntype I interface {\n\tVuln1()\n}\n\nfunc C1() I {\n\tv := avuln.VulnData{}\n\tv.Vuln2() \/\/ vuln use\n\treturn v\n}\n\nfunc C2() func() {\n\treturn bvuln.Vuln\n}\n-- golang.org\/amod@v1.1.3\/go.mod --\nmodule golang.org\/amod\n\ngo 1.14\n-- golang.org\/amod@v1.1.3\/avuln\/avuln.go --\npackage avuln\n\ntype VulnData struct {}\nfunc (v VulnData) Vuln1() {}\nfunc (v VulnData) Vuln2() {}\n-- golang.org\/bmod@v0.5.0\/go.mod --\nmodule golang.org\/bmod\n\ngo 1.14\n-- golang.org\/bmod@v0.5.0\/bvuln\/bvuln.go --\npackage bvuln\n\nfunc Vuln() {\n\t\/\/ something evil\n}\n`\n\n\/\/ testClient contains the following test vulnerabilities\n\/\/\n\/\/\tgolang.org\/amod\/avuln.{VulnData.Vuln1, vulnData.Vuln2}\n\/\/\tgolang.org\/bmod\/bvuln.{Vuln}\nvar testClient1 = &mockClient{\n\tret: map[string][]*osv.Entry{\n\t\t\"golang.org\/amod\": {\n\t\t\t{\n\t\t\t\tID: \"GO-2022-01\",\n\t\t\t\tReferences: []osv.Reference{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: \"href\",\n\t\t\t\t\t\tURL: \"pkg.go.dev\/vuln\/GO-2022-01\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAffected: []osv.Affected{{\n\t\t\t\t\tPackage: osv.Package{Name: \"golang.org\/amod\"},\n\t\t\t\t\tRanges: osv.Affects{{Type: osv.TypeSemver, Events: []osv.RangeEvent{{Introduced: \"1.0.0\"}, {Fixed: \"1.0.4\"}, {Introduced: \"1.1.2\"}}}},\n\t\t\t\t\tEcosystemSpecific: osv.EcosystemSpecific{\n\t\t\t\t\t\tImports: 
[]osv.EcosystemSpecificImport{{\n\t\t\t\t\t\t\tPath: \"golang.org\/amod\/avuln\",\n\t\t\t\t\t\t\tSymbols: []string{\"VulnData.Vuln1\", \"VulnData.Vuln2\"}}},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tID: \"GO-2022-03\",\n\t\t\t\tDetails: \"unaffecting vulnerability\",\n\t\t\t\tReferences: []osv.Reference{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: \"href\",\n\t\t\t\t\t\tURL: \"pkg.go.dev\/vuln\/GO-2022-01\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAffected: []osv.Affected{{\n\t\t\t\t\tPackage: osv.Package{Name: \"golang.org\/amod\"},\n\t\t\t\t\tRanges: osv.Affects{{Type: osv.TypeSemver, Events: []osv.RangeEvent{{Introduced: \"1.0.0\"}, {Fixed: \"1.0.4\"}, {Introduced: \"1.1.2\"}}}},\n\t\t\t\t\tEcosystemSpecific: osv.EcosystemSpecific{\n\t\t\t\t\t\tImports: []osv.EcosystemSpecificImport{{\n\t\t\t\t\t\t\tPath: \"golang.org\/amod\/avuln\",\n\t\t\t\t\t\t\tSymbols: []string{\"nonExisting\"}}},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\t\"golang.org\/bmod\": {\n\t\t\t{\n\t\t\t\tID: \"GO-2022-02\",\n\t\t\t\tAffected: []osv.Affected{{\n\t\t\t\t\tPackage: osv.Package{Name: \"golang.org\/bmod\"},\n\t\t\t\t\tRanges: osv.Affects{{Type: osv.TypeSemver}},\n\t\t\t\t\tEcosystemSpecific: osv.EcosystemSpecific{\n\t\t\t\t\t\tImports: []osv.EcosystemSpecificImport{{\n\t\t\t\t\t\t\tPath: \"golang.org\/bmod\/bvuln\",\n\t\t\t\t\t\t\tSymbols: []string{\"Vuln\"}}},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t},\n}\n\ntype mockClient struct {\n\tclient.Client\n\tret map[string][]*osv.Entry\n}\n\nfunc (mc *mockClient) GetByModule(ctx context.Context, a string) ([]*osv.Entry, error) {\n\treturn mc.ret[a], nil\n}\n\nfunc runTest(t *testing.T, workspaceData, proxyData string, test func(context.Context, source.Snapshot)) {\n\tws, err := fake.NewSandbox(&fake.SandboxConfig{\n\t\tFiles: fake.UnpackTxt(workspaceData),\n\t\tProxyFiles: fake.UnpackTxt(proxyData),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ws.Close()\n\n\tctx := tests.Context(t)\n\n\t\/\/ get the module cache populated and the go.sum file at the root auto-generated.\n\tdir := ws.Workdir.RootURI().SpanURI().Filename()\n\tif err := ws.RunGoCommand(ctx, dir, \"list\", []string{\"-mod=mod\", \"...\"}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcache := cache.New(nil, nil, nil)\n\tsession := cache.NewSession(ctx)\n\toptions := source.DefaultOptions().Clone()\n\ttests.DefaultOptions(options)\n\tsession.SetOptions(options)\n\tenvs := []string{}\n\tfor k, v := range ws.GoEnv() {\n\t\tenvs = append(envs, k+\"=\"+v)\n\t}\n\toptions.SetEnvSlice(envs)\n\tname := ws.RootDir()\n\tfolder := ws.Workdir.RootURI().SpanURI()\n\tview, snapshot, release, err := session.NewView(ctx, name, folder, options)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\t\/\/ The snapshot must be released before calling view.Shutdown, to avoid a\n\t\t\/\/ deadlock.\n\t\trelease()\n\t\tview.Shutdown(ctx)\n\t}()\n\n\ttest(ctx, snapshot)\n}\n\n\/\/ TODO: expose this as a method of Snapshot.\nfunc packagesCfg(ctx context.Context, snapshot source.Snapshot) *packages.Config {\n\tview := snapshot.View()\n\tviewBuildFlags := view.Options().BuildFlags\n\tvar viewEnv []string\n\tif e := view.Options().EnvSlice(); e != nil {\n\t\tviewEnv = append(os.Environ(), e...)\n\t}\n\treturn &packages.Config{\n\t\t\/\/ Mode will be set by cmd.Run.\n\t\tContext: ctx,\n\t\tTests: true,\n\t\tBuildFlags: viewBuildFlags,\n\t\tEnv: viewEnv,\n\t\tDir: view.Folder().Filename(),\n\t}\n}\ngopls\/internal\/vulncheck: use vulntest for test database creation\/\/ Copyright 2022 The Go 
Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build go1.18\n\/\/ +build go1.18\n\npackage vulncheck\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/google\/go-cmp\/cmp\/cmpopts\"\n\t\"golang.org\/x\/tools\/go\/packages\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/cache\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/fake\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/source\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/lsp\/tests\"\n\t\"golang.org\/x\/tools\/gopls\/internal\/vulncheck\/vulntest\"\n)\n\nfunc TestCmd_Run(t *testing.T) {\n\trunTest(t, workspace1, proxy1, func(ctx context.Context, snapshot source.Snapshot) {\n\t\tdb, err := vulntest.NewDatabase(ctx, []byte(vulnsData))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer db.Clean()\n\t\tcli, err := vulntest.NewClient(db)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tcmd := &cmd{Client: cli}\n\t\tcfg := packagesCfg(ctx, snapshot)\n\t\tresult, err := cmd.Run(ctx, cfg, \".\/...\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t\/\/ Check that we find the right number of vulnerabilities.\n\t\t\/\/ There should be three entries as there are three vulnerable\n\t\t\/\/ symbols in the two import-reachable OSVs.\n\t\tvar got []report\n\t\tfor _, v := range result {\n\t\t\tgot = append(got, toReport(v))\n\t\t}\n\t\t\/\/ drop the workspace root directory path included in the summary.\n\t\tcwd := cfg.Dir\n\t\tfor _, g := range got {\n\t\t\tfor i, summary := range g.CallStackSummaries {\n\t\t\t\tg.CallStackSummaries[i] = strings.ReplaceAll(summary, cwd, \".\")\n\t\t\t}\n\t\t}\n\n\t\tvar want = []report{\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-01\",\n\t\t\t\t\tDetails: \"Something.\\n\",\n\t\t\t\t\tSymbol: \"VulnData.Vuln1\",\n\t\t\t\t\tPkgPath: \"golang.org\/amod\/avuln\",\n\t\t\t\t\tModPath: \"golang.org\/amod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-01\",\n\t\t\t\t\tCurrentVersion: \"v1.1.3\",\n\t\t\t\t\tFixedVersion: \"v1.0.4\",\n\t\t\t\t\tCallStackSummaries: []string{\n\t\t\t\t\t\t\"golang.org\/entry\/x.X calls golang.org\/amod\/avuln.VulnData.Vuln1\",\n\t\t\t\t\t\t\"golang.org\/entry\/x.X calls golang.org\/cmod\/c.C1, which eventually calls golang.org\/amod\/avuln.VulnData.Vuln2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCallStacksStr: []string{\n\t\t\t\t\t\"golang.org\/entry\/x.X [approx.] (x.go:8)\\n\" +\n\t\t\t\t\t\t\"golang.org\/amod\/avuln.VulnData.Vuln1 (avuln.go:3)\\n\",\n\t\t\t\t\t\"golang.org\/entry\/x.X (x.go:8)\\n\" +\n\t\t\t\t\t\t\"golang.org\/cmod\/c.C1 (c.go:13)\\n\" +\n\t\t\t\t\t\t\"golang.org\/amod\/avuln.VulnData.Vuln2 (avuln.go:4)\\n\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-02\",\n\t\t\t\t\tSymbol: \"Vuln\",\n\t\t\t\t\tPkgPath: \"golang.org\/bmod\/bvuln\",\n\t\t\t\t\tModPath: \"golang.org\/bmod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-02\",\n\t\t\t\t\tCurrentVersion: \"v0.5.0\",\n\t\t\t\t\tCallStackSummaries: []string{\"golang.org\/entry\/y.Y calls golang.org\/bmod\/bvuln.Vuln\"},\n\t\t\t\t},\n\t\t\t\tCallStacksStr: []string{\n\t\t\t\t\t\"golang.org\/entry\/y.Y [approx.] 
(y.go:5)\\n\" +\n\t\t\t\t\t\t\"golang.org\/bmod\/bvuln.Vuln (bvuln.go:2)\\n\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tVuln: Vuln{\n\t\t\t\t\tID: \"GO-2022-03\",\n\t\t\t\t\tDetails: \"unaffecting vulnerability.\\n\",\n\t\t\t\t\tModPath: \"golang.org\/amod\",\n\t\t\t\t\tURL: \"https:\/\/pkg.go.dev\/vuln\/GO-2022-03\",\n\t\t\t\t\tFixedVersion: \"v1.0.4\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t\/\/ sort reports for stability before comparison.\n\t\tfor _, rpts := range [][]report{got, want} {\n\t\t\tsort.Slice(rpts, func(i, j int) bool {\n\t\t\t\ta, b := rpts[i], rpts[j]\n\t\t\t\tif a.ID != b.ID {\n\t\t\t\t\treturn a.ID < b.ID\n\t\t\t\t}\n\t\t\t\tif a.PkgPath != b.PkgPath {\n\t\t\t\t\treturn a.PkgPath < b.PkgPath\n\t\t\t\t}\n\t\t\t\treturn a.Symbol < b.Symbol\n\t\t\t})\n\t\t}\n\t\tif diff := cmp.Diff(want, got, cmpopts.IgnoreFields(report{}, \"Vuln.CallStacks\")); diff != \"\" {\n\t\t\tt.Error(diff)\n\t\t}\n\n\t})\n}\n\ntype report struct {\n\tVuln\n\t\/\/ Trace is stringified Vuln.CallStacks\n\tCallStacksStr []string\n}\n\nfunc toReport(v Vuln) report {\n\tvar r = report{Vuln: v}\n\tfor _, s := range v.CallStacks {\n\t\tr.CallStacksStr = append(r.CallStacksStr, CallStackString(s))\n\t}\n\treturn r\n}\n\nfunc CallStackString(callstack CallStack) string {\n\tvar b bytes.Buffer\n\tfor _, entry := range callstack {\n\t\tfname := filepath.Base(entry.URI.SpanURI().Filename())\n\t\tfmt.Fprintf(&b, \"%v (%v:%d)\\n\", entry.Name, fname, entry.Pos.Line)\n\t}\n\treturn b.String()\n}\n\nconst workspace1 = `\n-- go.mod --\nmodule golang.org\/entry\n\nrequire (\n\tgolang.org\/cmod v1.1.3\n)\ngo 1.18\n-- x\/x.go --\npackage x\n\nimport \t(\n \"golang.org\/cmod\/c\"\n \"golang.org\/entry\/y\"\n)\n\nfunc X() {\n\tc.C1().Vuln1() \/\/ vuln use: X -> Vuln1\n}\n\nfunc CallY() {\n\ty.Y() \/\/ vuln use: CallY -> y.Y -> bvuln.Vuln \n}\n\n-- y\/y.go --\npackage y\n\nimport \"golang.org\/cmod\/c\"\n\nfunc Y() {\n\tc.C2()() \/\/ vuln use: Y -> bvuln.Vuln\n}\n`\n\nconst proxy1 = `\n-- golang.org\/cmod@v1.1.3\/go.mod --\nmodule golang.org\/cmod\n\ngo 1.12\n-- golang.org\/cmod@v1.1.3\/c\/c.go --\npackage c\n\nimport (\n\t\"golang.org\/amod\/avuln\"\n\t\"golang.org\/bmod\/bvuln\"\n)\n\ntype I interface {\n\tVuln1()\n}\n\nfunc C1() I {\n\tv := avuln.VulnData{}\n\tv.Vuln2() \/\/ vuln use\n\treturn v\n}\n\nfunc C2() func() {\n\treturn bvuln.Vuln\n}\n-- golang.org\/amod@v1.1.3\/go.mod --\nmodule golang.org\/amod\n\ngo 1.14\n-- golang.org\/amod@v1.1.3\/avuln\/avuln.go --\npackage avuln\n\ntype VulnData struct {}\nfunc (v VulnData) Vuln1() {}\nfunc (v VulnData) Vuln2() {}\n-- golang.org\/bmod@v0.5.0\/go.mod --\nmodule golang.org\/bmod\n\ngo 1.14\n-- golang.org\/bmod@v0.5.0\/bvuln\/bvuln.go --\npackage bvuln\n\nfunc Vuln() {\n\t\/\/ something evil\n}\n`\n\nconst vulnsData = `\n-- GO-2022-01.yaml --\nmodules:\n - module: golang.org\/amod\n versions:\n - introduced: 1.0.0\n - fixed: 1.0.4\n - introduced: 1.1.2\n packages:\n - package: golang.org\/amod\/avuln\n symbols:\n - VulnData.Vuln1\n - VulnData.Vuln2\ndescription: |\n Something.\nreferences:\n - href: pkg.go.dev\/vuln\/GO-2022-01\n\n-- GO-2022-03.yaml --\nmodules:\n - module: golang.org\/amod\n versions:\n - introduced: 1.0.0\n - fixed: 1.0.4\n - introduced: 1.1.2\n packages:\n - package: golang.org\/amod\/avuln\n symbols:\n - nonExisting\ndescription: |\n unaffecting vulnerability.\n\n-- GO-2022-02.yaml --\nmodules:\n - module: golang.org\/bmod\n packages:\n - package: golang.org\/bmod\/bvuln\n symbols:\n - Vuln\n`\n\nfunc runTest(t *testing.T, workspaceData, proxyData string, 
test func(context.Context, source.Snapshot)) {\n\tws, err := fake.NewSandbox(&fake.SandboxConfig{\n\t\tFiles: fake.UnpackTxt(workspaceData),\n\t\tProxyFiles: fake.UnpackTxt(proxyData),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ws.Close()\n\n\tctx := tests.Context(t)\n\n\t\/\/ get the module cache populated and the go.sum file at the root auto-generated.\n\tdir := ws.Workdir.RootURI().SpanURI().Filename()\n\tif err := ws.RunGoCommand(ctx, dir, \"list\", []string{\"-mod=mod\", \"...\"}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcache := cache.New(nil, nil, nil)\n\tsession := cache.NewSession(ctx)\n\toptions := source.DefaultOptions().Clone()\n\ttests.DefaultOptions(options)\n\tsession.SetOptions(options)\n\tenvs := []string{}\n\tfor k, v := range ws.GoEnv() {\n\t\tenvs = append(envs, k+\"=\"+v)\n\t}\n\toptions.SetEnvSlice(envs)\n\tname := ws.RootDir()\n\tfolder := ws.Workdir.RootURI().SpanURI()\n\tview, snapshot, release, err := session.NewView(ctx, name, folder, options)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\t\/\/ The snapshot must be released before calling view.Shutdown, to avoid a\n\t\t\/\/ deadlock.\n\t\trelease()\n\t\tview.Shutdown(ctx)\n\t}()\n\n\ttest(ctx, snapshot)\n}\n\n\/\/ TODO: expose this as a method of Snapshot.\nfunc packagesCfg(ctx context.Context, snapshot source.Snapshot) *packages.Config {\n\tview := snapshot.View()\n\tviewBuildFlags := view.Options().BuildFlags\n\tvar viewEnv []string\n\tif e := view.Options().EnvSlice(); e != nil {\n\t\tviewEnv = append(os.Environ(), e...)\n\t}\n\treturn &packages.Config{\n\t\t\/\/ Mode will be set by cmd.Run.\n\t\tContext: ctx,\n\t\tTests: true,\n\t\tBuildFlags: viewBuildFlags,\n\t\tEnv: viewEnv,\n\t\tDir: view.Folder().Filename(),\n\t}\n}\n<|endoftext|>"} {"text":"package sqlstore\n\nimport (\n\t\"testing\"\n\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n)\n\n\/*\ntype UpdatePlaylistCommand struct {\n\tOrgId int64 `json:\"-\"`\n\tId int64 `json:\"id\" binding:\"Required\"`\n\tName string `json:\"name\" binding:\"Required\"`\n\tType string `json:\"type\"`\n\tInterval string `json:\"interval\"`\n\tItems []PlaylistItemDTO `json:\"items\"`\n\n\tResult *PlaylistDTO\n}\n\ntype CreatePlaylistCommand struct {\n\tName string `json:\"name\" binding:\"Required\"`\n\tInterval string `json:\"interval\"`\n\tData []int64 `json:\"data\"`\n\tItems []PlaylistItemDTO `json:\"items\"`\n\n\tOrgId int64 `json:\"-\"`\n\tResult *Playlist\n}\n\ntype DeletePlaylistCommand struct {\n\tId int64\n\tOrgId int64\n}\n\n*\/\n\nfunc TestPlaylistDataAccess(t *testing.T) {\n\n\tConvey(\"Testing Playlist data access\", t, func() {\n\t\tInitTestDB(t)\n\n\t\tConvey(\"Can create playlist\", func() {\n\t\t\titems := []m.PlaylistItemDTO{\n\t\t\t\tm.PlaylistItemDTO{Title: \"graphite\", Value: \"graphite\", Type: \"dashboard_by_tag\"},\n\t\t\t\tm.PlaylistItemDTO{Title: \"Backend response times\", Value: \"3\", Type: \"dashboard_by_id\"},\n\t\t\t}\n\t\t\tcmd := m.CreatePlaylistCommand{Name: \"NYC office\", Interval: \"10m\", OrgId: 1, Items: items}\n\t\t\terr := CreatePlaylist(&cmd)\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tConvey(\"can update playlist\", func() {\n\t\t\t\titems := []m.PlaylistItemDTO{\n\t\t\t\t\tm.PlaylistItemDTO{Title: \"influxdb\", Value: \"influxdb\", Type: \"dashboard_by_tag\"},\n\t\t\t\t\tm.PlaylistItemDTO{Title: \"Backend response times\", Value: \"2\", Type: \"dashboard_by_id\"},\n\t\t\t\t}\n\t\t\t\tquery := m.UpdatePlaylistCommand{Name: \"NYC office \", OrgId: 1, Id: 1, Interval: \"10s\", Items: items}\n\t\t\t\terr = UpdatePlaylist(&query)\n\n\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\tConvey(\"can remove playlist\", func() {\n\t\t\t\t\tquery := m.DeletePlaylistCommand{Id: 1}\n\t\t\t\t\terr = DeletePlaylist(&query)\n\n\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n}\nchore(playlist): remove commented codepackage sqlstore\n\nimport (\n\t\"testing\"\n\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n\n\tm \"github.com\/grafana\/grafana\/pkg\/models\"\n)\n\nfunc TestPlaylistDataAccess(t *testing.T) {\n\n\tConvey(\"Testing Playlist data access\", t, func() {\n\t\tInitTestDB(t)\n\n\t\tConvey(\"Can create playlist\", func() {\n\t\t\titems := []m.PlaylistItemDTO{\n\t\t\t\t{Title: \"graphite\", Value: \"graphite\", Type: \"dashboard_by_tag\"},\n\t\t\t\t{Title: \"Backend response times\", Value: \"3\", Type: \"dashboard_by_id\"},\n\t\t\t}\n\t\t\tcmd := m.CreatePlaylistCommand{Name: \"NYC office\", Interval: \"10m\", OrgId: 1, Items: items}\n\t\t\terr := CreatePlaylist(&cmd)\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tConvey(\"can update playlist\", func() {\n\t\t\t\titems := []m.PlaylistItemDTO{\n\t\t\t\t\t{Title: \"influxdb\", Value: \"influxdb\", Type: \"dashboard_by_tag\"},\n\t\t\t\t\t{Title: \"Backend response times\", Value: \"2\", Type: \"dashboard_by_id\"},\n\t\t\t\t}\n\t\t\t\tquery := m.UpdatePlaylistCommand{Name: \"NYC office \", OrgId: 1, Id: 1, Interval: \"10s\", Items: items}\n\t\t\t\terr = UpdatePlaylist(&query)\n\n\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\tConvey(\"can remove playlist\", func() {\n\t\t\t\t\tquery := m.DeletePlaylistCommand{Id: 1}\n\t\t\t\t\terr = DeletePlaylist(&query)\n\n\t\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"package couchdb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/cozy\/checkup\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestErrors(t *testing.T) {\n\terr := Error{StatusCode: 404, Name: \"not_found\", Reason: \"missing\"}\n\tassert.Contains(t, err.Error(), \"404\")\n\tassert.Contains(t, err.Error(), \"missing\")\n}\n\nconst TestDoctype = \"io.cozy.testobject\"\n\nvar TestPrefix = SimpleDatabasePrefix(\"couchdb-tests\")\n\ntype testDoc struct {\n\tTestID string `json:\"_id,omitempty\"`\n\tTestRev string `json:\"_rev,omitempty\"`\n\tTest string `json:\"test\"`\n\tFieldA string `json:\"fieldA,omitempty\"`\n\tFieldB int `json:\"fieldB,omitempty\"`\n}\n\nfunc (t *testDoc) ID() string {\n\treturn t.TestID\n}\n\nfunc (t *testDoc) Rev() string {\n\treturn t.TestRev\n}\n\nfunc (t *testDoc) DocType() string {\n\treturn TestDoctype\n}\n\nfunc (t *testDoc) SetID(id string) {\n\tt.TestID = id\n}\n\nfunc (t *testDoc) SetRev(rev string) {\n\tt.TestRev = rev\n}\n\nfunc makeTestDoc() Doc {\n\treturn &testDoc{\n\t\tTest: \"somevalue\",\n\t}\n}\n\nfunc TestCreateDoc(t *testing.T) {\n\tvar err error\n\n\tvar doc = makeTestDoc()\n\tassert.Empty(t, doc.Rev(), doc.ID())\n\n\t\/\/ Create the document\n\terr = CreateDoc(TestPrefix, doc)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, doc.Rev(), doc.ID())\n\n\tdocType, id := doc.DocType(), doc.ID()\n\n\t\/\/ Fetch it and see if its match\n\tfetched := &testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched)\n\tassert.NoError(t, err)\n\tassert.Equal(t, doc.ID(), fetched.ID())\n\tassert.Equal(t, doc.Rev(), fetched.Rev())\n\tassert.Equal(t, \"somevalue\", fetched.Test)\n\n\trevBackup := fetched.Rev()\n\n\t\/\/ Update it\n\tupdated := fetched\n\tupdated.Test = \"changedvalue\"\n\terr = UpdateDoc(TestPrefix, updated)\n\tassert.NoError(t, err)\n\tassert.NotEqual(t, revBackup, updated.Rev())\n\tassert.Equal(t, \"changedvalue\", updated.Test)\n\n\t\/\/ Refetch it and see if its match\n\tfetched2 := &testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched2)\n\tassert.NoError(t, 
err)\n\tassert.Equal(t, doc.ID(), fetched2.ID())\n\tassert.Equal(t, updated.Rev(), fetched2.Rev())\n\tassert.Equal(t, \"changedvalue\", fetched2.Test)\n\n\t\/\/ Delete it\n\terr = DeleteDoc(TestPrefix, updated)\n\tassert.NoError(t, err)\n\n\tfetched3 := &testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched3)\n\tassert.Error(t, err)\n\tcoucherr, iscoucherr := err.(*Error)\n\tif assert.True(t, iscoucherr) {\n\t\tassert.Equal(t, coucherr.Reason, \"deleted\")\n\t}\n\n}\n\nfunc TestGetAllDocs(t *testing.T) {\n\tdoc1 := &testDoc{Test: \"all_1\"}\n\tdoc2 := &testDoc{Test: \"all_2\"}\n\tCreateDoc(TestPrefix, doc1)\n\tCreateDoc(TestPrefix, doc2)\n\n\tvar results []*testDoc\n\terr := GetAllDocs(TestPrefix, TestDoctype, &AllDocsRequest{Limit: 2}, &results)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, results, 2)\n\t\tassert.Equal(t, results[0].Test, \"all_1\")\n\t\tassert.Equal(t, results[1].Test, \"all_2\")\n\t}\n}\n\nfunc TestDefineIndex(t *testing.T) {\n\terr := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tassert.NoError(t, err)\n\n\t\/\/ if I try to define the same index several times\n\terr2 := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tassert.NoError(t, err2)\n}\n\nfunc TestQuery(t *testing.T) {\n\n\t\/\/ create a few docs for testing\n\tdoc1 := testDoc{FieldA: \"value1\", FieldB: 100}\n\tdoc2 := testDoc{FieldA: \"value2\", FieldB: 1000}\n\tdoc3 := testDoc{FieldA: \"value2\", FieldB: 300}\n\tdoc4 := testDoc{FieldA: \"value13\", FieldB: 1500}\n\tdocs := []*testDoc{&doc1, &doc2, &doc3, &doc4}\n\tfor _, doc := range docs {\n\t\terr := CreateDoc(TestPrefix, doc)\n\t\tif !assert.NoError(t, err) || doc.ID() == \"\" {\n\t\t\tt.FailNow()\n\t\t\treturn\n\t\t}\n\t}\n\n\terr := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tif !assert.NoError(t, err) {\n\t\tt.FailNow()\n\t\treturn\n\t}\n\tvar out []testDoc\n\treq := &FindRequest{Selector: mango.Equal(\"fieldA\", \"value2\")}\n\terr = FindDocs(TestPrefix, TestDoctype, req, &out)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, out, 2, \"should get 2 results\")\n\t\t\/\/ if fieldA values are equal, docs will be ordered by fieldB\n\t\tassert.Equal(t, doc3.ID(), out[0].ID())\n\t\tassert.Equal(t, \"value2\", out[0].FieldA)\n\t\tassert.Equal(t, doc2.ID(), out[1].ID())\n\t\tassert.Equal(t, \"value2\", out[1].FieldA)\n\t}\n\n\tvar out2 []testDoc\n\treq2 := &FindRequest{Selector: mango.StartWith(\"fieldA\", \"value1\")}\n\terr = FindDocs(TestPrefix, TestDoctype, req2, &out2)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, out2, 2, \"should get 2 results\")\n\t\t\/\/ since we use startWith, docs will be ordered by the rest of fieldA\n\t\tassert.Equal(t, doc1.ID(), out2[0].ID())\n\t\tassert.Equal(t, doc4.ID(), out2[1].ID())\n\t}\n\n}\n\nfunc TestChangesSuccess(t *testing.T) {\n\terr := ResetDB(TestPrefix, TestDoctype)\n\tassert.NoError(t, err)\n\n\tvar request = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t}\n\tresponse, err := GetChanges(TestPrefix, request)\n\tvar seqnoAfterCreates = response.LastSeq\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 0)\n\n\tdoc1 := makeTestDoc()\n\tdoc2 := makeTestDoc()\n\tdoc3 := makeTestDoc()\n\tCreateDoc(TestPrefix, doc1)\n\tCreateDoc(TestPrefix, doc2)\n\tCreateDoc(TestPrefix, doc3)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince:   seqnoAfterCreates,\n\t}\n\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 
3)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince:   seqnoAfterCreates,\n\t\tLimit:   2,\n\t}\n\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 2)\n\n\tseqnoAfterCreates = response.LastSeq\n\n\tdoc4 := makeTestDoc()\n\tCreateDoc(TestPrefix, doc4)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince:   seqnoAfterCreates,\n\t}\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 2)\n}\n\nfunc TestMain(m *testing.M) {\n\tconfig.UseTestFile()\n\n\t\/\/ First we make sure couchdb is started\n\tdb, err := checkup.HTTPChecker{URL: config.CouchURL()}.Check()\n\tif err != nil || db.Status() != checkup.Healthy {\n\t\tfmt.Println(\"This test needs couchdb to run.\")\n\t\tos.Exit(1)\n\t}\n\n\terr = ResetDB(TestPrefix, TestDoctype)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't reset db (%s, %s) %s\\n\", TestPrefix, TestDoctype, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tres := m.Run()\n\n\tDeleteDB(TestPrefix, TestDoctype)\n\n\tos.Exit(res)\n}\nFix testspackage couchdb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/cozy\/checkup\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/config\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestErrors(t *testing.T) {\n\terr := Error{StatusCode: 404, Name: \"not_found\", Reason: \"missing\"}\n\tassert.Contains(t, err.Error(), \"not_found\")\n\tassert.Contains(t, err.Error(), \"missing\")\n}\n\nconst TestDoctype = \"io.cozy.testobject\"\n\nvar TestPrefix = SimpleDatabasePrefix(\"couchdb-tests\")\n\ntype testDoc struct {\n\tTestID  string `json:\"_id,omitempty\"`\n\tTestRev string `json:\"_rev,omitempty\"`\n\tTest    string `json:\"test\"`\n\tFieldA  string `json:\"fieldA,omitempty\"`\n\tFieldB  int    `json:\"fieldB,omitempty\"`\n}\n\nfunc (t *testDoc) ID() string {\n\treturn t.TestID\n}\n\nfunc (t *testDoc) Rev() string {\n\treturn t.TestRev\n}\n\nfunc (t *testDoc) DocType() string {\n\treturn TestDoctype\n}\n\nfunc (t *testDoc) SetID(id string) {\n\tt.TestID = id\n}\n\nfunc (t *testDoc) SetRev(rev string) {\n\tt.TestRev = rev\n}\n\nfunc makeTestDoc() Doc {\n\treturn &testDoc{\n\t\tTest: \"somevalue\",\n\t}\n}\n\nfunc TestCreateDoc(t *testing.T) {\n\tvar err error\n\n\tvar doc = makeTestDoc()\n\tassert.Empty(t, doc.Rev(), doc.ID())\n\n\t\/\/ Create the document\n\terr = CreateDoc(TestPrefix, doc)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, doc.Rev(), doc.ID())\n\n\tdocType, id := doc.DocType(), doc.ID()\n\n\t\/\/ Fetch it and see if it matches\n\tfetched := &testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched)\n\tassert.NoError(t, err)\n\tassert.Equal(t, doc.ID(), fetched.ID())\n\tassert.Equal(t, doc.Rev(), fetched.Rev())\n\tassert.Equal(t, \"somevalue\", fetched.Test)\n\n\trevBackup := fetched.Rev()\n\n\t\/\/ Update it\n\tupdated := fetched\n\tupdated.Test = \"changedvalue\"\n\terr = UpdateDoc(TestPrefix, updated)\n\tassert.NoError(t, err)\n\tassert.NotEqual(t, revBackup, updated.Rev())\n\tassert.Equal(t, \"changedvalue\", updated.Test)\n\n\t\/\/ Refetch it and see if it matches\n\tfetched2 := &testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched2)\n\tassert.NoError(t, err)\n\tassert.Equal(t, doc.ID(), fetched2.ID())\n\tassert.Equal(t, updated.Rev(), fetched2.Rev())\n\tassert.Equal(t, \"changedvalue\", fetched2.Test)\n\n\t\/\/ Delete it\n\terr = DeleteDoc(TestPrefix, updated)\n\tassert.NoError(t, err)\n\n\tfetched3 := 
&testDoc{}\n\terr = GetDoc(TestPrefix, docType, id, fetched3)\n\tassert.Error(t, err)\n\tcoucherr, iscoucherr := err.(*Error)\n\tif assert.True(t, iscoucherr) {\n\t\tassert.Equal(t, coucherr.Reason, \"deleted\")\n\t}\n\n}\n\nfunc TestGetAllDocs(t *testing.T) {\n\tdoc1 := &testDoc{Test: \"all_1\"}\n\tdoc2 := &testDoc{Test: \"all_2\"}\n\tCreateDoc(TestPrefix, doc1)\n\tCreateDoc(TestPrefix, doc2)\n\n\tvar results []*testDoc\n\terr := GetAllDocs(TestPrefix, TestDoctype, &AllDocsRequest{Limit: 2}, &results)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, results, 2)\n\t\tassert.Equal(t, results[0].Test, \"all_1\")\n\t\tassert.Equal(t, results[1].Test, \"all_2\")\n\t}\n}\n\nfunc TestDefineIndex(t *testing.T) {\n\terr := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tassert.NoError(t, err)\n\n\t\/\/ if I try to define the same index several time\n\terr2 := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tassert.NoError(t, err2)\n}\n\nfunc TestQuery(t *testing.T) {\n\n\t\/\/ create a few docs for testing\n\tdoc1 := testDoc{FieldA: \"value1\", FieldB: 100}\n\tdoc2 := testDoc{FieldA: \"value2\", FieldB: 1000}\n\tdoc3 := testDoc{FieldA: \"value2\", FieldB: 300}\n\tdoc4 := testDoc{FieldA: \"value13\", FieldB: 1500}\n\tdocs := []*testDoc{&doc1, &doc2, &doc3, &doc4}\n\tfor _, doc := range docs {\n\t\terr := CreateDoc(TestPrefix, doc)\n\t\tif !assert.NoError(t, err) || doc.ID() == \"\" {\n\t\t\tt.FailNow()\n\t\t\treturn\n\t\t}\n\t}\n\n\terr := DefineIndex(TestPrefix, TestDoctype, mango.IndexOnFields(\"fieldA\", \"fieldB\"))\n\tif !assert.NoError(t, err) {\n\t\tt.FailNow()\n\t\treturn\n\t}\n\tvar out []testDoc\n\treq := &FindRequest{Selector: mango.Equal(\"fieldA\", \"value2\")}\n\terr = FindDocs(TestPrefix, TestDoctype, req, &out)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, out, 2, \"should get 2 results\")\n\t\t\/\/ if fieldA are equaly, docs will be ordered by fieldB\n\t\tassert.Equal(t, doc3.ID(), out[0].ID())\n\t\tassert.Equal(t, \"value2\", out[0].FieldA)\n\t\tassert.Equal(t, doc2.ID(), out[1].ID())\n\t\tassert.Equal(t, \"value2\", out[1].FieldA)\n\t}\n\n\tvar out2 []testDoc\n\treq2 := &FindRequest{Selector: mango.StartWith(\"fieldA\", \"value1\")}\n\terr = FindDocs(TestPrefix, TestDoctype, req2, &out2)\n\tif assert.NoError(t, err) {\n\t\tassert.Len(t, out, 2, \"should get 2 results\")\n\t\t\/\/ if we do as startWith, docs will be ordered by the rest of fieldA\n\t\tassert.Equal(t, doc1.ID(), out2[0].ID())\n\t\tassert.Equal(t, doc4.ID(), out2[1].ID())\n\t}\n\n}\n\nfunc TestChangesSuccess(t *testing.T) {\n\terr := ResetDB(TestPrefix, TestDoctype)\n\tassert.NoError(t, err)\n\n\tvar request = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t}\n\tresponse, err := GetChanges(TestPrefix, request)\n\tvar seqnoAfterCreates = response.LastSeq\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 0)\n\n\tdoc1 := makeTestDoc()\n\tdoc2 := makeTestDoc()\n\tdoc3 := makeTestDoc()\n\tCreateDoc(TestPrefix, doc1)\n\tCreateDoc(TestPrefix, doc2)\n\tCreateDoc(TestPrefix, doc3)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince: seqnoAfterCreates,\n\t}\n\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 3)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince: seqnoAfterCreates,\n\t\tLimit: 2,\n\t}\n\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 2)\n\n\tseqnoAfterCreates = 
response.LastSeq\n\n\tdoc4 := makeTestDoc()\n\tCreateDoc(TestPrefix, doc4)\n\n\trequest = &ChangesRequest{\n\t\tDocType: TestDoctype,\n\t\tSince:   seqnoAfterCreates,\n\t}\n\tresponse, err = GetChanges(TestPrefix, request)\n\tassert.NoError(t, err)\n\tassert.Len(t, response.Results, 2)\n}\n\nfunc TestMain(m *testing.M) {\n\tconfig.UseTestFile()\n\n\t\/\/ First we make sure couchdb is started\n\tdb, err := checkup.HTTPChecker{URL: config.CouchURL()}.Check()\n\tif err != nil || db.Status() != checkup.Healthy {\n\t\tfmt.Println(\"This test needs couchdb to run.\")\n\t\tos.Exit(1)\n\t}\n\n\terr = ResetDB(TestPrefix, TestDoctype)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't reset db (%s, %s) %s\\n\", TestPrefix, TestDoctype, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tres := m.Run()\n\n\tDeleteDB(TestPrefix, TestDoctype)\n\n\tos.Exit(res)\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"github.com\/couchbaselabs\/sgload\/sgload\"\n\t\"github.com\/inconshreveable\/log15\"\n)\n\nconst (\n\tNUM_READERS_CMD_NAME    = \"numreaders\"\n\tNUM_READERS_CMD_DEFAULT = 100\n\tNUM_READERS_CMD_DESC    = \"The number of unique readers that will read documents. Each reader runs concurrently in it's own goroutine\"\n\n\tNUM_WRITERS_CMD_NAME    = \"numwriters\"\n\tNUM_WRITERS_CMD_DEFAULT = 100\n\tNUM_WRITERS_CMD_DESC    = \"The number of unique users that will write documents. Each writer runs concurrently in it's own goroutine\"\n\n\tCREATE_WRITERS_CMD_NAME    = \"createwriters\"\n\tCREATE_WRITERS_CMD_DEFAULT = false\n\tCREATE_WRITERS_CMD_DESC    = \"Add this flag if you need the test to create SG users for writers.\"\n\n\tNUM_CHANS_PER_READER_CMD_NAME    = \"num-chans-per-reader\"\n\tNUM_CHANS_PER_READER_CMD_DEFAULT = 1\n\tNUM_CHANS_PER_READER_CMD_DESC    = \"The number of channels that each reader has access to.\"\n\n\tCREATE_READERS_CMD_NAME    = \"createreaders\"\n\tCREATE_READERS_CMD_DEFAULT = false\n\tCREATE_READERS_CMD_DESC    = \"Add this flag if you need the test to create SG users for readers.\"\n\n\tSKIP_WRITELOAD_CMD_NAME    = \"skipwriteload\"\n\tSKIP_WRITELOAD_CMD_DEFAULT = false\n\tSKIP_WRITELOAD_CMD_DESC    = \"By default will first run the corresponding writeload, so that it has documents to read, but set this flag if you've run that step separately\"\n\n\tNUM_UPDATERS_CMD_NAME    = \"numupdaters\"\n\tNUM_UPDATERS_CMD_DEFAULT = 100\n\tNUM_UPDATERS_CMD_DESC    = \"The number of unique users that will update documents. 
Each updater runs concurrently in it's own goroutine\"\n\n\tNUM_REVS_PER_DOC_CMD_NAME = \"numrevsperdoc\"\n\tNUM_REVS_PER_DOC_CMD_DEFAULT = 100\n\tNUM_REVS_PER_DOC_CMD_DESC = \"The number of updates per doc (total revs will be numrevsperdoc * numrevsperupdate)\"\n\n\tNUM_REVS_PER_UPDATE_CMD_NAME = \"numrevsperupdate\"\n\tNUM_REVS_PER_UPDATE_CMD_DEFAULT = 1\n\tNUM_REVS_PER_UPDATE_CMD_DESC = \"The number of revisions per doc to add in each update\"\n)\n\nfunc createLoadSpecFromArgs() sgload.LoadSpec {\n\n\tloadSpec := sgload.LoadSpec{\n\t\tSyncGatewayUrl: *sgUrl,\n\t\tSyncGatewayAdminPort: *sgAdminPort,\n\t\tMockDataStore: *mockDataStore,\n\t\tStatsdEnabled: *statsdEnabled,\n\t\tStatsdEndpoint: *statsdEndpoint,\n\t\tTestSessionID: *testSessionID,\n\t\tBatchSize: *batchSize,\n\t\tNumChannels: *numChannels,\n\t\tDocSizeBytes: *docSizeBytes,\n\t\tNumDocs: *numDocs,\n\t\tCompressionEnabled: *compressionEnabled,\n\t\tExpvarProgressEnabled: *expvarProgressEnabled,\n\t}\n\n\tswitch *logLevelStr {\n\tcase \"critical\":\n\t\tloadSpec.LogLevel = log15.LvlCrit\n\tcase \"error\":\n\t\tloadSpec.LogLevel = log15.LvlError\n\tcase \"warn\":\n\t\tloadSpec.LogLevel = log15.LvlWarn\n\tcase \"info\":\n\t\tloadSpec.LogLevel = log15.LvlInfo\n\tcase \"debug\":\n\t\tloadSpec.LogLevel = log15.LvlDebug\n\t}\n\n\tloadSpec.TestSessionID = sgload.NewUuid()\n\treturn loadSpec\n}\nChange default numrevsperdoc from 100 -> 5package cmd\n\nimport (\n\t\"github.com\/couchbaselabs\/sgload\/sgload\"\n\t\"github.com\/inconshreveable\/log15\"\n)\n\nconst (\n\tNUM_READERS_CMD_NAME = \"numreaders\"\n\tNUM_READERS_CMD_DEFAULT = 100\n\tNUM_READERS_CMD_DESC = \"The number of unique readers that will read documents. Each reader runs concurrently in it's own goroutine\"\n\n\tNUM_WRITERS_CMD_NAME = \"numwriters\"\n\tNUM_WRITERS_CMD_DEFAULT = 100\n\tNUM_WRITERS_CMD_DESC = \"The number of unique users that will write documents. Each writer runs concurrently in it's own goroutine\"\n\n\tCREATE_WRITERS_CMD_NAME = \"createwriters\"\n\tCREATE_WRITERS_CMD_DEFAULT = false\n\tCREATE_WRITERS_CMD_DESC = \"Add this flag if you need the test to create SG users for writers.\"\n\n\tNUM_CHANS_PER_READER_CMD_NAME = \"num-chans-per-reader\"\n\tNUM_CHANS_PER_READER_CMD_DEFAULT = 1\n\tNUM_CHANS_PER_READER_CMD_DESC = \"The number of channels that each reader has access to.\"\n\n\tCREATE_READERS_CMD_NAME = \"createreaders\"\n\tCREATE_READERS_CMD_DEFAULT = false\n\tCREATE_READERS_CMD_DESC = \"Add this flag if you need the test to create SG users for readers.\"\n\n\tSKIP_WRITELOAD_CMD_NAME = \"skipwriteload\"\n\tSKIP_WRITELOAD_CMD_DEFAULT = false\n\tSKIP_WRITELOAD_CMD_DESC = \"By default will first run the corresponding writeload, so that it has documents to read, but set this flag if you've run that step separately\"\n\n\tNUM_UPDATERS_CMD_NAME = \"numupdaters\"\n\tNUM_UPDATERS_CMD_DEFAULT = 100\n\tNUM_UPDATERS_CMD_DESC = \"The number of unique users that will update documents. 
Each updater runs concurrently in it's own goroutine\"\n\n\tNUM_REVS_PER_DOC_CMD_NAME = \"numrevsperdoc\"\n\tNUM_REVS_PER_DOC_CMD_DEFAULT = 5\n\tNUM_REVS_PER_DOC_CMD_DESC = \"The number of updates per doc (total revs will be numrevsperdoc * numrevsperupdate)\"\n\n\tNUM_REVS_PER_UPDATE_CMD_NAME = \"numrevsperupdate\"\n\tNUM_REVS_PER_UPDATE_CMD_DEFAULT = 1\n\tNUM_REVS_PER_UPDATE_CMD_DESC = \"The number of revisions per doc to add in each update\"\n)\n\nfunc createLoadSpecFromArgs() sgload.LoadSpec {\n\n\tloadSpec := sgload.LoadSpec{\n\t\tSyncGatewayUrl: *sgUrl,\n\t\tSyncGatewayAdminPort: *sgAdminPort,\n\t\tMockDataStore: *mockDataStore,\n\t\tStatsdEnabled: *statsdEnabled,\n\t\tStatsdEndpoint: *statsdEndpoint,\n\t\tTestSessionID: *testSessionID,\n\t\tBatchSize: *batchSize,\n\t\tNumChannels: *numChannels,\n\t\tDocSizeBytes: *docSizeBytes,\n\t\tNumDocs: *numDocs,\n\t\tCompressionEnabled: *compressionEnabled,\n\t\tExpvarProgressEnabled: *expvarProgressEnabled,\n\t}\n\n\tswitch *logLevelStr {\n\tcase \"critical\":\n\t\tloadSpec.LogLevel = log15.LvlCrit\n\tcase \"error\":\n\t\tloadSpec.LogLevel = log15.LvlError\n\tcase \"warn\":\n\t\tloadSpec.LogLevel = log15.LvlWarn\n\tcase \"info\":\n\t\tloadSpec.LogLevel = log15.LvlInfo\n\tcase \"debug\":\n\t\tloadSpec.LogLevel = log15.LvlDebug\n\t}\n\n\tloadSpec.TestSessionID = sgload.NewUuid()\n\treturn loadSpec\n}\n<|endoftext|>"} {"text":"package uncertainty\n\nimport (\n\t\"errors\"\n\t\"math\"\n\n\t\"github.com\/ready-steady\/linear\/matrix\"\n\t\"github.com\/ready-steady\/probability\/distribution\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/config\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/support\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/system\"\n\n\tscorrelation \"github.com\/ready-steady\/statistics\/correlation\"\n\ticorrelation \"github.com\/turing-complete\/laboratory\/src\/internal\/correlation\"\n\tidistribution \"github.com\/turing-complete\/laboratory\/src\/internal\/distribution\"\n)\n\nvar (\n\tepsilon = math.Nextafter(1.0, 2.0) - 1.0\n\tstandardGaussian = distribution.NewGaussian(0.0, 1.0)\n)\n\ntype base struct {\n\ttasks []uint\n\tlower []float64\n\tupper []float64\n\n\tnt uint\n\tnu uint\n\tnz uint\n\n\tcorrelation *correlation\n\tmarginals []distribution.Continuous\n}\n\ntype correlation struct {\n\tR []float64\n\tC []float64 \/\/ x = C * z\n\tD []float64 \/\/ z = D * x\n\tP []float64 \/\/ R^(-1) - I\n\n\tdetR float64\n}\n\nfunc newBase(system *system.System, reference []float64,\n\tconfig *config.Uncertainty) (*base, error) {\n\n\tnt := uint(len(reference))\n\n\ttasks, err := support.ParseNaturalIndex(config.Tasks, 0, nt-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnu := uint(len(tasks))\n\n\tlower := make([]float64, nt)\n\tupper := make([]float64, nt)\n\n\tcopy(lower, reference)\n\tcopy(upper, reference)\n\n\tfor _, tid := range tasks {\n\t\tlower[tid] -= config.Deviation * reference[tid]\n\t\tupper[tid] += config.Deviation * reference[tid]\n\t}\n\n\tif nu == 0 {\n\t\treturn &base{\n\t\t\ttasks: tasks,\n\t\t\tlower: lower,\n\t\t\tupper: upper,\n\n\t\t\tnt: nt,\n\t\t}, nil\n\t}\n\n\tcorrelation, err := correlate(system, config, tasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnz := uint(len(correlation.C)) \/ nu\n\n\tmarginalizer, err := idistribution.Parse(config.Distribution)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarginals := make([]distribution.Continuous, nu)\n\tfor i, tid := range tasks {\n\t\tmarginals[i] = marginalizer(lower[tid], 
upper[tid])\n\t}\n\n\treturn &base{\n\t\ttasks: tasks,\n\t\tlower: lower,\n\t\tupper: upper,\n\n\t\tnt: nt,\n\t\tnu: nu,\n\t\tnz: nz,\n\n\t\tcorrelation: correlation,\n\t\tmarginals: marginals,\n\t}, nil\n}\n\nfunc (self *base) Mapping() (uint, uint) {\n\treturn self.nz, self.nt\n}\n\nfunc (self *base) Evaluate(ω []float64) float64 {\n\tnu, nz := self.nu, self.nz\n\n\tif nu != nz {\n\t\tpanic(\"model-order reduction is not supported\")\n\t}\n\n\tu := make([]float64, nu)\n\n\t\/\/ Dependent desired to dependent uniform\n\tfor i, tid := range self.tasks {\n\t\tu[i] = self.marginals[i].Cumulate(ω[tid])\n\t}\n\n\t\/\/ Dependent uniform to dependent Gaussian\n\tfor i := range u {\n\t\tu[i] = standardGaussian.Invert(u[i])\n\t}\n\n\texponent := -0.5 * quadratic(self.correlation.P, u, nu)\n\n\tamplitude := 1.0\n\tfor i, tid := range self.tasks {\n\t\tamplitude *= self.marginals[i].Weigh(ω[tid])\n\t}\n\n\tnormalization := math.Sqrt(self.correlation.detR)\n\n\treturn amplitude * math.Exp(exponent) \/ normalization\n}\n\nfunc (self *base) Forward(ω []float64) []float64 {\n\tnu, nz := self.nu, self.nz\n\n\tz := make([]float64, nz)\n\tu := make([]float64, nu)\n\n\t\/\/ Dependent desired to dependent uniform\n\tfor i, tid := range self.tasks {\n\t\tu[i] = self.marginals[i].Cumulate(ω[tid])\n\t}\n\n\t\/\/ Dependent uniform to dependent Gaussian\n\tfor i := range u {\n\t\tu[i] = standardGaussian.Invert(u[i])\n\t}\n\n\t\/\/ Dependent Gaussian to independent Gaussian\n\tn := multiply(self.correlation.D, u, nz, nu)\n\n\t\/\/ Independent Gaussian to independent uniform\n\tfor i := range n {\n\t\tz[i] = standardGaussian.Cumulate(n[i])\n\t}\n\n\treturn z\n}\n\nfunc (self *base) Backward(z []float64) []float64 {\n\tnu, nz := self.nu, self.nz\n\n\tω := append([]float64(nil), self.lower...)\n\tn := make([]float64, nz)\n\n\t\/\/ Independent uniform to independent Gaussian\n\tfor i := range n {\n\t\tn[i] = standardGaussian.Invert(z[i])\n\t}\n\n\t\/\/ Independent Gaussian to dependent Gaussian\n\tu := multiply(self.correlation.C, n, nu, nz)\n\n\t\/\/ Dependent Gaussian to dependent uniform\n\tfor i := range u {\n\t\tu[i] = standardGaussian.Cumulate(u[i])\n\t}\n\n\t\/\/ Dependent uniform to dependent desired\n\tfor i, tid := range self.tasks {\n\t\tω[tid] = self.marginals[i].Invert(u[i])\n\t}\n\n\treturn ω\n}\n\nfunc correlate(system *system.System, config *config.Uncertainty,\n\ttasks []uint) (*correlation, error) {\n\n\tε := math.Sqrt(epsilon)\n\n\tnu := uint(len(tasks))\n\n\tif config.Correlation == 0.0 {\n\t\treturn &correlation{\n\t\t\tR: matrix.Identity(nu),\n\t\t\tC: matrix.Identity(nu),\n\t\t\tD: matrix.Identity(nu),\n\t\t\tP: make([]float64, nu*nu),\n\n\t\t\tdetR: 1.0,\n\t\t}, nil\n\t}\n\tif config.Correlation < 0.0 {\n\t\treturn nil, errors.New(\"the correlation length should be nonnegative\")\n\t}\n\tif config.Variance <= 0.0 {\n\t\treturn nil, errors.New(\"the variance threshold should be positive\")\n\t}\n\n\tR := icorrelation.Compute(system.Application, tasks, config.Correlation)\n\n\tC, D, U, Λ, err := scorrelation.Decompose(R, nu, config.Variance, ε)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdetR := 1.0\n\tfor _, λ := range Λ {\n\t\tif λ <= 0.0 {\n\t\t\treturn nil, errors.New(\"the corelation matrix is invalid or singular\")\n\t\t}\n\t\tdetR *= λ\n\t}\n\n\tP, err := invert(U, Λ, nu)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := uint(0); i < nu; i++ {\n\t\tP[i*nu+i] -= 1.0\n\t}\n\n\treturn &correlation{\n\t\tR: R,\n\t\tC: C,\n\t\tD: D,\n\t\tP: P,\n\n\t\tdetR: detR,\n\t}, 
nil\n}\ni\/uncertainty: rename a variablepackage uncertainty\n\nimport (\n\t\"errors\"\n\t\"math\"\n\n\t\"github.com\/ready-steady\/linear\/matrix\"\n\t\"github.com\/ready-steady\/probability\/distribution\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/config\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/support\"\n\t\"github.com\/turing-complete\/laboratory\/src\/internal\/system\"\n\n\tscorrelation \"github.com\/ready-steady\/statistics\/correlation\"\n\ticorrelation \"github.com\/turing-complete\/laboratory\/src\/internal\/correlation\"\n\tidistribution \"github.com\/turing-complete\/laboratory\/src\/internal\/distribution\"\n)\n\nvar (\n\tepsilon = math.Nextafter(1.0, 2.0) - 1.0\n\tgaussian = distribution.NewGaussian(0.0, 1.0)\n)\n\ntype base struct {\n\ttasks []uint\n\tlower []float64\n\tupper []float64\n\n\tnt uint\n\tnu uint\n\tnz uint\n\n\tcorrelation *correlation\n\tmarginals []distribution.Continuous\n}\n\ntype correlation struct {\n\tR []float64\n\tC []float64 \/\/ x = C * z\n\tD []float64 \/\/ z = D * x\n\tP []float64 \/\/ R^(-1) - I\n\n\tdetR float64\n}\n\nfunc newBase(system *system.System, reference []float64,\n\tconfig *config.Uncertainty) (*base, error) {\n\n\tnt := uint(len(reference))\n\n\ttasks, err := support.ParseNaturalIndex(config.Tasks, 0, nt-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnu := uint(len(tasks))\n\n\tlower := make([]float64, nt)\n\tupper := make([]float64, nt)\n\n\tcopy(lower, reference)\n\tcopy(upper, reference)\n\n\tfor _, tid := range tasks {\n\t\tlower[tid] -= config.Deviation * reference[tid]\n\t\tupper[tid] += config.Deviation * reference[tid]\n\t}\n\n\tif nu == 0 {\n\t\treturn &base{\n\t\t\ttasks: tasks,\n\t\t\tlower: lower,\n\t\t\tupper: upper,\n\n\t\t\tnt: nt,\n\t\t}, nil\n\t}\n\n\tcorrelation, err := correlate(system, config, tasks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnz := uint(len(correlation.C)) \/ nu\n\n\tmarginalizer, err := idistribution.Parse(config.Distribution)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarginals := make([]distribution.Continuous, nu)\n\tfor i, tid := range tasks {\n\t\tmarginals[i] = marginalizer(lower[tid], upper[tid])\n\t}\n\n\treturn &base{\n\t\ttasks: tasks,\n\t\tlower: lower,\n\t\tupper: upper,\n\n\t\tnt: nt,\n\t\tnu: nu,\n\t\tnz: nz,\n\n\t\tcorrelation: correlation,\n\t\tmarginals: marginals,\n\t}, nil\n}\n\nfunc (self *base) Mapping() (uint, uint) {\n\treturn self.nz, self.nt\n}\n\nfunc (self *base) Evaluate(ω []float64) float64 {\n\tnu, nz := self.nu, self.nz\n\n\tif nu != nz {\n\t\tpanic(\"model-order reduction is not supported\")\n\t}\n\n\tu := make([]float64, nu)\n\n\t\/\/ Dependent desired to dependent uniform\n\tfor i, tid := range self.tasks {\n\t\tu[i] = self.marginals[i].Cumulate(ω[tid])\n\t}\n\n\t\/\/ Dependent uniform to dependent Gaussian\n\tfor i := range u {\n\t\tu[i] = gaussian.Invert(u[i])\n\t}\n\n\texponent := -0.5 * quadratic(self.correlation.P, u, nu)\n\n\tamplitude := 1.0\n\tfor i, tid := range self.tasks {\n\t\tamplitude *= self.marginals[i].Weigh(ω[tid])\n\t}\n\n\tnormalization := math.Sqrt(self.correlation.detR)\n\n\treturn amplitude * math.Exp(exponent) \/ normalization\n}\n\nfunc (self *base) Forward(ω []float64) []float64 {\n\tnu, nz := self.nu, self.nz\n\n\tz := make([]float64, nz)\n\tu := make([]float64, nu)\n\n\t\/\/ Dependent desired to dependent uniform\n\tfor i, tid := range self.tasks {\n\t\tu[i] = self.marginals[i].Cumulate(ω[tid])\n\t}\n\n\t\/\/ Dependent uniform to dependent Gaussian\n\tfor i := range u 
{\n\t\tu[i] = gaussian.Invert(u[i])\n\t}\n\n\t\/\/ Dependent Gaussian to independent Gaussian\n\tn := multiply(self.correlation.D, u, nz, nu)\n\n\t\/\/ Independent Gaussian to independent uniform\n\tfor i := range n {\n\t\tz[i] = gaussian.Cumulate(n[i])\n\t}\n\n\treturn z\n}\n\nfunc (self *base) Backward(z []float64) []float64 {\n\tnu, nz := self.nu, self.nz\n\n\tω := append([]float64(nil), self.lower...)\n\tn := make([]float64, nz)\n\n\t\/\/ Independent uniform to independent Gaussian\n\tfor i := range n {\n\t\tn[i] = gaussian.Invert(z[i])\n\t}\n\n\t\/\/ Independent Gaussian to dependent Gaussian\n\tu := multiply(self.correlation.C, n, nu, nz)\n\n\t\/\/ Dependent Gaussian to dependent uniform\n\tfor i := range u {\n\t\tu[i] = gaussian.Cumulate(u[i])\n\t}\n\n\t\/\/ Dependent uniform to dependent desired\n\tfor i, tid := range self.tasks {\n\t\tω[tid] = self.marginals[i].Invert(u[i])\n\t}\n\n\treturn ω\n}\n\nfunc correlate(system *system.System, config *config.Uncertainty,\n\ttasks []uint) (*correlation, error) {\n\n\tε := math.Sqrt(epsilon)\n\n\tnu := uint(len(tasks))\n\n\tif config.Correlation == 0.0 {\n\t\treturn &correlation{\n\t\t\tR: matrix.Identity(nu),\n\t\t\tC: matrix.Identity(nu),\n\t\t\tD: matrix.Identity(nu),\n\t\t\tP: make([]float64, nu*nu),\n\n\t\t\tdetR: 1.0,\n\t\t}, nil\n\t}\n\tif config.Correlation < 0.0 {\n\t\treturn nil, errors.New(\"the correlation length should be nonnegative\")\n\t}\n\tif config.Variance <= 0.0 {\n\t\treturn nil, errors.New(\"the variance threshold should be positive\")\n\t}\n\n\tR := icorrelation.Compute(system.Application, tasks, config.Correlation)\n\n\tC, D, U, Λ, err := scorrelation.Decompose(R, nu, config.Variance, ε)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdetR := 1.0\n\tfor _, λ := range Λ {\n\t\tif λ <= 0.0 {\n\t\t\treturn nil, errors.New(\"the corelation matrix is invalid or singular\")\n\t\t}\n\t\tdetR *= λ\n\t}\n\n\tP, err := invert(U, Λ, nu)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := uint(0); i < nu; i++ {\n\t\tP[i*nu+i] -= 1.0\n\t}\n\n\treturn &correlation{\n\t\tR: R,\n\t\tC: C,\n\t\tD: D,\n\t\tP: P,\n\n\t\tdetR: detR,\n\t}, nil\n}\n<|endoftext|>"} {"text":"\/\/ Package chartmogul is a simple Go API library for Chartmogul public API.\n\/\/\n\/\/ HTTP 2\n\/\/\n\/\/ ChartMogul's current stable version of nginx is incompatible with HTTP 2 implementation of Go.\n\/\/ For this reason the application must run with the following (or otherwise prohibit HTTP 2):\n\/\/ export GODEBUG=http2client=0\n\/\/\n\/\/ Uses the library gorequest, which allows simple struct->query, body->struct,\n\/\/ struct->body.\npackage chartmogul\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/parnurzeal\/gorequest\"\n)\n\nconst (\n\tlogFormatting = \"%v: %v (page %v of %v)\"\n\n\t\/\/ ErrKeyExternalID is key in Errors map indicating there's a problem with External ID of the resource.\n\tErrKeyExternalID = \"external_id\"\n\t\/\/ ErrKeyTransactionExternalID is key in Errors map indicating there's a problem with External ID of the transaction.\n\tErrKeyTransactionExternalID = \"transactions.external_id\"\n\t\/\/ ErrValCustomerExternalIDExists = can't import new customer with the same external ID\n\tErrValCustomerExternalIDExists = \"The external ID for this customer already exists in our system.\"\n\t\/\/ ErrValExternalIDExists = can't save Transaction, because it exists already.\n\tErrValExternalIDExists = \"has already been taken\"\n\t\/\/ ErrValInvoiceExternalIDExists = invoice already exists\n\tErrValInvoiceExternalIDExists = \"The 
external ID for this invoice already exists in our system.\"\n\t\/\/ ErrValPlanExternalIDExists = plan already exists\n\tErrValPlanExternalIDExists = \"A plan with this identifier already exists in our system.\"\n)\n\nvar (\n\turl = \"https:\/\/api.chartmogul.com\/v1\/%v\"\n\ttimeout = 30 * time.Second\n)\n\n\/\/ IApi defines the interface of the library.\n\/\/ Necessary e.g. for mocks in testing.\ntype IApi interface {\n\tPing() (res bool, err error)\n\t\/\/ Data sources\n\tCreateDataSource(name string) (*DataSource, error)\n\tRetrieveDataSource(dataSourceUUID string) (*DataSource, error)\n\tListDataSources() (*DataSources, error)\n\tDeleteDataSource(dataSourceUUID string) error\n\t\/\/ Invoices\n\tCreateInvoices(invoices []*Invoice, customerUUID string) (*Invoices, error)\n\tListInvoices(cursor *Cursor, customerUUID string) (*Invoices, error)\n\t\/\/ Plans\n\tCreatePlan(plan *Plan) (result *Plan, err error)\n\tRetrievePlan(planUUID string) (*Plan, error)\n\tListPlans(listPlansParams *ListPlansParams) (*Plans, error)\n\tUpdatePlan(plan *Plan, planUUID string) (*Plan, error)\n\tDeletePlan(planUUID string) error\n\t\/\/ Subscriptions\n\tCancelSubscription(subscriptionUUID string, cancelSubscriptionParams *CancelSubscriptionParams) (*Subscription, error)\n\tListSubscriptions(cursor *Cursor, customerUUID string) (*Subscriptions, error)\n\t\/\/ Transactions\n\tCreateTransaction(transaction *Transaction, invoiceUUID string) (*Transaction, error)\n\n\t\/\/ Customers\n\tCreateCustomer(newCustomer *NewCustomer) (*Customer, error)\n\tRetrieveCustomer(customerUUID string) (*Customer, error)\n\tUpdateCustomer(Customer *Customer, customerUUID string) (*Customer, error)\n\tListCustomers(ListCustomersParams *ListCustomersParams) (*Customers, error)\n\tSearchCustomers(SearchCustomersParams *SearchCustomersParams) (*Customers, error)\n\tMergeCustomers(MergeCustomersParams *MergeCustomersParams) error\n\tDeleteCustomer(customerUUID string) error\n\n\t\/\/ - Customer Attributes\n\tRetrieveCustomersAttributes(customerUUID string) (*Attributes, error)\n\n\t\/\/ Tags\n\tAddTagsToCustomer(customerUUID string, tags []string) (*TagsResult, error)\n\tAddTagsToCustomersWithEmail(email string, tags []string) (*Customers, error)\n\tRemoveTagsFromCustomer(customerUUID string, tags []string) (*TagsResult, error)\n\n\t\/\/ Custom Attributes\n\tAddCustomAttributesToCustomer(customerUUID string, customAttributes []*CustomAttribute) (*CustomAttributes, error)\n\tAddCustomAttributesWithEmail(email string, customAttributes []*CustomAttribute) (*Customers, error)\n\tUpdateCustomAttributesOfCustomer(customerUUID string, customAttributes map[string]interface{}) (*CustomAttributes, error)\n\tRemoveCustomAttributes(customerUUID string, customAttributes []string) (*CustomAttributes, error)\n\n\t\/\/ Metrics\n\tMetricsRetrieveAll(metricsFilter *MetricsFilter) (*MetricsResult, error)\n\tMetricsRetrieveMRR(metricsFilter *MetricsFilter) (*MRRResult, error)\n\tMetricsRetrieveARR(metricsFilter *MetricsFilter) (*ARRResult, error)\n\tMetricsRetrieveARPA(metricsFilter *MetricsFilter) (*ARPAResult, error)\n\tMetricsRetrieveASP(metricsFilter *MetricsFilter) (*ASPResult, error)\n\tMetricsRetrieveCustomerCount(metricsFilter *MetricsFilter) (*CustomerCountResult, error)\n\tMetricsRetrieveCustomerChurnRate(metricsFilter *MetricsFilter) (*CustomerChurnRateResult, error)\n\tMetricsRetrieveMRRChurnRate(metricsFilter *MetricsFilter) (*MRRChurnRateResult, error)\n\tMetricsRetrieveLTV(metricsFilter *MetricsFilter) (*LTVResult, error)\n\n\t\/\/ Metrics - 
Subscriptions & Activities\n\tMetricsListSubscriptions(cursor *Cursor, customerUUID string) (*MetricsSubscriptions, error)\n\tMetricsListActivities(cursor *Cursor, customerUUID string) (*MetricsActivities, error)\n}\n\n\/\/ API is the handle for communicating with Chartmogul.\ntype API struct {\n\tAccountToken string\n\tAccessKey string\n}\n\n\/\/ Cursor contains query parameters for paging in CM.\n\/\/ Attributes for query must be string, because gorequest library cannot convert anything else.\ntype Cursor struct {\n\tPage uint32 `json:\"page,omitempty\"`\n\tPerPage uint32 `json:\"per_page,omitempty\"`\n}\n\n\/\/ Errors contains error feedback from ChartMogul\ntype Errors map[string]string\n\nfunc (e Errors) Error() string {\n\treturn fmt.Sprintf(\"chartmogul: %v\", map[string]string(e))\n}\n\n\/\/ IsAlreadyExists is a helper that returns true if there's only one error\n\/\/ and it means the uploaded resource of the same external_id already exists.\nfunc (e Errors) IsAlreadyExists() (is bool) {\n\tif e == nil {\n\t\treturn\n\t}\n\tif len(e) != 1 {\n\t\treturn\n\t}\n\tmsg, ok := e[ErrKeyExternalID]\n\tif !ok {\n\t\tmsg, ok = e[ErrKeyTransactionExternalID]\n\t}\n\treturn ok && (msg == ErrValExternalIDExists ||\n\t\tmsg == ErrValCustomerExternalIDExists ||\n\t\tmsg == ErrValPlanExternalIDExists ||\n\t\tmsg == ErrValInvoiceExternalIDExists)\n}\n\n\/\/ IsInvoiceAndTransactionAlreadyExist occurs when both invoice and tx exist already.\nfunc (e Errors) IsInvoiceAndTransactionAlreadyExist() (is bool) {\n\tif e == nil {\n\t\treturn\n\t}\n\tif len(e) != 2 {\n\t\treturn\n\t}\n\tmsg1, ok1 := e[ErrKeyExternalID]\n\tmsg2, ok2 := e[ErrKeyTransactionExternalID]\n\treturn ok1 && ok2 &&\n\t\tmsg1 == ErrValInvoiceExternalIDExists && msg2 == ErrValExternalIDExists\n}\n\n\/\/ Setup configures global timeout for the library.\nfunc Setup(timeoutConf time.Duration) {\n\ttimeout = timeoutConf\n}\n\n\/\/ SetURL changes target URL for the module globally.\nfunc SetURL(specialURL string) {\n\turl = specialURL\n}\n\nfunc prepareURL(path string) string {\n\treturn fmt.Sprintf(url, path)\n}\n\nfunc (api API) req(req *gorequest.SuperAgent) *gorequest.SuperAgent {\n\t\/\/ defaults for client go here:\n\treturn req.Timeout(timeout).\n\t\tSetBasicAuth(api.AccountToken, api.AccessKey).\n\t\tSet(\"Content-Type\", \"application\/json\")\n}\nData source - name taken\/\/ Package chartmogul is a simple Go API library for Chartmogul public API.\n\/\/\n\/\/ HTTP 2\n\/\/\n\/\/ ChartMogul's current stable version of nginx is incompatible with HTTP 2 implementation of Go.\n\/\/ For this reason the application must run with the following (or otherwise prohibit HTTP 2):\n\/\/ export GODEBUG=http2client=0\n\/\/\n\/\/ Uses the library gorequest, which allows simple struct->query, body->struct,\n\/\/ struct->body.\npackage chartmogul\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/parnurzeal\/gorequest\"\n)\n\nconst (\n\tlogFormatting = \"%v: %v (page %v of %v)\"\n\n\t\/\/ ErrKeyExternalID is key in Errors map indicating there's a problem with External ID of the resource.\n\tErrKeyExternalID = \"external_id\"\n\t\/\/ ErrKeyTransactionExternalID is key in Errors map indicating there's a problem with External ID of the transaction.\n\tErrKeyTransactionExternalID = \"transactions.external_id\"\n\t\/\/ ErrKeyName - data source name\n\tErrKeyName = \"name\"\n\t\/\/ ErrValCustomerExternalIDExists = can't import new customer with the same external ID\n\tErrValCustomerExternalIDExists = \"The external ID for this customer already exists in 
our system.\"\n\t\/\/ ErrValExternalIDExists = can't save Transaction, because it exists already.\n\tErrValExternalIDExists = \"has already been taken\"\n\t\/\/ ErrValInvoiceExternalIDExists = invoice already exists\n\tErrValInvoiceExternalIDExists = \"The external ID for this invoice already exists in our system.\"\n\t\/\/ ErrValPlanExternalIDExists = plan already exists\n\tErrValPlanExternalIDExists = \"A plan with this identifier already exists in our system.\"\n\t\/\/ ErrValHasAlreadyBeenTaken = data source name taken\n\tErrValHasAlreadyBeenTaken = \"Has already been taken.\"\n)\n\nvar (\n\turl = \"https:\/\/api.chartmogul.com\/v1\/%v\"\n\ttimeout = 30 * time.Second\n)\n\n\/\/ IApi defines the interface of the library.\n\/\/ Necessary e.g. for mocks in testing.\ntype IApi interface {\n\tPing() (res bool, err error)\n\t\/\/ Data sources\n\tCreateDataSource(name string) (*DataSource, error)\n\tRetrieveDataSource(dataSourceUUID string) (*DataSource, error)\n\tListDataSources() (*DataSources, error)\n\tDeleteDataSource(dataSourceUUID string) error\n\t\/\/ Invoices\n\tCreateInvoices(invoices []*Invoice, customerUUID string) (*Invoices, error)\n\tListInvoices(cursor *Cursor, customerUUID string) (*Invoices, error)\n\t\/\/ Plans\n\tCreatePlan(plan *Plan) (result *Plan, err error)\n\tRetrievePlan(planUUID string) (*Plan, error)\n\tListPlans(listPlansParams *ListPlansParams) (*Plans, error)\n\tUpdatePlan(plan *Plan, planUUID string) (*Plan, error)\n\tDeletePlan(planUUID string) error\n\t\/\/ Subscriptions\n\tCancelSubscription(subscriptionUUID string, cancelSubscriptionParams *CancelSubscriptionParams) (*Subscription, error)\n\tListSubscriptions(cursor *Cursor, customerUUID string) (*Subscriptions, error)\n\t\/\/ Transactions\n\tCreateTransaction(transaction *Transaction, invoiceUUID string) (*Transaction, error)\n\n\t\/\/ Customers\n\tCreateCustomer(newCustomer *NewCustomer) (*Customer, error)\n\tRetrieveCustomer(customerUUID string) (*Customer, error)\n\tUpdateCustomer(Customer *Customer, customerUUID string) (*Customer, error)\n\tListCustomers(ListCustomersParams *ListCustomersParams) (*Customers, error)\n\tSearchCustomers(SearchCustomersParams *SearchCustomersParams) (*Customers, error)\n\tMergeCustomers(MergeCustomersParams *MergeCustomersParams) error\n\tDeleteCustomer(customerUUID string) error\n\n\t\/\/ - Customer Attributes\n\tRetrieveCustomersAttributes(customerUUID string) (*Attributes, error)\n\n\t\/\/ Tags\n\tAddTagsToCustomer(customerUUID string, tags []string) (*TagsResult, error)\n\tAddTagsToCustomersWithEmail(email string, tags []string) (*Customers, error)\n\tRemoveTagsFromCustomer(customerUUID string, tags []string) (*TagsResult, error)\n\n\t\/\/ Custom Attributes\n\tAddCustomAttributesToCustomer(customerUUID string, customAttributes []*CustomAttribute) (*CustomAttributes, error)\n\tAddCustomAttributesWithEmail(email string, customAttributes []*CustomAttribute) (*Customers, error)\n\tUpdateCustomAttributesOfCustomer(customerUUID string, customAttributes map[string]interface{}) (*CustomAttributes, error)\n\tRemoveCustomAttributes(customerUUID string, customAttributes []string) (*CustomAttributes, error)\n\n\t\/\/ Metrics\n\tMetricsRetrieveAll(metricsFilter *MetricsFilter) (*MetricsResult, error)\n\tMetricsRetrieveMRR(metricsFilter *MetricsFilter) (*MRRResult, error)\n\tMetricsRetrieveARR(metricsFilter *MetricsFilter) (*ARRResult, error)\n\tMetricsRetrieveARPA(metricsFilter *MetricsFilter) (*ARPAResult, error)\n\tMetricsRetrieveASP(metricsFilter *MetricsFilter) (*ASPResult, 
error)\n\tMetricsRetrieveCustomerCount(metricsFilter *MetricsFilter) (*CustomerCountResult, error)\n\tMetricsRetrieveCustomerChurnRate(metricsFilter *MetricsFilter) (*CustomerChurnRateResult, error)\n\tMetricsRetrieveMRRChurnRate(metricsFilter *MetricsFilter) (*MRRChurnRateResult, error)\n\tMetricsRetrieveLTV(metricsFilter *MetricsFilter) (*LTVResult, error)\n\n\t\/\/ Metrics - Subscriptions & Activities\n\tMetricsListSubscriptions(cursor *Cursor, customerUUID string) (*MetricsSubscriptions, error)\n\tMetricsListActivities(cursor *Cursor, customerUUID string) (*MetricsActivities, error)\n}\n\n\/\/ API is the handle for communicating with Chartmogul.\ntype API struct {\n\tAccountToken string\n\tAccessKey string\n}\n\n\/\/ Cursor contains query parameters for paging in CM.\n\/\/ Attributes for query must be string, because gorequest library cannot convert anything else.\ntype Cursor struct {\n\tPage uint32 `json:\"page,omitempty\"`\n\tPerPage uint32 `json:\"per_page,omitempty\"`\n}\n\n\/\/ Errors contains error feedback from ChartMogul\ntype Errors map[string]string\n\nfunc (e Errors) Error() string {\n\treturn fmt.Sprintf(\"chartmogul: %v\", map[string]string(e))\n}\n\n\/\/ IsAlreadyExists is a helper that returns true if there's only one error\n\/\/ and it means the uploaded resource of the same external_id already exists.\nfunc (e Errors) IsAlreadyExists() (is bool) {\n\tif e == nil {\n\t\treturn\n\t}\n\tif len(e) != 1 {\n\t\treturn\n\t}\n\tmsg, ok := e[ErrKeyExternalID]\n\tif !ok {\n\t\tmsg, ok = e[ErrKeyTransactionExternalID]\n\t}\n\tif !ok {\n\t\tmsg, ok = e[ErrKeyName]\n\t\treturn ok && msg == ErrValHasAlreadyBeenTaken\n\t}\n\treturn msg == ErrValExternalIDExists ||\n\t\tmsg == ErrValCustomerExternalIDExists ||\n\t\tmsg == ErrValPlanExternalIDExists ||\n\t\tmsg == ErrValInvoiceExternalIDExists\n}\n\n\/\/ IsInvoiceAndTransactionAlreadyExist occurs when both invoice and tx exist already.\nfunc (e Errors) IsInvoiceAndTransactionAlreadyExist() (is bool) {\n\tif e == nil {\n\t\treturn\n\t}\n\tif len(e) != 2 {\n\t\treturn\n\t}\n\tmsg1, ok1 := e[ErrKeyExternalID]\n\tmsg2, ok2 := e[ErrKeyTransactionExternalID]\n\treturn ok1 && ok2 &&\n\t\tmsg1 == ErrValInvoiceExternalIDExists && msg2 == ErrValExternalIDExists\n}\n\n\/\/ Setup configures global timeout for the library.\nfunc Setup(timeoutConf time.Duration) {\n\ttimeout = timeoutConf\n}\n\n\/\/ SetURL changes target URL for the module globally.\nfunc SetURL(specialURL string) {\n\turl = specialURL\n}\n\nfunc prepareURL(path string) string {\n\treturn fmt.Sprintf(url, path)\n}\n\nfunc (api API) req(req *gorequest.SuperAgent) *gorequest.SuperAgent {\n\t\/\/ defaults for client go here:\n\treturn req.Timeout(timeout).\n\t\tSetBasicAuth(api.AccountToken, api.AccessKey).\n\t\tSet(\"Content-Type\", \"application\/json\")\n}\n<|endoftext|>"} {"text":"Add ID to DatasourcePermission<|endoftext|>"} {"text":"package testutil\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"v.io\/tools\/lib\/collect\"\n\t\"v.io\/tools\/lib\/util\"\n)\n\nvar (\n\tjenkinsHost = \"http:\/\/veyron-jenkins:8001\/jenkins\"\n\t\/\/ The token below belongs to jingjin@google.com.\n\tjenkinsToken = \"0e67bfe70302a528807d3594730c9d8b\"\n\tnetrcFile = filepath.Join(os.Getenv(\"HOME\"), \".netrc\")\n)\n\nconst (\n\tdummyTestResult = `\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuites>\n <testsuite>\n <testcase>\n <\/testcase>\n <\/testsuite>\n<\/testsuites>\n`\n)\n\n\/\/ findTestResultFiles returns a slice of paths to presubmit test\n\/\/ results.\nfunc findTestResultFiles(ctx *util.Context) ([]string, 
error) {\n\tresult := []string{}\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Collect javascript test results.\n\tjsDir := filepath.Join(root, \"release\/javascript\/core\", \"test_out\")\n\tif _, err := os.Stat(jsDir); err == nil {\n\t\tfileInfoList, err := ioutil.ReadDir(jsDir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ReadDir(%v) failed: %v\", jsDir, err)\n\t\t}\n\t\tfor _, fileInfo := range fileInfoList {\n\t\t\tname := fileInfo.Name()\n\t\t\tif strings.HasSuffix(name, \"_integration.out\") || strings.HasSuffix(name, \"_spec.out\") {\n\t\t\t\tresult = append(result, filepath.Join(jsDir, name))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Collect non-javascript test results.\n\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\tfileInfoList, err := ioutil.ReadDir(workspaceDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadDir(%v) failed: %v\", workspaceDir, err)\n\t}\n\tfor _, fileInfo := range fileInfoList {\n\t\tfileName := fileInfo.Name()\n\t\tif strings.HasPrefix(fileName, \"tests_\") && strings.HasSuffix(fileName, \".xml\") ||\n\t\t\tstrings.HasPrefix(fileName, \"status_\") && strings.HasSuffix(fileName, \".json\") {\n\t\t\tresult = append(result, filepath.Join(workspaceDir, fileName))\n\t\t}\n\t}\n\treturn result, nil\n}\n\n\/\/ requireEnv makes sure that the given environment variables are set.\nfunc requireEnv(names []string) error {\n\tfor _, name := range names {\n\t\tif os.Getenv(name) == \"\" {\n\t\t\treturn fmt.Errorf(\"environment variable %q is not set\", name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ vanadiumPresubmitPoll polls vanadium projects for new patchsets for\n\/\/ which to run presubmit tests.\nfunc vanadiumPresubmitPoll(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Use the \"presubmit query\" command to poll for new changes.\n\tlogfile := filepath.Join(root, \".presubmit_log\")\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args, \"-host\", jenkinsHost, \"-token\", jenkinsToken, \"-netrc\", netrcFile, \"query\", \"-log_file\", logfile)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitTest runs presubmit tests for vanadium projects.\nfunc vanadiumPresubmitTest(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Cleanup the test results possibly left behind by the\n\t\/\/ previous presubmit test.\n\ttestResultFiles, err := findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Use the 
\"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"test\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"default\",\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif fileInfo, err := os.Stat(file); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif fileInfo.Size() == 0 {\n\t\t\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Generate a dummy test results file if the tests we run\n\t\/\/ didn't produce any non-empty files.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(testResultFiles) == 0 {\n\t\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\t\tdummyFile, perm := filepath.Join(workspaceDir, \"tests_dummy.xml\"), os.FileMode(0644)\n\t\tif err := ctx.Run().WriteFile(dummyFile, []byte(dummyTestResult), perm); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"WriteFile(%v) failed: %v\", dummyFile, err)\n\t\t}\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitTestNew runs presubmit tests for a given project specified\n\/\/ in TEST environment variable.\n\/\/ TODO(jingjin): replace \"vanadiumPresubmitTest\" function with this one after\n\/\/ the transition is done.\nfunc vanadiumPresubmitTestNew(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"TEST\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Cleanup the test results possibly left behind by the\n\t\/\/ previous presubmit test.\n\ttestResultFiles, err := findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Use the \"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"-project\", \"vanadium-presubmit-test-new\",\n\t\t\"test\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"default\",\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-test\", os.Getenv(\"TEST\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tfileInfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif fileInfo.Size() == 0 
{\n\t\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Generate a dummy test results file if the tests we run\n\t\/\/ didn't produce any non-empty files.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(testResultFiles) == 0 {\n\t\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\t\tdummyFile, perm := filepath.Join(workspaceDir, \"tests_dummy.xml\"), os.FileMode(0644)\n\t\tif err := ctx.Run().WriteFile(dummyFile, []byte(dummyTestResult), perm); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"WriteFile(%v) failed: %v\", dummyFile, err)\n\t\t}\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitResult runs \"presubmit result\" command to process and post test results.\nfunc vanadiumPresubmitResult(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Run \"presubmit result\".\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"-project\", \"vanadium-presubmit-test-new\",\n\t\t\"result\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\nTBR: lib\/testutil\/presubmit: generate dummy xUnit report to work with new presubmit test.package testutil\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"v.io\/tools\/lib\/collect\"\n\t\"v.io\/tools\/lib\/util\"\n)\n\nvar (\n\tjenkinsHost = \"http:\/\/veyron-jenkins:8001\/jenkins\"\n\t\/\/ The token below belongs to jingjin@google.com.\n\tjenkinsToken = \"0e67bfe70302a528807d3594730c9d8b\"\n\tnetrcFile = filepath.Join(os.Getenv(\"HOME\"), \".netrc\")\n)\n\nconst (\n\tdummyTestResult = `\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuites>\n <testsuite>\n <testcase>\n <\/testcase>\n <\/testsuite>\n<\/testsuites>\n`\n)\n\n\/\/ findTestResultFiles returns a slice of paths to presubmit test\n\/\/ results.\nfunc findTestResultFiles(ctx *util.Context) ([]string, error) {\n\tresult := []string{}\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Collect javascript test results.\n\tjsDir := filepath.Join(root, \"release\/javascript\/core\", \"test_out\")\n\tif _, err := os.Stat(jsDir); err == nil {\n\t\tfileInfoList, err := ioutil.ReadDir(jsDir)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ReadDir(%v) failed: %v\", jsDir, err)\n\t\t}\n\t\tfor _, fileInfo := range fileInfoList {\n\t\t\tname := fileInfo.Name()\n\t\t\tif strings.HasSuffix(name, \"_integration.out\") || strings.HasSuffix(name, \"_spec.out\") {\n\t\t\t\tresult = append(result, filepath.Join(jsDir, name))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Collect non-javascript test results.\n\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\tfileInfoList, err := ioutil.ReadDir(workspaceDir)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"ReadDir(%v) failed: %v\", workspaceDir, err)\n\t}\n\tfor _, fileInfo := range fileInfoList {\n\t\tfileName := fileInfo.Name()\n\t\tif strings.HasPrefix(fileName, \"tests_\") && strings.HasSuffix(fileName, \".xml\") ||\n\t\t\tstrings.HasPrefix(fileName, \"status_\") && strings.HasSuffix(fileName, \".json\") {\n\t\t\tresult = append(result, filepath.Join(workspaceDir, fileName))\n\t\t}\n\t}\n\treturn result, nil\n}\n\n\/\/ requireEnv makes sure that the given environment variables are set.\nfunc requireEnv(names []string) error {\n\tfor _, name := range names {\n\t\tif os.Getenv(name) == \"\" {\n\t\t\treturn fmt.Errorf(\"environment variable %q is not set\", name)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ vanadiumPresubmitPoll polls vanadium projects for new patchsets for\n\/\/ which to run presubmit tests.\nfunc vanadiumPresubmitPoll(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Use the \"presubmit query\" command to poll for new changes.\n\tlogfile := filepath.Join(root, \".presubmit_log\")\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args, \"-host\", jenkinsHost, \"-token\", jenkinsToken, \"-netrc\", netrcFile, \"query\", \"-log_file\", logfile)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitTest runs presubmit tests for vanadium projects.\nfunc vanadiumPresubmitTest(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Cleanup the test results possibly left behind by the\n\t\/\/ previous presubmit test.\n\ttestResultFiles, err := findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Use the \"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"test\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"default\",\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif fileInfo, err := os.Stat(file); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif fileInfo.Size() == 0 {\n\t\t\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\t\t\treturn nil, 
err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Generate a dummy test results file if the tests we run\n\t\/\/ didn't produce any non-empty files.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(testResultFiles) == 0 {\n\t\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\t\tdummyFile, perm := filepath.Join(workspaceDir, \"tests_dummy.xml\"), os.FileMode(0644)\n\t\tif err := ctx.Run().WriteFile(dummyFile, []byte(dummyTestResult), perm); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"WriteFile(%v) failed: %v\", dummyFile, err)\n\t\t}\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitTestNew runs presubmit tests for a given project specified\n\/\/ in TEST environment variable.\n\/\/ TODO(jingjin): replace \"vanadiumPresubmitTest\" function with this one after\n\/\/ the transition is done.\nfunc vanadiumPresubmitTestNew(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"TEST\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Cleanup the test results possibly left behind by the\n\t\/\/ previous presubmit test.\n\ttestResultFiles, err := findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Use the \"presubmit test\" command to run the presubmit test.\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"-project\", \"vanadium-presubmit-test-new\",\n\t\t\"test\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-manifest\", \"default\",\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-test\", os.Getenv(\"TEST\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Remove any test result files that are empty.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, file := range testResultFiles {\n\t\tfileInfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif fileInfo.Size() == 0 {\n\t\t\tif err := ctx.Run().RemoveAll(file); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Generate a dummy test results file if the tests we run\n\t\/\/ didn't produce any non-empty files.\n\ttestResultFiles, err = findTestResultFiles(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thasXUnitReport := false\n\tfor _, file := range testResultFiles {\n\t\tif strings.HasSuffix(file, \".xml\") {\n\t\t\thasXUnitReport = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasXUnitReport {\n\t\tworkspaceDir := os.Getenv(\"WORKSPACE\")\n\t\tdummyFile, perm := filepath.Join(workspaceDir, \"tests_dummy.xml\"), os.FileMode(0644)\n\t\tif err := ctx.Run().WriteFile(dummyFile, []byte(dummyTestResult), perm); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"WriteFile(%v) failed: %v\", dummyFile, err)\n\t\t}\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ vanadiumPresubmitResult runs \"presubmit result\" 
command to process and post test results.\nfunc vanadiumPresubmitResult(ctx *util.Context, testName string) (_ *TestResult, e error) {\n\tif err := requireEnv([]string{\"BUILD_NUMBER\", \"REFS\", \"REPOS\", \"WORKSPACE\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize the test.\n\tcleanup, result, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if result != nil {\n\t\treturn result, nil\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Run \"presubmit result\".\n\targs := []string{}\n\tif ctx.Verbose() {\n\t\targs = append(args, \"-v\")\n\t}\n\targs = append(args,\n\t\t\"-host\", jenkinsHost,\n\t\t\"-token\", jenkinsToken,\n\t\t\"-netrc\", netrcFile,\n\t\t\"-project\", \"vanadium-presubmit-test-new\",\n\t\t\"result\",\n\t\t\"-build_number\", os.Getenv(\"BUILD_NUMBER\"),\n\t\t\"-refs\", os.Getenv(\"REFS\"),\n\t\t\"-repos\", os.Getenv(\"REPOS\"),\n\t)\n\tif err := ctx.Run().Command(\"presubmit\", args...); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TestResult{Status: TestPassed}, nil\n}\n<|endoftext|>"} {"text":"package gin\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\ntype Builder interface {\n\tBuild() error\n\tBinary() string\n\tErrors() string\n}\n\ntype builder struct {\n\tdir string\n\tbinary string\n\terrors string\n\tuseGodep bool\n}\n\nfunc NewBuilder(dir string, bin string, useGodep bool) Builder {\n\tif len(bin) == 0 {\n\t\tbin = \"bin\"\n\t}\n\n\t\/\/ does not work on Windows without the \".exe\" extension\n\tif runtime.GOOS == \"windows\" {\n\t\tif !strings.HasSuffix(bin, \".exe\") { \/\/ check if it already has the .exe extension\n\t\t\tbin += \".exe\"\n\t\t}\n\t}\n\n\treturn &builder{dir: dir, binary: bin, useGodep: useGodep}\n}\n\nfunc (b *builder) Binary() string {\n\treturn b.binary\n}\n\nfunc (b *builder) Errors() string {\n\treturn b.errors\n}\n\nfunc (b *builder) Build() error {\n\tvar command *exec.Cmd\n\tif b.useGodep {\n\t\tcommand = exec.Command(\"godep\", \"go\", \"build\", \"-o\", b.binary, \"github.com\/eave\/eave-go\")\n\t} else {\n\t\tcommand = exec.Command(\"go\", \"build\", \"-o\", b.binary, \"github.com\/eave\/eave-go\")\n\t}\n\tcommand.Dir = b.dir\n\n\toutput, err := command.CombinedOutput()\n\n\tif command.ProcessState.Success() {\n\t\tb.errors = \"\"\n\t} else {\n\t\tb.errors = string(output)\n\t}\n\n\tif len(b.errors) > 0 {\n\t\treturn fmt.Errorf(\"%s\", b.errors)\n\t}\n\n\treturn err\n}\nUse correct packagepackage gin\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\ntype Builder interface {\n\tBuild() error\n\tBinary() string\n\tErrors() string\n}\n\ntype builder struct {\n\tdir string\n\tbinary string\n\terrors string\n\tuseGodep bool\n}\n\nfunc NewBuilder(dir string, bin string, useGodep bool) Builder {\n\tif len(bin) == 0 {\n\t\tbin = \"bin\"\n\t}\n\n\t\/\/ does not work on Windows without the \".exe\" extension\n\tif runtime.GOOS == \"windows\" {\n\t\tif !strings.HasSuffix(bin, \".exe\") { \/\/ check if it already has the .exe extension\n\t\t\tbin += \".exe\"\n\t\t}\n\t}\n\n\treturn &builder{dir: dir, binary: bin, useGodep: useGodep}\n}\n\nfunc (b *builder) Binary() string {\n\treturn b.binary\n}\n\nfunc (b *builder) Errors() string {\n\treturn b.errors\n}\n\nfunc (b *builder) Build() error {\n\tvar command *exec.Cmd\n\tif b.useGodep {\n\t\tcommand = exec.Command(\"godep\", \"go\", \"build\", \"-o\", b.binary, \"github.com\/helloeave\/eave-go\")\n\t} else {\n\t\tcommand = exec.Command(\"go\", \"build\", \"-o\", 
b.binary, \"github.com\/helloeave\/eave-go\")\n\t}\n\tcommand.Dir = b.dir\n\n\toutput, err := command.CombinedOutput()\n\n\tif command.ProcessState.Success() {\n\t\tb.errors = \"\"\n\t} else {\n\t\tb.errors = string(output)\n\t}\n\n\tif len(b.errors) > 0 {\n\t\treturn fmt.Errorf(\"%s\", b.errors)\n\t}\n\n\treturn err\n}\n<|endoftext|>"} {"text":"package coordinator\n\nimport (\n\t\"bytes\"\n\tlog \"code.google.com\/p\/log4go\"\n\t\"configuration\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"protocol\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_ROOT_PWD = \"root\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath string\n\trouter *mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tmutex sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n\tconfig *configuration.Configuration\n}\n\nvar registeredCommands bool\nvar replicateWrite = protocol.Request_REPLICATION_WRITE\nvar replicateDelete = protocol.Request_REPLICATION_DELETE\n\n\/\/ Creates a new server.\nfunc NewRaftServer(config *configuration.Configuration, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\tregisteredCommands = true\n\t\tfor _, command := range internalRaftCommands {\n\t\t\traft.RegisterCommand(command)\n\t\t}\n\t}\n\n\ts := &RaftServer{\n\t\thost: config.HostnameOrDetect(),\n\t\tport: config.RaftServerPort,\n\t\tpath: config.RaftDir,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t\tconfig: config,\n\t}\n\trand.Seed(time.Now().Unix())\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(s.path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(s.path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tvalue, err := s.raftServer.Do(command)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot run command %#v. 
%s\", command, err)\n\t\t}\n\t\treturn value, err\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string, replicationFactor uint8) error {\n\tif replicationFactor == 0 {\n\t\treplicationFactor = 1\n\t}\n\tcommand := NewCreateDatabaseCommand(name, replicationFactor)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) ChangeDbUserPassword(db, username string, hash []byte) error {\n\tcommand := NewChangeDbUserPasswordCommand(db, username, string(hash))\n\t_, err := s.doOrProxyCommand(command, \"change_db_user_password\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\thash, _ := hashPassword(DEFAULT_ROOT_PWD)\n\tu.changePassword(string(hash))\n\treturn s.SaveClusterAdminUser(u)\n}\n\nfunc (s *RaftServer) ActivateServer(server *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) AddServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) MovePotentialServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) ReplaceServer(oldServer *ClusterServer, replacement *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft() error {\n\tlog.Info(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif !s.raftServer.IsLogEmpty() {\n\t\tlog.Info(\"Recovered from log\")\n\t\treturn nil\n\t}\n\n\tpotentialLeaders := s.config.SeedServers\n\n\tif len(potentialLeaders) == 0 {\n\t\tlog.Info(\"Starting as new Raft leader...\")\n\t\tname := s.raftServer.Name()\n\t\tconnectionString := s.connectionString()\n\t\t_, err := s.raftServer.Do(&InfluxJoinCommand{\n\t\t\tName: name,\n\t\t\tConnectionString: connectionString,\n\t\t\tProtobufConnectionString: 
s.config.ProtobufConnectionString(),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{\n\t\t\tRaftName: name,\n\t\t\tRaftConnectionString: connectionString,\n\t\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t\t})\n\t\t_, err = s.doOrProxyCommand(command, \"add_server\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = s.CreateRootUser()\n\t\treturn err\n\t}\n\n\tfor {\n\t\tfor _, leader := range potentialLeaders {\n\t\t\tlog.Info(\"(raft:%s) Attempting to join leader: %s\", s.raftServer.Name(), leader)\n\n\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\tlog.Info(\"Joined: %s\", leader)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tlog.Warn(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tcontinue\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) ListenAndServe() error {\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s.Serve(l)\n}\n\nfunc (s *RaftServer) Serve(l net.Listener) error {\n\ts.port = l.Addr().(*net.TCPAddr).Port\n\ts.listener = l\n\n\tlog.Info(\"Initializing Raft HTTP server\")\n\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Info(\"Raft Server Listening at %s\", s.connectionString())\n\n\tgo func() {\n\t\ts.httpServer.Serve(l)\n\t}()\n\tstarted := make(chan error)\n\tgo func() {\n\t\tstarted <- s.startRaft()\n\t}()\n\terr := <-started\n\t\/\/\ttime.Sleep(3 * time.Second)\n\treturn err\n}\n\nfunc (self *RaftServer) Close() {\n\tif !self.closing || self.raftServer == nil {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &InfluxJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tresp, err := http.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Debug(\"Redirected to %s to join leader\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor ; retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT 
command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &InfluxJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{RaftName: command.Name, RaftConnectionString: command.ConnectionString, ProtobufConnectionString: command.ProtobufConnectionString})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\tlog.Error(\"Error joining raft server: \", err, command)\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Debug(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseReplicationFactors {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Error(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tcommand := internalRaftCommands[value]\n\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Error(\"command %T failed: %s\", command, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\nremove an unnecessary continuepackage coordinator\n\nimport (\n\t\"bytes\"\n\tlog \"code.google.com\/p\/log4go\"\n\t\"configuration\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"protocol\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_ROOT_PWD = \"root\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath 
string\n\trouter *mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tmutex sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n\tconfig *configuration.Configuration\n}\n\nvar registeredCommands bool\nvar replicateWrite = protocol.Request_REPLICATION_WRITE\nvar replicateDelete = protocol.Request_REPLICATION_DELETE\n\n\/\/ Creates a new server.\nfunc NewRaftServer(config *configuration.Configuration, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\tregisteredCommands = true\n\t\tfor _, command := range internalRaftCommands {\n\t\t\traft.RegisterCommand(command)\n\t\t}\n\t}\n\n\ts := &RaftServer{\n\t\thost: config.HostnameOrDetect(),\n\t\tport: config.RaftServerPort,\n\t\tpath: config.RaftDir,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t\tconfig: config,\n\t}\n\trand.Seed(time.Now().Unix())\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(s.path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(s.path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tvalue, err := s.raftServer.Do(command)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot run command %#v. 
%s\", command, err)\n\t\t}\n\t\treturn value, err\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string, replicationFactor uint8) error {\n\tif replicationFactor == 0 {\n\t\treplicationFactor = 1\n\t}\n\tcommand := NewCreateDatabaseCommand(name, replicationFactor)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) ChangeDbUserPassword(db, username string, hash []byte) error {\n\tcommand := NewChangeDbUserPasswordCommand(db, username, string(hash))\n\t_, err := s.doOrProxyCommand(command, \"change_db_user_password\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\thash, _ := hashPassword(DEFAULT_ROOT_PWD)\n\tu.changePassword(string(hash))\n\treturn s.SaveClusterAdminUser(u)\n}\n\nfunc (s *RaftServer) ActivateServer(server *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) AddServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) MovePotentialServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) ReplaceServer(oldServer *ClusterServer, replacement *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft() error {\n\tlog.Info(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif !s.raftServer.IsLogEmpty() {\n\t\tlog.Info(\"Recovered from log\")\n\t\treturn nil\n\t}\n\n\tpotentialLeaders := s.config.SeedServers\n\n\tif len(potentialLeaders) == 0 {\n\t\tlog.Info(\"Starting as new Raft leader...\")\n\t\tname := s.raftServer.Name()\n\t\tconnectionString := s.connectionString()\n\t\t_, err := s.raftServer.Do(&InfluxJoinCommand{\n\t\t\tName: name,\n\t\t\tConnectionString: connectionString,\n\t\t\tProtobufConnectionString: 
s.config.ProtobufConnectionString(),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{\n\t\t\tRaftName: name,\n\t\t\tRaftConnectionString: connectionString,\n\t\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t\t})\n\t\t_, err = s.doOrProxyCommand(command, \"add_server\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = s.CreateRootUser()\n\t\treturn err\n\t}\n\n\tfor {\n\t\tfor _, leader := range potentialLeaders {\n\t\t\tlog.Info(\"(raft:%s) Attempting to join leader: %s\", s.raftServer.Name(), leader)\n\n\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\tlog.Info(\"Joined: %s\", leader)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tlog.Warn(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) ListenAndServe() error {\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s.Serve(l)\n}\n\nfunc (s *RaftServer) Serve(l net.Listener) error {\n\ts.port = l.Addr().(*net.TCPAddr).Port\n\ts.listener = l\n\n\tlog.Info(\"Initializing Raft HTTP server\")\n\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Info(\"Raft Server Listening at %s\", s.connectionString())\n\n\tgo func() {\n\t\ts.httpServer.Serve(l)\n\t}()\n\tstarted := make(chan error)\n\tgo func() {\n\t\tstarted <- s.startRaft()\n\t}()\n\terr := <-started\n\t\/\/\ttime.Sleep(3 * time.Second)\n\treturn err\n}\n\nfunc (self *RaftServer) Close() {\n\tif !self.closing || self.raftServer == nil {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &InfluxJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tresp, err := http.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Debug(\"Redirected to %s to join leader\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor ; retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT 
command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &InfluxJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{RaftName: command.Name, RaftConnectionString: command.ConnectionString, ProtobufConnectionString: command.ProtobufConnectionString})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\tlog.Error(\"Error joining raft server: \", err, command)\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Debug(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseReplicationFactors {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Error(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tcommand := internalRaftCommands[value]\n\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Error(\"command %T failed: %s\", command, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package compiler\n\nimport (\n\t\"bytecode\"\n\t\"bytecode\/eval\"\n\t\"bytecode\/ir\"\n\t\"dt\"\n\t\"fmt\"\n\t\"go\/types\"\n\t\"lisp\"\n\t\"sexp\"\n\t\"tu\"\n)\n\n\/\/ These operations have opcode for 2 operands.\n\/\/ For 2+ operands ordinary function call is used.\nvar opNames = [...]lisp.Symbol{\n\tir.OpNumAdd: \"+\",\n\tir.OpNumSub: \"-\",\n\tir.OpNumMul: \"*\",\n\tir.OpNumQuo: \"\/\",\n\tir.OpNumGt: \">\",\n\tir.OpNumLt: \"<\",\n\tir.OpNumEq: \"=\",\n}\n\n\/\/ #FIXME: either make compiler object reusable,\n\/\/ or make it private type and expose Compile as a\n\/\/ free standing function.\n\n\/\/ 
Compiler converts Sexp forms into bytecode objects.\ntype Compiler struct {\n\tcode *code\n\tconstPool dt.ConstPool\n\tsymPool dt.SymbolPool\n}\n\nfunc New() *Compiler {\n\treturn &Compiler{\n\t\tcode: newCode(),\n\t}\n}\n\nfunc (cl *Compiler) CompileFunc(f *tu.Func) *bytecode.Func {\n\tfor _, param := range f.Params {\n\t\tcl.symPool.Insert(param)\n\t}\n\n\tcl.compileStmtList(f.Body)\n\n\tobject := cl.createObject()\n\teval.Object(&object, len(f.Params))\n\tcl.ensureTrailingReturn()\n\n\treturn &bytecode.Func{\n\t\tObject: object,\n\t\tArgsDesc: argsDescriptor(len(f.Params), f.Variadic),\n\t}\n}\n\n\/\/ Insert trailing \"return\" opcode if it's not already there.\nfunc (cl *Compiler) ensureTrailingReturn() {\n\tlastInstr := cl.code.lastInstr()\n\n\tif lastInstr.Op != ir.OpReturn {\n\t\t\/\/ Check is needed to avoid generation of \"dead\" return.\n\t\tif lastInstr.Op != ir.OpPanic {\n\t\t\tcl.emit(ir.Return)\n\t\t}\n\t}\n}\n\nfunc (cl *Compiler) compileStmt(form sexp.Form) {\n\tswitch form := form.(type) {\n\tcase *sexp.Return:\n\t\tcl.compileReturn(form)\n\tcase *sexp.If:\n\t\tcl.compileIf(form)\n\tcase *sexp.Block:\n\t\tcl.compileBlock(form)\n\tcase *sexp.FormList:\n\t\tcl.compileStmtList(form.Forms)\n\tcase *sexp.Bind:\n\t\tcl.compileBind(form)\n\tcase *sexp.Rebind:\n\t\tcl.compileRebind(form)\n\tcase *sexp.MapSet:\n\t\tcl.compileMapSet(form)\n\tcase sexp.ExprStmt:\n\t\tcl.compileExprStmt(form.Form)\n\tcase *sexp.Panic:\n\t\tcl.compilePanic(form.ErrorData)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected stmt: %#v\\n\", form))\n\t}\n}\n\nfunc (cl *Compiler) compileExpr(form sexp.Form) {\n\tswitch form := form.(type) {\n\tcase *sexp.NumAdd:\n\t\tcl.compileAddSub(ir.OpNumAdd, form.Args)\n\tcase *sexp.NumSub:\n\t\tcl.compileAddSub(ir.OpNumSub, form.Args)\n\tcase *sexp.NumMul:\n\t\tcl.compileOp(ir.OpNumMul, form.Args)\n\tcase *sexp.NumQuo:\n\t\tcl.compileOp(ir.OpNumQuo, form.Args)\n\tcase *sexp.NumGt:\n\t\tcl.compileOp(ir.OpNumGt, form.Args)\n\tcase *sexp.NumLt:\n\t\tcl.compileOp(ir.OpNumLt, form.Args)\n\tcase *sexp.NumEq:\n\t\tcl.compileOp(ir.OpNumEq, form.Args)\n\tcase *sexp.Concat:\n\t\tcl.compileConcat(form)\n\n\tcase sexp.Int:\n\t\tcl.emitConst(cl.constPool.InsertInt(form.Val))\n\tcase sexp.Float:\n\t\tcl.emitConst(cl.constPool.InsertFloat(form.Val))\n\tcase sexp.String:\n\t\tcl.emitConst(cl.constPool.InsertString(form.Val))\n\tcase sexp.Symbol:\n\t\tcl.emitConst(cl.constPool.InsertSym(lisp.Symbol(form.Val)))\n\tcase sexp.Bool:\n\t\tcl.compileBool(form)\n\n\tcase sexp.Var:\n\t\tcl.compileVar(form)\n\n\tcase *sexp.Call:\n\t\tcl.compileCall(lisp.Symbol(form.Fn), form.Args...)\n\n\tcase sexp.MakeMap:\n\t\tcl.compileMakeMap(form)\n\n\tcase *sexp.TypeAssert:\n\t\tcl.compileTypeAssert(form)\n\tcase *sexp.LispTypeAssert:\n\t\tcl.compileLispTypeAssert(form)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected expr: %#v\\n\", form))\n\t}\n}\n\nfunc (cl *Compiler) compileOp(op ir.Opcode, args []sexp.Form) {\n\tif len(args) == 2 {\n\t\tcl.compileExprList(args)\n\t\tcl.emit(ir.Instr{Op: op})\n\t} else {\n\t\tcl.compileCall(opNames[op], args...)\n\t}\n}\n\nfunc (cl *Compiler) compileCall(fn lisp.Symbol, args ...sexp.Form) {\n\tif !optCall(cl, fn, args) {\n\t\tcl.emitConst(cl.constPool.InsertSym(fn))\n\t\tcl.compileExprList(args)\n\t\tcl.emit(ir.Call(len(args)))\n\t}\n}\n\nfunc (cl *Compiler) compileReturn(form *sexp.Return) {\n\tswitch len(form.Results) {\n\tcase 0:\n\t\tcl.emit(ir.Return)\n\tcase 
1:\n\t\tcl.compileExpr(form.Results[0])\n\t\tcl.emit(ir.Return)\n\n\tdefault:\n\t\tpanic(\"unimplemented\") \/\/ #REFS: 1.\n\t}\n}\n\nfunc (cl *Compiler) compileIf(form *sexp.If) {\n\tcl.compileExpr(form.Test)\n\tlabel := cl.emitJmp(ir.OpJmpNil, \"then\")\n\tcl.compileStmtList(form.Then.Forms)\n\tlabel.bind(\"else\")\n\tif form.Else != nil {\n\t\tcl.compileStmt(form.Else)\n\t}\n}\n\nfunc (cl *Compiler) compileBlock(form *sexp.Block) {\n\tcl.compileStmtList(form.Forms)\n\tcl.symPool.Drop(form.Scope.Len())\n\tcl.emit(ir.Instr{Op: ir.OpScopeExit, Data: uint16(form.Scope.Len())})\n}\n\nfunc (cl *Compiler) compileBind(form *sexp.Bind) {\n\tcl.compileExpr(form.Init)\n\tid := cl.symPool.Insert(form.Name)\n\tcl.emit(ir.LocalBind(id))\n}\n\nfunc (cl *Compiler) compileRebind(form *sexp.Rebind) {\n\tcl.compileExpr(form.Expr)\n\tid := cl.symPool.Find(form.Name)\n\tcl.emit(ir.LocalSet(id))\n}\n\nfunc (cl *Compiler) compileMakeMap(form sexp.MakeMap) {\n\tcl.compileCall(\n\t\t\"make-hash-table\",\n\t\tsym(\":size\"), form.SizeHint,\n\t\tsym(\":test\"), sym(\"equal\"),\n\t)\n}\n\nfunc (cl *Compiler) compileMapSet(form *sexp.MapSet) {\n\tcl.compileCall(\"puthash\", form.Key, form.Val, form.Map)\n\tcl.emit(ir.Drop(1)) \/\/ Discard expression result.\n}\n\nfunc (cl *Compiler) compilePanic(errorData sexp.Form) {\n\tcl.emitConst(cl.constPool.InsertSym(\"Go--panic\"))\n\tcl.compileExpr(errorData)\n\tcl.emit(ir.Panic)\n\tcl.code.pushBlock(\"panic\")\n}\n\nfunc (cl *Compiler) compileVar(form sexp.Var) {\n\t\/\/ #FIXME: it could be a global var.\n\tid := cl.symPool.Find(form.Name)\n\tcl.code.pushInstr(ir.LocalRef(id))\n}\n\nfunc (cl *Compiler) compileTypeAssert(form *sexp.TypeAssert) {\n\tpanic(\"unimplemented\")\n}\n\nfunc (cl *Compiler) compileLispTypeAssert(form *sexp.LispTypeAssert) {\n\tif types.Identical(lisp.Types.Bool, form.Type) {\n\t\treturn\n\t}\n\n\tvar checker ir.Instr\n\tvar blamer lisp.Symbol\n\tif types.Identical(lisp.Types.Int, form.Type) {\n\t\tchecker = ir.IsInt\n\t\tblamer = \"Go--!object-int\"\n\t} else if types.Identical(lisp.Types.String, form.Type) {\n\t\tchecker = ir.IsString\n\t\tblamer = \"Go--!object-string\"\n\t} else if types.Identical(lisp.Types.Symbol, form.Type) {\n\t\tchecker = ir.IsSymbol\n\t\tblamer = \"Go--!object-symbol\"\n\t} else {\n\t\tpanic(\"unimplemented\")\n\t}\n\n\tcl.compileExpr(form.Expr) \/\/ Arg to assert.\n\tcl.emit(ir.StackRef(0)) \/\/ Preserve arg (dup).\n\tcl.emit(checker) \/\/ Type check.\n\tlabel := cl.emitJmp(ir.OpJmpNotNil, \"lisp-type-assert-fail\")\n\t{\n\t\tcl.emitConst(cl.constPool.InsertSym(blamer))\n\t\tcl.emit(ir.StackRef(1)) \/\/ Value that failed assertion.\n\t\tcl.emit(ir.NoreturnCall(1))\n\t}\n\tlabel.bind(\"lisp-type-assert-pass\")\n}\n\nfunc (cl *Compiler) compileConcat(form *sexp.Concat) {\n\tcl.compileExprList(form.Args)\n\tcl.emit(ir.Concat(len(form.Args)))\n}\n\nfunc (cl *Compiler) compileAddSub(op ir.Opcode, args []sexp.Form) {\n\tif !optAddSub(cl, op, args) {\n\t\tcl.compileOp(op, args)\n\t}\n}\n\nfunc (cl *Compiler) compileBool(form sexp.Bool) {\n\tif form.Val {\n\t\tcl.emitConst(cl.constPool.InsertSym(\"t\"))\n\t} else {\n\t\tcl.emitConst(cl.constPool.InsertSym(\"nil\"))\n\t}\n}\n\nfunc (cl *Compiler) compileExprStmt(form sexp.Form) {\n\tcl.compileExpr(form)\n\tcl.emit(ir.Drop(1)) \/\/ Discard expression result.\n}\n\nfunc (cl *Compiler) compileInstr(instr ir.Instr, argc int, args []sexp.Form) {\n\tif len(args) != argc {\n\t\t\/\/ #FIXME: need better error handling here.\n\t\tpanic(fmt.Sprintf(\"%s expected %d args, got 
%d\",\n\t\t\tinstr.Op, argc, len(args)))\n\t}\n\tcl.compileExprList(args)\n\tcl.emit(instr)\n}\n\nfunc (cl *Compiler) compileStmtList(forms []sexp.Form) {\n\tfor _, form := range forms {\n\t\tcl.compileStmt(form)\n\t}\n}\n\nfunc (cl *Compiler) compileExprList(forms []sexp.Form) {\n\tfor _, form := range forms {\n\t\tcl.compileExpr(form)\n\t}\n}\n\nfunc (cl *Compiler) emit(instr ir.Instr) {\n\tcl.code.pushInstr(instr)\n}\n\nfunc (cl *Compiler) emitConst(cpIndex int) {\n\tcl.emit(ir.ConstRef(cpIndex))\n}\n\nfunc (cl *Compiler) emitJmp(op ir.Opcode, branchName string) jmpLabel {\n\tlabel := cl.code.pushJmp(op)\n\tcl.code.pushBlock(branchName)\n\treturn label\n}\n\nfunc (cl *Compiler) pushBlock(name string) {\n\tcl.code.pushBlock(name)\n}\n\nfunc (cl *Compiler) createObject() bytecode.Object {\n\treturn bytecode.Object{\n\t\tBlocks: cl.code.blocks,\n\t\tConstPool: cl.constPool,\n\t\tLocals: cl.symPool.Symbols(),\n\t}\n}\nlisp type assertion for floatspackage compiler\n\nimport (\n\t\"bytecode\"\n\t\"bytecode\/eval\"\n\t\"bytecode\/ir\"\n\t\"dt\"\n\t\"fmt\"\n\t\"go\/types\"\n\t\"lisp\"\n\t\"sexp\"\n\t\"tu\"\n)\n\n\/\/ These operations have opcode for 2 operands.\n\/\/ For 2+ operands ordinary function call is used.\nvar opNames = [...]lisp.Symbol{\n\tir.OpNumAdd: \"+\",\n\tir.OpNumSub: \"-\",\n\tir.OpNumMul: \"*\",\n\tir.OpNumQuo: \"\/\",\n\tir.OpNumGt: \">\",\n\tir.OpNumLt: \"<\",\n\tir.OpNumEq: \"=\",\n}\n\n\/\/ #FIXME: either make compiler object reusable,\n\/\/ or make it private type and expose Compile as a\n\/\/ free standing function.\n\n\/\/ Compiler converts Sexp forms into bytecode objects.\ntype Compiler struct {\n\tcode *code\n\tconstPool dt.ConstPool\n\tsymPool dt.SymbolPool\n}\n\nfunc New() *Compiler {\n\treturn &Compiler{\n\t\tcode: newCode(),\n\t}\n}\n\nfunc (cl *Compiler) CompileFunc(f *tu.Func) *bytecode.Func {\n\tfor _, param := range f.Params {\n\t\tcl.symPool.Insert(param)\n\t}\n\n\tcl.compileStmtList(f.Body)\n\n\tobject := cl.createObject()\n\teval.Object(&object, len(f.Params))\n\tcl.ensureTrailingReturn()\n\n\treturn &bytecode.Func{\n\t\tObject: object,\n\t\tArgsDesc: argsDescriptor(len(f.Params), f.Variadic),\n\t}\n}\n\n\/\/ Insert trailing \"return\" opcode if its not already there.\nfunc (cl *Compiler) ensureTrailingReturn() {\n\tlastInstr := cl.code.lastInstr()\n\n\tif lastInstr.Op != ir.OpReturn {\n\t\t\/\/ Check is needed to avoid generation of \"dead\" return.\n\t\tif lastInstr.Op != ir.OpPanic {\n\t\t\tcl.emit(ir.Return)\n\t\t}\n\t}\n}\n\nfunc (cl *Compiler) compileStmt(form sexp.Form) {\n\tswitch form := form.(type) {\n\tcase *sexp.Return:\n\t\tcl.compileReturn(form)\n\tcase *sexp.If:\n\t\tcl.compileIf(form)\n\tcase *sexp.Block:\n\t\tcl.compileBlock(form)\n\tcase *sexp.FormList:\n\t\tcl.compileStmtList(form.Forms)\n\tcase *sexp.Bind:\n\t\tcl.compileBind(form)\n\tcase *sexp.Rebind:\n\t\tcl.compileRebind(form)\n\tcase *sexp.MapSet:\n\t\tcl.compileMapSet(form)\n\tcase sexp.ExprStmt:\n\t\tcl.compileExprStmt(form.Form)\n\tcase *sexp.Panic:\n\t\tcl.compilePanic(form.ErrorData)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected stmt: %#v\\n\", form))\n\t}\n}\n\nfunc (cl *Compiler) compileExpr(form sexp.Form) {\n\tswitch form := form.(type) {\n\tcase *sexp.NumAdd:\n\t\tcl.compileAddSub(ir.OpNumAdd, form.Args)\n\tcase *sexp.NumSub:\n\t\tcl.compileAddSub(ir.OpNumSub, form.Args)\n\tcase *sexp.NumMul:\n\t\tcl.compileOp(ir.OpNumMul, form.Args)\n\tcase *sexp.NumQuo:\n\t\tcl.compileOp(ir.OpNumQuo, form.Args)\n\tcase *sexp.NumGt:\n\t\tcl.compileOp(ir.OpNumGt, form.Args)\n\tcase 
*sexp.NumLt:\n\t\tcl.compileOp(ir.OpNumLt, form.Args)\n\tcase *sexp.NumEq:\n\t\tcl.compileOp(ir.OpNumEq, form.Args)\n\tcase *sexp.Concat:\n\t\tcl.compileConcat(form)\n\n\tcase sexp.Int:\n\t\tcl.emitConst(cl.constPool.InsertInt(form.Val))\n\tcase sexp.Float:\n\t\tcl.emitConst(cl.constPool.InsertFloat(form.Val))\n\tcase sexp.String:\n\t\tcl.emitConst(cl.constPool.InsertString(form.Val))\n\tcase sexp.Symbol:\n\t\tcl.emitConst(cl.constPool.InsertSym(lisp.Symbol(form.Val)))\n\tcase sexp.Bool:\n\t\tcl.compileBool(form)\n\n\tcase sexp.Var:\n\t\tcl.compileVar(form)\n\n\tcase *sexp.Call:\n\t\tcl.compileCall(lisp.Symbol(form.Fn), form.Args...)\n\n\tcase sexp.MakeMap:\n\t\tcl.compileMakeMap(form)\n\n\tcase *sexp.TypeAssert:\n\t\tcl.compileTypeAssert(form)\n\tcase *sexp.LispTypeAssert:\n\t\tcl.compileLispTypeAssert(form)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected expr: %#v\\n\", form))\n\t}\n}\n\nfunc (cl *Compiler) compileOp(op ir.Opcode, args []sexp.Form) {\n\tif len(args) == 2 {\n\t\tcl.compileExprList(args)\n\t\tcl.emit(ir.Instr{Op: op})\n\t} else {\n\t\tcl.compileCall(opNames[op], args...)\n\t}\n}\n\nfunc (cl *Compiler) compileCall(fn lisp.Symbol, args ...sexp.Form) {\n\tif !optCall(cl, fn, args) {\n\t\tcl.emitConst(cl.constPool.InsertSym(fn))\n\t\tcl.compileExprList(args)\n\t\tcl.emit(ir.Call(len(args)))\n\t}\n}\n\nfunc (cl *Compiler) compileReturn(form *sexp.Return) {\n\tswitch len(form.Results) {\n\tcase 0:\n\t\tcl.emit(ir.Return)\n\tcase 1:\n\t\tcl.compileExpr(form.Results[0])\n\t\tcl.emit(ir.Return)\n\n\tdefault:\n\t\tpanic(\"unimplemented\") \/\/ #REFS: 1.\n\t}\n}\n\nfunc (cl *Compiler) compileIf(form *sexp.If) {\n\tcl.compileExpr(form.Test)\n\tlabel := cl.emitJmp(ir.OpJmpNil, \"then\")\n\tcl.compileStmtList(form.Then.Forms)\n\tlabel.bind(\"else\")\n\tif form.Else != nil {\n\t\tcl.compileStmt(form.Else)\n\t}\n}\n\nfunc (cl *Compiler) compileBlock(form *sexp.Block) {\n\tcl.compileStmtList(form.Forms)\n\tcl.symPool.Drop(form.Scope.Len())\n\tcl.emit(ir.Instr{Op: ir.OpScopeExit, Data: uint16(form.Scope.Len())})\n}\n\nfunc (cl *Compiler) compileBind(form *sexp.Bind) {\n\tcl.compileExpr(form.Init)\n\tid := cl.symPool.Insert(form.Name)\n\tcl.emit(ir.LocalBind(id))\n}\n\nfunc (cl *Compiler) compileRebind(form *sexp.Rebind) {\n\tcl.compileExpr(form.Expr)\n\tid := cl.symPool.Find(form.Name)\n\tcl.emit(ir.LocalSet(id))\n}\n\nfunc (cl *Compiler) compileMakeMap(form sexp.MakeMap) {\n\tcl.compileCall(\n\t\t\"make-hash-table\",\n\t\tsym(\":size\"), form.SizeHint,\n\t\tsym(\":test\"), sym(\"equal\"),\n\t)\n}\n\nfunc (cl *Compiler) compileMapSet(form *sexp.MapSet) {\n\tcl.compileCall(\"puthash\", form.Key, form.Val, form.Map)\n\tcl.emit(ir.Drop(1)) \/\/ Discard expression result.\n}\n\nfunc (cl *Compiler) compilePanic(errorData sexp.Form) {\n\tcl.emitConst(cl.constPool.InsertSym(\"Go--panic\"))\n\tcl.compileExpr(errorData)\n\tcl.emit(ir.Panic)\n\tcl.code.pushBlock(\"panic\")\n}\n\nfunc (cl *Compiler) compileVar(form sexp.Var) {\n\t\/\/ #FIXME: it could be a global var.\n\tid := cl.symPool.Find(form.Name)\n\tcl.code.pushInstr(ir.LocalRef(id))\n}\n\nfunc (cl *Compiler) compileTypeAssert(form *sexp.TypeAssert) {\n\tpanic(\"unimplemented\")\n}\n\nfunc (cl *Compiler) compileLispTypeAssert(form *sexp.LispTypeAssert) {\n\t\/\/ Bool type needs no assertion at all.\n\tif types.Identical(lisp.Types.Bool, form.Type) {\n\t\treturn\n\t}\n\n\tvar blamer lisp.Symbol \/\/ Panic trigger\n\n\tcl.compileExpr(form.Expr) \/\/ Arg to assert.\n\n\tif types.Identical(lisp.Types.Float, form.Type) {\n\t\t\/\/ For floats we do 
not have floatp opcode.\n\t\tcl.emitConst(cl.constPool.InsertSym(\"floatp\"))\n\t\tcl.emit(ir.StackRef(1)) \/\/ Preserve arg (dup).\n\t\tcl.emit(ir.Call(1))\n\t\tblamer = \"Go--!object-float\"\n\t} else {\n\t\tvar checker ir.Instr\n\t\tif types.Identical(lisp.Types.Int, form.Type) {\n\t\t\tchecker = ir.IsInt\n\t\t\tblamer = \"Go--!object-int\"\n\t\t} else if types.Identical(lisp.Types.String, form.Type) {\n\t\t\tchecker = ir.IsString\n\t\t\tblamer = \"Go--!object-string\"\n\t\t} else if types.Identical(lisp.Types.Symbol, form.Type) {\n\t\t\tchecker = ir.IsSymbol\n\t\t\tblamer = \"Go--!object-symbol\"\n\t\t} else {\n\t\t\tpanic(\"unimplemented\")\n\t\t}\n\n\t\tcl.emit(ir.StackRef(0)) \/\/ Preserve arg (dup).\n\t\tcl.emit(checker) \/\/ Type check.\n\t}\n\n\tlabel := cl.emitJmp(ir.OpJmpNotNil, \"lisp-type-assert-fail\")\n\t{\n\t\tcl.emitConst(cl.constPool.InsertSym(blamer))\n\t\tcl.emit(ir.StackRef(1)) \/\/ Value that failed assertion.\n\t\tcl.emit(ir.NoreturnCall(1))\n\t}\n\tlabel.bind(\"lisp-type-assert-pass\")\n}\n\nfunc (cl *Compiler) compileConcat(form *sexp.Concat) {\n\tcl.compileExprList(form.Args)\n\tcl.emit(ir.Concat(len(form.Args)))\n}\n\nfunc (cl *Compiler) compileAddSub(op ir.Opcode, args []sexp.Form) {\n\tif !optAddSub(cl, op, args) {\n\t\tcl.compileOp(op, args)\n\t}\n}\n\nfunc (cl *Compiler) compileBool(form sexp.Bool) {\n\tif form.Val {\n\t\tcl.emitConst(cl.constPool.InsertSym(\"t\"))\n\t} else {\n\t\tcl.emitConst(cl.constPool.InsertSym(\"nil\"))\n\t}\n}\n\nfunc (cl *Compiler) compileExprStmt(form sexp.Form) {\n\tcl.compileExpr(form)\n\tcl.emit(ir.Drop(1)) \/\/ Discard expression result.\n}\n\nfunc (cl *Compiler) compileInstr(instr ir.Instr, argc int, args []sexp.Form) {\n\tif len(args) != argc {\n\t\t\/\/ #FIXME: need better error handling here.\n\t\tpanic(fmt.Sprintf(\"%s expected %d args, got %d\",\n\t\t\tinstr.Op, argc, len(args)))\n\t}\n\tcl.compileExprList(args)\n\tcl.emit(instr)\n}\n\nfunc (cl *Compiler) compileStmtList(forms []sexp.Form) {\n\tfor _, form := range forms {\n\t\tcl.compileStmt(form)\n\t}\n}\n\nfunc (cl *Compiler) compileExprList(forms []sexp.Form) {\n\tfor _, form := range forms {\n\t\tcl.compileExpr(form)\n\t}\n}\n\nfunc (cl *Compiler) emit(instr ir.Instr) {\n\tcl.code.pushInstr(instr)\n}\n\nfunc (cl *Compiler) emitConst(cpIndex int) {\n\tcl.emit(ir.ConstRef(cpIndex))\n}\n\nfunc (cl *Compiler) emitJmp(op ir.Opcode, branchName string) jmpLabel {\n\tlabel := cl.code.pushJmp(op)\n\tcl.code.pushBlock(branchName)\n\treturn label\n}\n\nfunc (cl *Compiler) pushBlock(name string) {\n\tcl.code.pushBlock(name)\n}\n\nfunc (cl *Compiler) createObject() bytecode.Object {\n\treturn bytecode.Object{\n\t\tBlocks: cl.code.blocks,\n\t\tConstPool: cl.constPool,\n\t\tLocals: cl.symPool.Symbols(),\n\t}\n}\n<|endoftext|>"} {"text":"package isolation_segments\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t. \"github.com\/cloudfoundry\/cf-acceptance-tests\/cats_suite_helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. 
\"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/workflowhelpers\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/app_helpers\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/assets\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/config\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/random_name\"\n)\n\nconst (\n\tSHARED_ISOLATION_SEGMENT_GUID = \"933b4c58-120b-499a-b85d-4b6fc9e2903b\"\n\tbinaryHi = \"Hello from a binary\"\n)\n\nfunc entitleOrgToIsolationSegment(orgGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\",\n\t\tfmt.Sprintf(\"\/v3\/isolation_segments\/%s\/relationships\/organizations\", isoSegGuid),\n\t\t\"-X\",\n\t\t\"POST\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":[{ \"guid\":\"%s\" }]}`, orgGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc assignIsolationSegmentToSpace(spaceGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/spaces\/%s\/relationships\/isolation_segment\", spaceGuid),\n\t\t\"-X\",\n\t\t\"PATCH\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc setDefaultIsolationSegment(orgGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\",\n\t\tfmt.Sprintf(\"\/v3\/organizations\/%s\/relationships\/default_isolation_segment\", orgGuid),\n\t\t\"-X\",\n\t\t\"PATCH\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc getGuid(response []byte) string {\n\ttype resource struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\tvar GetResponse struct {\n\t\tResources []resource `json:\"resources\"`\n\t}\n\n\terr := json.Unmarshal(response, &GetResponse)\n\tExpect(err).ToNot(HaveOccurred())\n\n\tif len(GetResponse.Resources) == 0 {\n\t\tFail(\"No guid found for response\")\n\t}\n\n\treturn GetResponse.Resources[0].Guid\n}\n\nfunc getIsolationSegmentGuid(name string) string {\n\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments?names=%s\", name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\treturn getGuid(bytes)\n}\n\nfunc isolationSegmentExists(name string) bool {\n\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments?names=%s\", name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\ttype resource struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\tvar GetResponse struct {\n\t\tResources []resource `json:\"resources\"`\n\t}\n\n\terr := json.Unmarshal(bytes, &GetResponse)\n\tExpect(err).ToNot(HaveOccurred())\n\treturn len(GetResponse.Resources) > 0\n}\n\nfunc createIsolationSegment(name string) string {\n\tsession := cf.Cf(\"curl\", \"\/v3\/isolation_segments\", \"-X\", \"POST\", \"-d\", fmt.Sprintf(`{\"name\":\"%s\"}`, name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\n\tvar isolation_segment struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\terr := json.Unmarshal(bytes, &isolation_segment)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treturn isolation_segment.Guid\n}\n\nfunc deleteIsolationSegment(guid string) {\n\tEventually(cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments\/%s\", guid), \"-X\", \"DELETE\"), Config.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc createOrGetIsolationSegment(name 
string) string {\n\tvar isoSegGuid string\n\tif isolationSegmentExists(name) {\n\t\tisoSegGuid = getIsolationSegmentGuid(name)\n\t} else {\n\t\tisoSegGuid = createIsolationSegment(name)\n\t}\n\treturn isoSegGuid\n}\n\nvar _ = IsolationSegmentsDescribe(\"IsolationSegments\", func() {\n\tvar orgGuid, orgName string\n\tvar spaceGuid, spaceName string\n\tvar isoSegGuid, isoSegName string\n\tvar testSetup *workflowhelpers.ReproducibleTestSuiteSetup\n\n\tBeforeEach(func() {\n\t\t\/\/ New up an organization since we will be assigning isolation segments.\n\t\t\/\/ This has the potential to cause other tests to fail if running in parallel mode.\n\t\tcfg, _ := config.NewCatsConfig(os.Getenv(\"CONFIG\"))\n\t\ttestSetup = workflowhelpers.NewTestSuiteSetup(cfg)\n\t\ttestSetup.Setup()\n\n\t\torgName = testSetup.RegularUserContext().Org\n\t\tspaceName = testSetup.RegularUserContext().Space\n\t\tisoSegName = Config.GetIsolationSegmentName()\n\n\t\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/organizations?names=%s\", orgName))\n\t\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\t\torgGuid = getGuid(bytes)\n\t})\n\n\tAfterEach(func() {\n\t\ttestSetup.Teardown()\n\n\t\tif isoSegGuid != \"\" {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tdeleteIsolationSegment(isoSegGuid)\n\t\t\t})\n\t\t\tisoSegGuid = \"\"\n\t\t}\n\t})\n\n\tContext(\"When an organization has the shared segment as its default\", func() {\n\t\tBeforeEach(func() {\n\t\t\tentitleOrgToIsolationSegment(orgGuid, SHARED_ISOLATION_SEGMENT_GUID)\n\t\t})\n\n\t\tIt(\"can run an app to a space with no assigned segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n\n\tContext(\"When the user-provided Isolation Segment has an associated cell\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsetDefaultIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"can run an app to an org where the default is the user-provided isolation segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n\n\tContext(\"When the Isolation Segment has no 
associated cells\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createIsolationSegment(random_name.CATSRandomName(\"fake-iso-seg\"))\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsetDefaultIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"fails to start an app in the Isolation Segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"When the organization has not been entitled to the Isolation Segment\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"fails to set the isolation segment as the default\", func() {\n\t\t\tworkflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {\n\t\t\t\tsession := cf.Cf(\"curl\",\n\t\t\t\t\tfmt.Sprintf(\"\/v3\/organizations\/%s\/relationships\/default_isolation_segment\", orgGuid),\n\t\t\t\t\t\"-X\",\n\t\t\t\t\t\"PATCH\",\n\t\t\t\t\t\"-d\",\n\t\t\t\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)).Wait(Config.DefaultTimeoutDuration())\n\t\t\t\tExpect(session).To(Exit(0))\n\t\t\t\tExpect(session).To(Say(\"Ensure it has been entitled to this organization\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"When the space has been assigned an Isolation Segment\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/spaces?names=%s\", spaceName))\n\t\t\t\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\t\t\t\tspaceGuid = getGuid(bytes)\n\t\t\t\tassignIsolationSegmentToSpace(spaceGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"can run an app in that isolation segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n})\nupdate set default iso seg fail messagepackage isolation_segments\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t. \"github.com\/cloudfoundry\/cf-acceptance-tests\/cats_suite_helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. 
\"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/workflowhelpers\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/app_helpers\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/assets\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/config\"\n\t\"github.com\/cloudfoundry\/cf-acceptance-tests\/helpers\/random_name\"\n)\n\nconst (\n\tSHARED_ISOLATION_SEGMENT_GUID = \"933b4c58-120b-499a-b85d-4b6fc9e2903b\"\n\tbinaryHi = \"Hello from a binary\"\n)\n\nfunc entitleOrgToIsolationSegment(orgGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\",\n\t\tfmt.Sprintf(\"\/v3\/isolation_segments\/%s\/relationships\/organizations\", isoSegGuid),\n\t\t\"-X\",\n\t\t\"POST\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":[{ \"guid\":\"%s\" }]}`, orgGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc assignIsolationSegmentToSpace(spaceGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/spaces\/%s\/relationships\/isolation_segment\", spaceGuid),\n\t\t\"-X\",\n\t\t\"PATCH\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc setDefaultIsolationSegment(orgGuid, isoSegGuid string) {\n\tEventually(cf.Cf(\"curl\",\n\t\tfmt.Sprintf(\"\/v3\/organizations\/%s\/relationships\/default_isolation_segment\", orgGuid),\n\t\t\"-X\",\n\t\t\"PATCH\",\n\t\t\"-d\",\n\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)),\n\t\tConfig.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc getGuid(response []byte) string {\n\ttype resource struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\tvar GetResponse struct {\n\t\tResources []resource `json:\"resources\"`\n\t}\n\n\terr := json.Unmarshal(response, &GetResponse)\n\tExpect(err).ToNot(HaveOccurred())\n\n\tif len(GetResponse.Resources) == 0 {\n\t\tFail(\"No guid found for response\")\n\t}\n\n\treturn GetResponse.Resources[0].Guid\n}\n\nfunc getIsolationSegmentGuid(name string) string {\n\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments?names=%s\", name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\treturn getGuid(bytes)\n}\n\nfunc isolationSegmentExists(name string) bool {\n\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments?names=%s\", name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\ttype resource struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\tvar GetResponse struct {\n\t\tResources []resource `json:\"resources\"`\n\t}\n\n\terr := json.Unmarshal(bytes, &GetResponse)\n\tExpect(err).ToNot(HaveOccurred())\n\treturn len(GetResponse.Resources) > 0\n}\n\nfunc createIsolationSegment(name string) string {\n\tsession := cf.Cf(\"curl\", \"\/v3\/isolation_segments\", \"-X\", \"POST\", \"-d\", fmt.Sprintf(`{\"name\":\"%s\"}`, name))\n\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\n\tvar isolation_segment struct {\n\t\tGuid string `json:\"guid\"`\n\t}\n\terr := json.Unmarshal(bytes, &isolation_segment)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treturn isolation_segment.Guid\n}\n\nfunc deleteIsolationSegment(guid string) {\n\tEventually(cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/isolation_segments\/%s\", guid), \"-X\", \"DELETE\"), Config.DefaultTimeoutDuration()).Should(Exit(0))\n}\n\nfunc createOrGetIsolationSegment(name 
string) string {\n\tvar isoSegGuid string\n\tif isolationSegmentExists(name) {\n\t\tisoSegGuid = getIsolationSegmentGuid(name)\n\t} else {\n\t\tisoSegGuid = createIsolationSegment(name)\n\t}\n\treturn isoSegGuid\n}\n\nvar _ = IsolationSegmentsDescribe(\"IsolationSegments\", func() {\n\tvar orgGuid, orgName string\n\tvar spaceGuid, spaceName string\n\tvar isoSegGuid, isoSegName string\n\tvar testSetup *workflowhelpers.ReproducibleTestSuiteSetup\n\n\tBeforeEach(func() {\n\t\t\/\/ New up an organization since we will be assigning isolation segments.\n\t\t\/\/ This has the potential to cause other tests to fail if running in parallel mode.\n\t\tcfg, _ := config.NewCatsConfig(os.Getenv(\"CONFIG\"))\n\t\ttestSetup = workflowhelpers.NewTestSuiteSetup(cfg)\n\t\ttestSetup.Setup()\n\n\t\torgName = testSetup.RegularUserContext().Org\n\t\tspaceName = testSetup.RegularUserContext().Space\n\t\tisoSegName = Config.GetIsolationSegmentName()\n\n\t\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/organizations?names=%s\", orgName))\n\t\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\t\torgGuid = getGuid(bytes)\n\t})\n\n\tAfterEach(func() {\n\t\ttestSetup.Teardown()\n\n\t\tif isoSegGuid != \"\" {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tdeleteIsolationSegment(isoSegGuid)\n\t\t\t})\n\t\t\tisoSegGuid = \"\"\n\t\t}\n\t})\n\n\tContext(\"When an organization has the shared segment as its default\", func() {\n\t\tBeforeEach(func() {\n\t\t\tentitleOrgToIsolationSegment(orgGuid, SHARED_ISOLATION_SEGMENT_GUID)\n\t\t})\n\n\t\tIt(\"can run an app to a space with no assigned segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n\n\tContext(\"When the user-provided Isolation Segment has an associated cell\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsetDefaultIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"can run an app to an org where the default is the user-provided isolation segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n\n\tContext(\"When the Isolation Segment has no 
associated cells\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createIsolationSegment(random_name.CATSRandomName(\"fake-iso-seg\"))\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsetDefaultIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"fails to start an app in the Isolation Segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"When the organization has not been entitled to the Isolation Segment\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"fails to set the isolation segment as the default\", func() {\n\t\t\tworkflowhelpers.AsUser(TestSetup.AdminUserContext(), Config.DefaultTimeoutDuration(), func() {\n\t\t\t\tsession := cf.Cf(\"curl\",\n\t\t\t\t\tfmt.Sprintf(\"\/v3\/organizations\/%s\/relationships\/default_isolation_segment\", orgGuid),\n\t\t\t\t\t\"-X\",\n\t\t\t\t\t\"PATCH\",\n\t\t\t\t\t\"-d\",\n\t\t\t\t\tfmt.Sprintf(`{\"data\":{\"guid\":\"%s\"}}`, isoSegGuid)).Wait(Config.DefaultTimeoutDuration())\n\t\t\t\tExpect(session).To(Exit(0))\n\t\t\t\tExpect(session).To(Say(\"Ensure it has been entitled to the organization\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"When the space has been assigned an Isolation Segment\", func() {\n\t\tBeforeEach(func() {\n\t\t\tworkflowhelpers.AsUser(testSetup.AdminUserContext(), testSetup.ShortTimeout(), func() {\n\t\t\t\tisoSegGuid = createOrGetIsolationSegment(isoSegName)\n\t\t\t\tentitleOrgToIsolationSegment(orgGuid, isoSegGuid)\n\t\t\t\tsession := cf.Cf(\"curl\", fmt.Sprintf(\"\/v3\/spaces?names=%s\", spaceName))\n\t\t\t\tbytes := session.Wait(Config.DefaultTimeoutDuration()).Out.Contents()\n\t\t\t\tspaceGuid = getGuid(bytes)\n\t\t\t\tassignIsolationSegmentToSpace(spaceGuid, isoSegGuid)\n\t\t\t})\n\t\t})\n\n\t\tIt(\"can run an app in that isolation segment\", func() {\n\t\t\tappName := random_name.CATSRandomName(\"APP\")\n\t\t\tEventually(cf.Cf(\n\t\t\t\t\"push\", appName,\n\t\t\t\t\"-p\", assets.NewAssets().Binary,\n\t\t\t\t\"--no-start\",\n\t\t\t\t\"-m\", DEFAULT_MEMORY_LIMIT,\n\t\t\t\t\"-b\", \"binary_buildpack\",\n\t\t\t\t\"-d\", Config.GetAppsDomain(),\n\t\t\t\t\"-c\", \".\/app\"),\n\t\t\t\tConfig.CfPushTimeoutDuration()).Should(Exit(0))\n\n\t\t\tapp_helpers.EnableDiego(appName)\n\t\t\tEventually(cf.Cf(\"start\", appName), Config.CfPushTimeoutDuration()).Should(Exit(0))\n\t\t\tEventually(helpers.CurlingAppRoot(Config, appName), Config.DefaultTimeoutDuration()).Should(ContainSubstring(binaryHi))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"package db\n\nimport 
(\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defauting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tvar specifier string \/\/ i.e. _pending, _sorted, etc.\n\tif strings.Contains(ns, \"_\") {\n\t\tspec := strings.Split(ns, \"_\")\n\t\tns = spec[0]\n\t\tspecifier = \"_\" + spec[1]\n\t}\n\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns + specifier))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\n\tif specifier == \"\" {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\tvar specifier string \/\/ i.e. _pending, _sorted, etc.\n\tif strings.Contains(ns, \"_\") {\n\t\tspec := strings.Split(ns, \"_\")\n\t\tns = spec[0]\n\t\tspecifier = \"_\" + spec[1]\n\t}\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns + specifier))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Set(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif specifier == \"\" {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn effectedID, nil\n}\n\n\/\/ DeleteContent removes an item from the database. 
Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ exception to typical \"run in goroutine\" pattern:\n\t\/\/ we want to have an updated admin view as soon as this is deleted, so\n\t\/\/ in some cases, the delete and redirect is faster than the sort,\n\t\/\/ thus still showing a deleted post in the admin view.\n\tSortContent(ns)\n\n\treturn nil\n}\n\n\/\/ Content retrieves one item from the database. Non-existent values will return an empty []byte.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrieves all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tif b == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tnumKeys := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, numKeys)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ QueryOptions holds options for a query\ntype QueryOptions struct {\n\tCount int\n\tOffset int\n\tOrder string\n}\n\n\/\/ Query retrieves a set of content from the db based on options\nfunc Query(namespace string, opts QueryOptions) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\t\tc := b.Cursor()\n\t\tn := b.Stats().KeyN\n\n\t\tvar start, end int\n\t\tswitch opts.Count {\n\t\tcase -1:\n\t\t\tstart = 0\n\t\t\tend = n\n\n\t\tdefault:\n\t\t\tstart = opts.Count * opts.Offset\n\t\t\tend = start + opts.Count\n\t\t}\n\n\t\t\/\/ bounds check on posts given the start & end count\n\t\tif start > n {\n\t\t\tstart = n - opts.Count\n\t\t}\n\t\tif end > n {\n\t\t\tend = n\n\t\t}\n\n\t\ti := 0 \/\/ count of num posts added\n\t\tcur := 0 \/\/ count of where cursor is\n\t\tswitch opts.Order {\n\t\tcase \"asc\":\n\t\t\tfor k, v := c.Last(); k != nil; c.Prev() {\n\t\t\t\tif cur < end {\n\t\t\t\t\tcur++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif cur >= start {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif i >= opts.Count {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tposts = append(posts, v)\n\t\t\t\ti++\n\t\t\t\tcur++\n\t\t\t}\n\n\t\tcase \"desc\":\n\t\t\tfor k, v := c.First(); k != nil; c.Next() {\n\t\t\t\tif cur < start {\n\t\t\t\t\tcur++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif cur >= end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif i >= opts.Count {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tposts = append(posts, v)\n\t\t\t\ti++\n\t\t\t\tcur++\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\t\/\/ if opts.order == \"asc\" {\n\t\/\/ \tposts = []json.RawMessage{}\n\t\/\/ \tfor i := len(posts) - 1; i >= 0; i-- {\n\t\/\/ \t\tposts = append(all, posts[i])\n\t\/\/ \t}\n\t\/\/ }\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the 
type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\t\/\/ only sort main content types i.e. Post\n\tif strings.Contains(namespace, \"_\") {\n\t\treturn\n\t}\n\n\tall := ContentAll(namespace)\n\n\tvar posts sortablePosts\n\t\/\/ decode each (json) into type to then sort\n\tfor i := range all {\n\t\tj := all[i]\n\t\tpost := content.Types[namespace]()\n\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\t\/\/ sort posts\n\tsort.Sort(posts)\n\n\t\/\/ store in _sorted bucket, first delete existing\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tbname := []byte(namespace + \"_sorted\")\n\t\terr := tx.DeleteBucket(bname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := tx.CreateBucketIfNotExists(bname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ encode to json and store as 'i:post.Time()':post\n\t\tfor i := range posts {\n\t\t\tj, err := json.Marshal(posts[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcid := fmt.Sprintf(\"%d:%d\", i, posts[i].Time())\n\t\t\terr = b.Put([]byte(cid), j)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error while updating db with sorted\", namespace, err)\n\t}\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() > s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tns = strings.TrimSuffix(ns, \"_external\")\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshal content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\nreassigning k, v with next or prev record\npackage db\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/bosssauce\/ponzu\/content\"\n\t\"github.com\/bosssauce\/ponzu\/management\/editor\"\n\t\"github.com\/bosssauce\/ponzu\/management\/manager\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/gorilla\/schema\"\n)\n\n\/\/ SetContent inserts or updates values in the database.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc SetContent(target string, data url.Values) (int, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\t\/\/ check if content id == -1 (indicating new post).\n\t\/\/ if so, run an insert which will assign the next auto incremented int.\n\t\/\/ this is done because boltdb 
begins its bucket auto increment value at 0,\n\t\/\/ which is the zero-value of an int in the Item struct field for ID.\n\t\/\/ this is a problem when the original first post (with auto ID = 0) gets\n\t\/\/ overwritten by any new post, originally having no ID, defaulting to 0.\n\tif id == \"-1\" {\n\t\treturn insert(ns, data)\n\t}\n\n\treturn update(ns, id, data)\n}\n\nfunc update(ns, id string, data url.Values) (int, error) {\n\tvar specifier string \/\/ i.e. _pending, _sorted, etc.\n\tif strings.Contains(ns, \"_\") {\n\t\tspec := strings.Split(ns, \"_\")\n\t\tns = spec[0]\n\t\tspecifier = \"_\" + spec[1]\n\t}\n\n\tcid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns + specifier))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(fmt.Sprintf(\"%d\", cid)), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif specifier == \"\" {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn cid, nil\n}\n\nfunc insert(ns string, data url.Values) (int, error) {\n\tvar effectedID int\n\tvar specifier string \/\/ i.e. _pending, _sorted, etc.\n\tif strings.Contains(ns, \"_\") {\n\t\tspec := strings.Split(ns, \"_\")\n\t\tns = spec[0]\n\t\tspecifier = \"_\" + spec[1]\n\t}\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists([]byte(ns + specifier))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ get the next available ID and convert to string\n\t\t\/\/ also set effectedID to int of ID\n\t\tid, err := b.NextSequence()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcid := strconv.FormatUint(id, 10)\n\t\teffectedID, err = strconv.Atoi(cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Set(\"id\", cid)\n\n\t\tj, err := postToJSON(ns, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(cid), j)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif specifier == \"\" {\n\t\tgo SortContent(ns)\n\t}\n\n\treturn effectedID, nil\n}\n\n\/\/ DeleteContent removes an item from the database. Deleting a non-existent item\n\/\/ will return a nil error.\nfunc DeleteContent(target string) error {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\ttx.Bucket([]byte(ns)).Delete([]byte(id))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ exception to typical \"run in goroutine\" pattern:\n\t\/\/ we want to have an updated admin view as soon as this is deleted, so\n\t\/\/ in some cases, the delete and redirect is faster than the sort,\n\t\/\/ thus still showing a deleted post in the admin view.\n\tSortContent(ns)\n\n\treturn nil\n}\n\n\/\/ Content retrieves one item from the database. 
Non-existent values will return an empty []byte.\n\/\/ The `target` argument is a string made up of namespace:id (string:int)\nfunc Content(target string) ([]byte, error) {\n\tt := strings.Split(target, \":\")\n\tns, id := t[0], t[1]\n\n\tval := &bytes.Buffer{}\n\terr := store.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(ns))\n\t\t_, err := val.Write(b.Get([]byte(id)))\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn val.Bytes(), nil\n}\n\n\/\/ ContentAll retrieves all items from the database within the provided namespace\nfunc ContentAll(namespace string) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\n\t\tif b == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tnumKeys := b.Stats().KeyN\n\t\tposts = make([][]byte, 0, numKeys)\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\tposts = append(posts, v)\n\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn posts\n}\n\n\/\/ QueryOptions holds options for a query\ntype QueryOptions struct {\n\tCount int\n\tOffset int\n\tOrder string\n}\n\n\/\/ Query retrieves a set of content from the db based on options\nfunc Query(namespace string, opts QueryOptions) [][]byte {\n\tvar posts [][]byte\n\tstore.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(namespace))\n\t\tc := b.Cursor()\n\t\tn := b.Stats().KeyN\n\n\t\tvar start, end int\n\t\tswitch opts.Count {\n\t\tcase -1:\n\t\t\tstart = 0\n\t\t\tend = n\n\n\t\tdefault:\n\t\t\tstart = opts.Count * opts.Offset\n\t\t\tend = start + opts.Count\n\t\t}\n\n\t\t\/\/ bounds check on posts given the start & end count\n\t\tif start > n {\n\t\t\tstart = n - opts.Count\n\t\t}\n\t\tif end > n {\n\t\t\tend = n\n\t\t}\n\n\t\ti := 0 \/\/ count of num posts added\n\t\tcur := 0 \/\/ count of where cursor is\n\t\tswitch opts.Order {\n\t\tcase \"asc\":\n\t\t\tfor k, v := c.Last(); k != nil; k, v = c.Prev() {\n\t\t\t\tif cur < end {\n\t\t\t\t\tcur++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif cur >= start {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif i >= opts.Count {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tposts = append(posts, v)\n\t\t\t\ti++\n\t\t\t\tcur++\n\t\t\t}\n\n\t\tcase \"desc\":\n\t\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\t\tif cur < start {\n\t\t\t\t\tcur++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif cur >= end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif i >= opts.Count {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tposts = append(posts, v)\n\t\t\t\ti++\n\t\t\t\tcur++\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\t\/\/ if opts.order == \"asc\" {\n\t\/\/ \tposts = []json.RawMessage{}\n\t\/\/ \tfor i := len(posts) - 1; i >= 0; i-- {\n\t\/\/ \t\tposts = append(all, posts[i])\n\t\/\/ \t}\n\t\/\/ }\n\n\treturn posts\n}\n\n\/\/ SortContent sorts all content of the type supplied as the namespace by time,\n\/\/ in descending order, from most recent to least recent\n\/\/ Should be called from a goroutine after SetContent is successful\nfunc SortContent(namespace string) {\n\t\/\/ only sort main content types i.e. 
Post\n\tif strings.Contains(namespace, \"_\") {\n\t\treturn\n\t}\n\n\tall := ContentAll(namespace)\n\n\tvar posts sortablePosts\n\t\/\/ decode each (json) into type to then sort\n\tfor i := range all {\n\t\tj := all[i]\n\t\tpost := content.Types[namespace]()\n\n\t\terr := json.Unmarshal(j, &post)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error decoding json while sorting\", namespace, \":\", err)\n\t\t\treturn\n\t\t}\n\n\t\tposts = append(posts, post.(editor.Sortable))\n\t}\n\n\t\/\/ sort posts\n\tsort.Sort(posts)\n\n\t\/\/ store in _sorted bucket, first delete existing\n\terr := store.Update(func(tx *bolt.Tx) error {\n\t\tbname := []byte(namespace + \"_sorted\")\n\t\terr := tx.DeleteBucket(bname)\n\t\tif err != nil && err != bolt.ErrBucketNotFound {\n\t\t\treturn err\n\t\t}\n\n\t\tb, err := tx.CreateBucketIfNotExists(bname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ encode to json and store as 'i:post.Time()':post\n\t\tfor i := range posts {\n\t\t\tj, err := json.Marshal(posts[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcid := fmt.Sprintf(\"%d:%d\", i, posts[i].Time())\n\t\t\terr = b.Put([]byte(cid), j)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Println(\"Error while updating db with sorted\", namespace, err)\n\t}\n\n}\n\ntype sortablePosts []editor.Sortable\n\nfunc (s sortablePosts) Len() int {\n\treturn len(s)\n}\n\nfunc (s sortablePosts) Less(i, j int) bool {\n\treturn s[i].Time() > s[j].Time()\n}\n\nfunc (s sortablePosts) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc postToJSON(ns string, data url.Values) ([]byte, error) {\n\t\/\/ find the content type and decode values into it\n\tns = strings.TrimSuffix(ns, \"_external\")\n\tt, ok := content.Types[ns]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(content.ErrTypeNotRegistered, ns)\n\t}\n\tpost := t()\n\n\tdec := schema.NewDecoder()\n\tdec.SetAliasTag(\"json\") \/\/ allows simpler struct tagging when creating a content type\n\tdec.IgnoreUnknownKeys(true) \/\/ will skip over form values submitted, but not in struct\n\terr := dec.Decode(post, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug, err := manager.Slug(post.(editor.Editable))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpost.(editor.Editable).SetSlug(slug)\n\n\t\/\/ marshal content struct to json for db storage\n\tj, err := json.Marshal(post)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn j, nil\n}\n<|endoftext|>"} {"text":"\/*\n Copyright 2014 CoreOS, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage transport\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestWriteReadTimeoutListener(t *testing.T) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected listen error: %v\", err)\n\t}\n\twln := rwTimeoutListener{\n\t\tListener: ln,\n\t\twtimeoutd: 10 * time.Millisecond,\n\t\trdtimeoutd: 10 * time.Millisecond,\n\t}\n\tstop := make(chan struct{})\n\n\tblocker := func() {\n\t\tconn, err := net.Dial(\"tcp\", 
ln.Addr().String())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected dial error: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\t\/\/ block the receiver until the writer timeout\n\t\t<-stop\n\t}\n\tgo blocker()\n\n\tconn, err := wln.Accept()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected accept error: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\t\/\/ fill the socket buffer\n\tdata := make([]byte, 5*1024*1024)\n\ttimer := time.AfterFunc(wln.wtimeoutd*5, func() {\n\t\tt.Fatal(\"wait timeout\")\n\t})\n\tdefer timer.Stop()\n\n\t_, err = conn.Write(data)\n\tif operr, ok := err.(*net.OpError); !ok || operr.Op != \"write\" || !operr.Timeout() {\n\t\tt.Errorf(\"err = %v, want write i\/o timeout error\", err)\n\t}\n\tstop <- struct{}{}\n\n\ttimer.Reset(wln.rdtimeoutd * 5)\n\tgo blocker()\n\n\tconn, err = wln.Accept()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected accept error: %v\", err)\n\t}\n\tbuf := make([]byte, 10)\n\t_, err = conn.Read(buf)\n\tif operr, ok := err.(*net.OpError); !ok || operr.Op != \"read\" || !operr.Timeout() {\n\t\tt.Errorf(\"err = %v, want read i\/o timeout error\", err)\n\t}\n\tstop <- struct{}{}\n}\npkg\/transport: add NewTimeoutListener test\/*\n Copyright 2014 CoreOS, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage transport\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ TestNewTimeoutListener tests that NewTimeoutListener returns a\n\/\/ rwTimeoutListener struct with timeouts set.\nfunc TestNewTimeoutListener(t *testing.T) {\n\tl, err := NewTimeoutListener(\":0\", \"http\", TLSInfo{}, time.Hour, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected NewTimeoutListener error: %v\", err)\n\t}\n\tdefer l.Close()\n\ttln := l.(*rwTimeoutListener)\n\tif tln.rdtimeoutd != time.Hour {\n\t\tt.Errorf(\"read timeout = %s, want %s\", tln.rdtimeoutd, time.Hour)\n\t}\n\tif tln.wtimeoutd != time.Hour {\n\t\tt.Errorf(\"write timeout = %s, want %s\", tln.wtimeoutd, time.Hour)\n\t}\n}\n\nfunc TestWriteReadTimeoutListener(t *testing.T) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected listen error: %v\", err)\n\t}\n\twln := rwTimeoutListener{\n\t\tListener: ln,\n\t\twtimeoutd: 10 * time.Millisecond,\n\t\trdtimeoutd: 10 * time.Millisecond,\n\t}\n\tstop := make(chan struct{})\n\n\tblocker := func() {\n\t\tconn, err := net.Dial(\"tcp\", ln.Addr().String())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected dial error: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\t\/\/ block the receiver until the writer timeout\n\t\t<-stop\n\t}\n\tgo blocker()\n\n\tconn, err := wln.Accept()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected accept error: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\t\/\/ fill the socket buffer\n\tdata := make([]byte, 5*1024*1024)\n\ttimer := time.AfterFunc(wln.wtimeoutd*5, func() {\n\t\tt.Fatal(\"wait timeout\")\n\t})\n\tdefer timer.Stop()\n\n\t_, err = conn.Write(data)\n\tif operr, ok := err.(*net.OpError); !ok || operr.Op != \"write\" || !operr.Timeout() {\n\t\tt.Errorf(\"err = %v, want write i\/o timeout 
error\", err)\n\t}\n\tstop <- struct{}{}\n\n\ttimer.Reset(wln.rdtimeoutd * 5)\n\tgo blocker()\n\n\tconn, err = wln.Accept()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected accept error: %v\", err)\n\t}\n\tbuf := make([]byte, 10)\n\t_, err = conn.Read(buf)\n\tif operr, ok := err.(*net.OpError); !ok || operr.Op != \"read\" || !operr.Timeout() {\n\t\tt.Errorf(\"err = %v, want write i\/o timeout error\", err)\n\t}\n\tstop <- struct{}{}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vault\n\nimport (\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tvault \"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/hashicorp\/vault\/sdk\/helper\/certutil\"\n\tcorelisters \"k8s.io\/client-go\/listers\/core\/v1\"\n\n\tv1 \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n)\n\nvar _ Interface = &Vault{}\n\n\/\/ ClientBuilder is a function type that returns a new Interface.\n\/\/ Can be used in tests to create a mock signer of Vault certificate requests.\ntype ClientBuilder func(namespace string, secretsLister corelisters.SecretLister,\n\tissuer v1.GenericIssuer) (Interface, error)\n\n\/\/ Interface implements various high level functionality related to connecting\n\/\/ with a Vault server, verifying its status and signing certificate request for\n\/\/ Vault's certificate.\n\/\/ TODO: Sys() is duplicated here and in Client interface\ntype Interface interface {\n\tSign(csrPEM []byte, duration time.Duration) (certPEM []byte, caPEM []byte, err error)\n\tSys() *vault.Sys\n\tIsVaultInitializedAndUnsealed() error\n}\n\n\/\/ Client implements functionality to talk to a Vault server.\ntype Client interface {\n\tNewRequest(method, requestPath string) *vault.Request\n\tRawRequest(r *vault.Request) (*vault.Response, error)\n\tSetToken(v string)\n\tToken() string\n\tSys() *vault.Sys\n}\n\n\/\/ Vault implements Interface and holds a Vault issuer, secrets lister and a\n\/\/ Vault client.\ntype Vault struct {\n\tsecretsLister corelisters.SecretLister\n\tissuer v1.GenericIssuer\n\tnamespace string\n\n\tclient Client\n}\n\n\/\/ New returns a new Vault instance with the given namespace, issuer and\n\/\/ secrets lister.\n\/\/ Returned errors may be network failures and should be considered for\n\/\/ retrying.\nfunc New(namespace string, secretsLister corelisters.SecretLister, issuer v1.GenericIssuer) (Interface, error) {\n\tv := &Vault{\n\t\tsecretsLister: secretsLister,\n\t\tnamespace: namespace,\n\t\tissuer: issuer,\n\t}\n\n\tcfg, err := v.newConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err := vault.NewClient(cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error initializing Vault client: %s\", err.Error())\n\t}\n\n\tif err := v.setToken(client); err != nil {\n\t\treturn nil, err\n\t}\n\n\tv.client = client\n\n\treturn v, nil\n}\n\n\/\/ Sign will connect to a Vault 
instance to sign a certificate signing request.\nfunc (v *Vault) Sign(csrPEM []byte, duration time.Duration) (cert []byte, ca []byte, err error) {\n\tcsr, err := pki.DecodeX509CertificateRequestBytes(csrPEM)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode CSR for signing: %s\", err)\n\t}\n\n\tparameters := map[string]string{\n\t\t\"common_name\": csr.Subject.CommonName,\n\t\t\"alt_names\": strings.Join(csr.DNSNames, \",\"),\n\t\t\"ip_sans\": strings.Join(pki.IPAddressesToString(csr.IPAddresses), \",\"),\n\t\t\"uri_sans\": strings.Join(pki.URLsToString(csr.URIs), \",\"),\n\t\t\"ttl\": duration.String(),\n\t\t\"csr\": string(csrPEM),\n\n\t\t\"exclude_cn_from_sans\": \"true\",\n\t}\n\n\tvaultIssuer := v.issuer.GetSpec().Vault\n\turl := path.Join(\"\/v1\", vaultIssuer.Path)\n\n\trequest := v.client.NewRequest(\"POST\", url)\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tif err := request.SetJSONBody(parameters); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to build vault request: %s\", err)\n\t}\n\n\tresp, err := v.client.RawRequest(request)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to sign certificate by vault: %s\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvaultResult := certutil.Secret{}\n\terr = resp.DecodeJSON(&vaultResult)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode response returned by vault: %s\", err)\n\t}\n\n\treturn extractCertificatesFromVaultCertificateSecret(&vaultResult)\n}\n\nfunc (v *Vault) setToken(client Client) error {\n\ttokenRef := v.issuer.GetSpec().Vault.Auth.TokenSecretRef\n\tif tokenRef != nil {\n\t\ttoken, err := v.tokenRef(tokenRef.Name, v.namespace, tokenRef.Key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient.SetToken(token)\n\n\t\treturn nil\n\t}\n\n\tappRole := v.issuer.GetSpec().Vault.Auth.AppRole\n\tif appRole != nil {\n\t\ttoken, err := v.requestTokenWithAppRoleRef(client, appRole)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient.SetToken(token)\n\n\t\treturn nil\n\t}\n\n\tkubernetesAuth := v.issuer.GetSpec().Vault.Auth.Kubernetes\n\tif kubernetesAuth != nil {\n\t\ttoken, err := v.requestTokenWithKubernetesAuth(client, kubernetesAuth)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading Kubernetes service account token from %s: %s\", kubernetesAuth.SecretRef.Name, err.Error())\n\t\t}\n\t\tclient.SetToken(token)\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"error initializing Vault client: tokenSecretRef, appRoleSecretRef, or Kubernetes auth role not set\")\n}\n\nfunc (v *Vault) newConfig() (*vault.Config, error) {\n\tcfg := vault.DefaultConfig()\n\tcfg.Address = v.issuer.GetSpec().Vault.Server\n\n\tcerts := v.issuer.GetSpec().Vault.CABundle\n\tif len(certs) == 0 {\n\t\treturn cfg, nil\n\t}\n\n\tcaCertPool := x509.NewCertPool()\n\tok := caCertPool.AppendCertsFromPEM(certs)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"error loading Vault CA bundle\")\n\t}\n\n\tcfg.HttpClient.Transport.(*http.Transport).TLSClientConfig.RootCAs = caCertPool\n\n\treturn cfg, nil\n}\n\nfunc (v *Vault) tokenRef(name, namespace, key string) (string, error) {\n\tsecret, err := v.secretsLister.Secrets(namespace).Get(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif key == \"\" {\n\t\tkey = v1.DefaultVaultTokenAuthSecretKey\n\t}\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, name, namespace)\n\t}\n\n\ttoken := string(keyBytes)\n\ttoken = strings.TrimSpace(token)\n\n\treturn token, nil\n}\n\nfunc 
(v *Vault) appRoleRef(appRole *v1.VaultAppRole) (roleId, secretId string, err error) {\n\troleId = strings.TrimSpace(appRole.RoleId)\n\n\tsecret, err := v.secretsLister.Secrets(v.namespace).Get(appRole.SecretRef.Name)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tkey := appRole.SecretRef.Key\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, v.namespace, appRole.SecretRef.Name)\n\t}\n\n\tsecretId = string(keyBytes)\n\tsecretId = strings.TrimSpace(secretId)\n\n\treturn roleId, secretId, nil\n}\n\nfunc (v *Vault) requestTokenWithAppRoleRef(client Client, appRole *v1.VaultAppRole) (string, error) {\n\troleId, secretId, err := v.appRoleRef(appRole)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tparameters := map[string]string{\n\t\t\"role_id\": roleId,\n\t\t\"secret_id\": secretId,\n\t}\n\n\tauthPath := appRole.Path\n\tif authPath == \"\" {\n\t\tauthPath = \"approle\"\n\t}\n\n\turl := path.Join(\"\/v1\", \"auth\", authPath, \"login\")\n\n\trequest := client.NewRequest(\"POST\", url)\n\n\terr = request.SetJSONBody(parameters)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encoding Vault parameters: %s\", err.Error())\n\t}\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tresp, err := client.RawRequest(request)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error logging in to Vault server: %s\", err.Error())\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvaultResult := vault.Secret{}\n\tif err := resp.DecodeJSON(&vaultResult); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to decode JSON payload: %s\", err.Error())\n\t}\n\n\ttoken, err := vaultResult.TokenID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to read token: %s\", err.Error())\n\t}\n\n\tif token == \"\" {\n\t\treturn \"\", errors.New(\"no token returned\")\n\t}\n\n\treturn token, nil\n}\n\nfunc (v *Vault) requestTokenWithKubernetesAuth(client Client, kubernetesAuth *v1.VaultKubernetesAuth) (string, error) {\n\tsecret, err := v.secretsLister.Secrets(v.namespace).Get(kubernetesAuth.SecretRef.Name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := kubernetesAuth.SecretRef.Key\n\tif key == \"\" {\n\t\tkey = v1.DefaultVaultTokenAuthSecretKey\n\t}\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, v.namespace, kubernetesAuth.SecretRef.Name)\n\t}\n\n\tjwt := string(keyBytes)\n\n\tparameters := map[string]string{\n\t\t\"role\": kubernetesAuth.Role,\n\t\t\"jwt\": jwt,\n\t}\n\n\tmountPath := kubernetesAuth.Path\n\tif mountPath == \"\" {\n\t\tmountPath = v1.DefaultVaultKubernetesAuthMountPath\n\t}\n\n\turl := filepath.Join(mountPath, \"login\")\n\trequest := client.NewRequest(\"POST\", url)\n\terr = request.SetJSONBody(parameters)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encoding Vault parameters: %s\", err.Error())\n\t}\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tresp, err := client.RawRequest(request)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error calling Vault server: %s\", err.Error())\n\t}\n\n\tdefer resp.Body.Close()\n\tvaultResult := vault.Secret{}\n\terr = resp.DecodeJSON(&vaultResult)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to decode JSON payload: %s\", err.Error())\n\t}\n\n\ttoken, err := vaultResult.TokenID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to read token: %s\", err.Error())\n\t}\n\n\treturn token, nil\n}\n\nfunc (v *Vault) Sys() *vault.Sys {\n\treturn v.client.Sys()\n}\n\nfunc 
extractCertificatesFromVaultCertificateSecret(secret *certutil.Secret) ([]byte, []byte, error) {\n\tparsedBundle, err := certutil.ParsePKIMap(secret.Data)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode response returned by vault: %s\", err)\n\t}\n\n\tvbundle, err := parsedBundle.ToCertBundle()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"unable to convert certificate bundle to PEM bundle: %s\", err.Error())\n\t}\n\n\tbundle, err := pki.ParseSingleCertificateChainPEM([]byte(\n\t\tstrings.Join(append(\n\t\t\tvbundle.CAChain,\n\t\t\tvbundle.IssuingCA,\n\t\t\tvbundle.Certificate,\n\t\t), \"\\n\")))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse certificate chain from vault: %w\", err)\n\t}\n\n\treturn bundle.ChainPEM, bundle.CAPEM, nil\n}\n\nfunc (v *Vault) IsVaultInitializedAndUnsealed() error {\n\thealthURL := path.Join(\"\/v1\", \"sys\", \"health\")\n\thealthRequest := v.client.NewRequest(\"GET\", healthURL)\n\thealthResp, err := v.client.RawRequest(healthRequest)\n\t\/\/ 429 = if unsealed and standby\n\t\/\/ 472 = if disaster recovery mode replication secondary and active\n\t\/\/ 473 = if performance standby\n\tif err != nil && healthResp.StatusCode != 429 && healthResp.StatusCode != 472 && healthResp.StatusCode != 473 {\n\t\treturn err\n\t}\n\tdefer healthResp.Body.Close()\n\treturn nil\n}\n\nfunc (v *Vault) addVaultNamespaceToRequest(request *vault.Request) {\n\tvaultIssuer := v.issuer.GetSpec().Vault\n\tif vaultIssuer != nil && vaultIssuer.Namespace != \"\" {\n\t\tif request.Headers != nil {\n\t\t\trequest.Headers.Add(\"X-VAULT-NAMESPACE\", vaultIssuer.Namespace)\n\t\t} else {\n\t\t\tvaultReqHeaders := http.Header{}\n\t\t\tvaultReqHeaders.Add(\"X-VAULT-NAMESPACE\", vaultIssuer.Namespace)\n\t\t\trequest.Headers = vaultReqHeaders\n\t\t}\n\t}\n}\nVault internal client should check health conn err before checking response status\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vault\n\nimport (\n\t\"crypto\/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\tvault \"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/hashicorp\/vault\/sdk\/helper\/certutil\"\n\tcorelisters \"k8s.io\/client-go\/listers\/core\/v1\"\n\n\tv1 \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n)\n\nvar _ Interface = &Vault{}\n\n\/\/ ClientBuilder is a function type that returns a new Interface.\n\/\/ Can be used in tests to create a mock signer of Vault certificate requests.\ntype ClientBuilder func(namespace string, secretsLister corelisters.SecretLister,\n\tissuer v1.GenericIssuer) (Interface, error)\n\n\/\/ Interface implements various high level functionality related to connecting\n\/\/ with a Vault server, verifying its status and signing certificate request for\n\/\/ Vault's certificate.\n\/\/ TODO: Sys() is duplicated here and in Client interface\ntype Interface interface 
{\n\tSign(csrPEM []byte, duration time.Duration) (certPEM []byte, caPEM []byte, err error)\n\tSys() *vault.Sys\n\tIsVaultInitializedAndUnsealed() error\n}\n\n\/\/ Client implements functionality to talk to a Vault server.\ntype Client interface {\n\tNewRequest(method, requestPath string) *vault.Request\n\tRawRequest(r *vault.Request) (*vault.Response, error)\n\tSetToken(v string)\n\tToken() string\n\tSys() *vault.Sys\n}\n\n\/\/ Vault implements Interface and holds a Vault issuer, secrets lister and a\n\/\/ Vault client.\ntype Vault struct {\n\tsecretsLister corelisters.SecretLister\n\tissuer v1.GenericIssuer\n\tnamespace string\n\n\tclient Client\n}\n\n\/\/ New returns a new Vault instance with the given namespace, issuer and\n\/\/ secrets lister.\n\/\/ Returned errors may be network failures and should be considered for\n\/\/ retrying.\nfunc New(namespace string, secretsLister corelisters.SecretLister, issuer v1.GenericIssuer) (Interface, error) {\n\tv := &Vault{\n\t\tsecretsLister: secretsLister,\n\t\tnamespace: namespace,\n\t\tissuer: issuer,\n\t}\n\n\tcfg, err := v.newConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err := vault.NewClient(cfg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error initializing Vault client: %s\", err.Error())\n\t}\n\n\tif err := v.setToken(client); err != nil {\n\t\treturn nil, err\n\t}\n\n\tv.client = client\n\n\treturn v, nil\n}\n\n\/\/ Sign will connect to a Vault instance to sign a certificate signing request.\nfunc (v *Vault) Sign(csrPEM []byte, duration time.Duration) (cert []byte, ca []byte, err error) {\n\tcsr, err := pki.DecodeX509CertificateRequestBytes(csrPEM)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode CSR for signing: %s\", err)\n\t}\n\n\tparameters := map[string]string{\n\t\t\"common_name\": csr.Subject.CommonName,\n\t\t\"alt_names\": strings.Join(csr.DNSNames, \",\"),\n\t\t\"ip_sans\": strings.Join(pki.IPAddressesToString(csr.IPAddresses), \",\"),\n\t\t\"uri_sans\": strings.Join(pki.URLsToString(csr.URIs), \",\"),\n\t\t\"ttl\": duration.String(),\n\t\t\"csr\": string(csrPEM),\n\n\t\t\"exclude_cn_from_sans\": \"true\",\n\t}\n\n\tvaultIssuer := v.issuer.GetSpec().Vault\n\turl := path.Join(\"\/v1\", vaultIssuer.Path)\n\n\trequest := v.client.NewRequest(\"POST\", url)\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tif err := request.SetJSONBody(parameters); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to build vault request: %s\", err)\n\t}\n\n\tresp, err := v.client.RawRequest(request)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to sign certificate by vault: %s\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvaultResult := certutil.Secret{}\n\terr = resp.DecodeJSON(&vaultResult)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode response returned by vault: %s\", err)\n\t}\n\n\treturn extractCertificatesFromVaultCertificateSecret(&vaultResult)\n}\n\nfunc (v *Vault) setToken(client Client) error {\n\ttokenRef := v.issuer.GetSpec().Vault.Auth.TokenSecretRef\n\tif tokenRef != nil {\n\t\ttoken, err := v.tokenRef(tokenRef.Name, v.namespace, tokenRef.Key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient.SetToken(token)\n\n\t\treturn nil\n\t}\n\n\tappRole := v.issuer.GetSpec().Vault.Auth.AppRole\n\tif appRole != nil {\n\t\ttoken, err := v.requestTokenWithAppRoleRef(client, appRole)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient.SetToken(token)\n\n\t\treturn nil\n\t}\n\n\tkubernetesAuth := v.issuer.GetSpec().Vault.Auth.Kubernetes\n\tif 
kubernetesAuth != nil {\n\t\ttoken, err := v.requestTokenWithKubernetesAuth(client, kubernetesAuth)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading Kubernetes service account token from %s: %s\", kubernetesAuth.SecretRef.Name, err.Error())\n\t\t}\n\t\tclient.SetToken(token)\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"error initializing Vault client: tokenSecretRef, appRoleSecretRef, or Kubernetes auth role not set\")\n}\n\nfunc (v *Vault) newConfig() (*vault.Config, error) {\n\tcfg := vault.DefaultConfig()\n\tcfg.Address = v.issuer.GetSpec().Vault.Server\n\n\tcerts := v.issuer.GetSpec().Vault.CABundle\n\tif len(certs) == 0 {\n\t\treturn cfg, nil\n\t}\n\n\tcaCertPool := x509.NewCertPool()\n\tok := caCertPool.AppendCertsFromPEM(certs)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"error loading Vault CA bundle\")\n\t}\n\n\tcfg.HttpClient.Transport.(*http.Transport).TLSClientConfig.RootCAs = caCertPool\n\n\treturn cfg, nil\n}\n\nfunc (v *Vault) tokenRef(name, namespace, key string) (string, error) {\n\tsecret, err := v.secretsLister.Secrets(namespace).Get(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif key == \"\" {\n\t\tkey = v1.DefaultVaultTokenAuthSecretKey\n\t}\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, name, namespace)\n\t}\n\n\ttoken := string(keyBytes)\n\ttoken = strings.TrimSpace(token)\n\n\treturn token, nil\n}\n\nfunc (v *Vault) appRoleRef(appRole *v1.VaultAppRole) (roleId, secretId string, err error) {\n\troleId = strings.TrimSpace(appRole.RoleId)\n\n\tsecret, err := v.secretsLister.Secrets(v.namespace).Get(appRole.SecretRef.Name)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tkey := appRole.SecretRef.Key\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, v.namespace, appRole.SecretRef.Name)\n\t}\n\n\tsecretId = string(keyBytes)\n\tsecretId = strings.TrimSpace(secretId)\n\n\treturn roleId, secretId, nil\n}\n\nfunc (v *Vault) requestTokenWithAppRoleRef(client Client, appRole *v1.VaultAppRole) (string, error) {\n\troleId, secretId, err := v.appRoleRef(appRole)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tparameters := map[string]string{\n\t\t\"role_id\": roleId,\n\t\t\"secret_id\": secretId,\n\t}\n\n\tauthPath := appRole.Path\n\tif authPath == \"\" {\n\t\tauthPath = \"approle\"\n\t}\n\n\turl := path.Join(\"\/v1\", \"auth\", authPath, \"login\")\n\n\trequest := client.NewRequest(\"POST\", url)\n\n\terr = request.SetJSONBody(parameters)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encoding Vault parameters: %s\", err.Error())\n\t}\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tresp, err := client.RawRequest(request)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error logging in to Vault server: %s\", err.Error())\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvaultResult := vault.Secret{}\n\tif err := resp.DecodeJSON(&vaultResult); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to decode JSON payload: %s\", err.Error())\n\t}\n\n\ttoken, err := vaultResult.TokenID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to read token: %s\", err.Error())\n\t}\n\n\tif token == \"\" {\n\t\treturn \"\", errors.New(\"no token returned\")\n\t}\n\n\treturn token, nil\n}\n\nfunc (v *Vault) requestTokenWithKubernetesAuth(client Client, kubernetesAuth *v1.VaultKubernetesAuth) (string, error) {\n\tsecret, err := v.secretsLister.Secrets(v.namespace).Get(kubernetesAuth.SecretRef.Name)\n\tif 
err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := kubernetesAuth.SecretRef.Key\n\tif key == \"\" {\n\t\tkey = v1.DefaultVaultTokenAuthSecretKey\n\t}\n\n\tkeyBytes, ok := secret.Data[key]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no data for %q in secret '%s\/%s'\", key, v.namespace, kubernetesAuth.SecretRef.Name)\n\t}\n\n\tjwt := string(keyBytes)\n\n\tparameters := map[string]string{\n\t\t\"role\": kubernetesAuth.Role,\n\t\t\"jwt\": jwt,\n\t}\n\n\tmountPath := kubernetesAuth.Path\n\tif mountPath == \"\" {\n\t\tmountPath = v1.DefaultVaultKubernetesAuthMountPath\n\t}\n\n\turl := filepath.Join(mountPath, \"login\")\n\trequest := client.NewRequest(\"POST\", url)\n\terr = request.SetJSONBody(parameters)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encoding Vault parameters: %s\", err.Error())\n\t}\n\n\tv.addVaultNamespaceToRequest(request)\n\n\tresp, err := client.RawRequest(request)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error calling Vault server: %s\", err.Error())\n\t}\n\n\tdefer resp.Body.Close()\n\tvaultResult := vault.Secret{}\n\terr = resp.DecodeJSON(&vaultResult)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to decode JSON payload: %s\", err.Error())\n\t}\n\n\ttoken, err := vaultResult.TokenID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to read token: %s\", err.Error())\n\t}\n\n\treturn token, nil\n}\n\nfunc (v *Vault) Sys() *vault.Sys {\n\treturn v.client.Sys()\n}\n\nfunc extractCertificatesFromVaultCertificateSecret(secret *certutil.Secret) ([]byte, []byte, error) {\n\tparsedBundle, err := certutil.ParsePKIMap(secret.Data)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode response returned by vault: %s\", err)\n\t}\n\n\tvbundle, err := parsedBundle.ToCertBundle()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"unable to convert certificate bundle to PEM bundle: %s\", err.Error())\n\t}\n\n\tbundle, err := pki.ParseSingleCertificateChainPEM([]byte(\n\t\tstrings.Join(append(\n\t\t\tvbundle.CAChain,\n\t\t\tvbundle.IssuingCA,\n\t\t\tvbundle.Certificate,\n\t\t), \"\\n\")))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to parse certificate chain from vault: %w\", err)\n\t}\n\n\treturn bundle.ChainPEM, bundle.CAPEM, nil\n}\n\nfunc (v *Vault) IsVaultInitializedAndUnsealed() error {\n\thealthURL := path.Join(\"\/v1\", \"sys\", \"health\")\n\thealthRequest := v.client.NewRequest(\"GET\", healthURL)\n\thealthResp, err := v.client.RawRequest(healthRequest)\n\n\tif healthResp != nil {\n\t\tdefer healthResp.Body.Close()\n\t}\n\n\t\/\/ 429 = if unsealed and standby\n\t\/\/ 472 = if disaster recovery mode replication secondary and active\n\t\/\/ 473 = if performance standby\n\tif err != nil {\n\t\tswitch {\n\t\tcase healthResp == nil:\n\t\t\treturn err\n\t\tcase healthResp.StatusCode == 429, healthResp.StatusCode == 472, healthResp.StatusCode == 473:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"error calling Vault %s: %w\", healthURL, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (v *Vault) addVaultNamespaceToRequest(request *vault.Request) {\n\tvaultIssuer := v.issuer.GetSpec().Vault\n\tif vaultIssuer != nil && vaultIssuer.Namespace != \"\" {\n\t\tif request.Headers != nil {\n\t\t\trequest.Headers.Add(\"X-VAULT-NAMESPACE\", vaultIssuer.Namespace)\n\t\t} else {\n\t\t\tvaultReqHeaders := http.Header{}\n\t\t\tvaultReqHeaders.Add(\"X-VAULT-NAMESPACE\", vaultIssuer.Namespace)\n\t\t\trequest.Headers = vaultReqHeaders\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport 
(\n\t\"strings\"\n\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/device42\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/log\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/salt\"\n\t\"github.com\/nextgearcapital\/pepper\/template\/vsphere\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tprofile string\n\troles string\n\tosTemplate string\n\tipam bool\n)\n\nfunc init() {\n\tRootCmd.AddCommand(deployCmd)\n\n\tdeployCmd.Flags().StringVarP(&profile, \"profile\", \"p\", \"\", \"Profile to generate and output to \/etc\/salt\/cloud.profiles.d for salt-cloud to use\")\n\tdeployCmd.Flags().StringVarP(&roles, \"roles\", \"r\", \"\", \"List of roles to assign to the host in D42 [eg: dcos,dcos-master]\")\n\tdeployCmd.Flags().StringVarP(&osTemplate, \"template\", \"t\", \"\", \"Which OS template you want to use [eg: Ubuntu, CentOS, someothertemplatename]\")\n\tdeployCmd.Flags().BoolVarP(&ipam, \"no-ipam\", \"\", false, \"Whether or not to use Device42 IPAM [This is only used internally]\")\n\tdeployCmd.Flags().BoolVarP(&log.IsDebugging, \"debug\", \"d\", false, \"Turn debugging on\")\n}\n\nvar deployCmd = &cobra.Command{\n\tUse: \"deploy\",\n\tShort: \"Deploy VM's via salt-cloud\",\n\tLong: `pepper is a wrapper around salt-cloud that will generate salt-cloud profiles based on information you provide in profile configs.\nProfile configs live in \"\/etc\/pepper\/config.d\/{platform}\/{environment}. Pepper is opinionated and looks at the profile you pass in as it's source\nof truth. For example: If you pass in \"vmware-dev-large\" as the profile, it will look for your profile config in \"\/etc\/pepper\/config.d\/vmware\/large.yaml\".\nThis allows for maximum flexibility due to the fact that everyone has different environments and may have some sort of naming scheme associated with them\nso Pepper makes no assumptions on that. Pepper does however make assumptions on your instance type. 
[eg: nano, micro, small, medium, etc] Although these\noptions are available to you, you are free to override them as you see fit.\nFor example:\n\nProvision new host web01 (Ubuntu) in the dev environment from the nano profile using vmware as a provider:\n\n$ pepper deploy -p vmware-dev-nano -t Ubuntu web01\n\nOr alternatively:\n\n$ pepper deploy --profile vmware-dev-nano --template Ubuntu web01\n\nProvision new host web02 (CentOS) in the prd environment from the large profile using vmware as a provider:\n\n$ pepper deploy -p vmware-prd-large -t CentOS web02\n\nProvision new host web03 (Ubuntu) in the uat environment from the hyper profile using vmware as a provider:\n\n$ pepper deploy -p vmware-uat-hyper -t Ubuntu web03\n\nAre you getting this yet?\n\n$ pepper deploy -p vmware-prd-mid -t Ubuntu -r dcos,dcos-master dcos01 dcos02 dcos03`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif profile == \"\" {\n\t\t\tlog.Die(\"You didn't specify a profile.\")\n\t\t} else if osTemplate == \"\" {\n\t\t\tlog.Die(\"You didn't specify an OS template.\")\n\t\t} else if len(args) == 0 {\n\t\t\tlog.Die(\"You didn't specify any hosts.\")\n\t\t}\n\n\t\tsplitProfile := strings.Split(profile, \"-\")\n\n\t\t\/\/ These will be the basis for how the profile gets generated.\n\t\tplatform := splitProfile[0]\n\t\tenvironment := splitProfile[1]\n\t\tinstancetype := splitProfile[2]\n\n\t\t\/\/ Nothing really gained here it just makes the code more readable.\n\t\thosts := args\n\n\t\tvar ipAddress string\n\t\tvar serviceLevel string\n\n\t\tfor _, host := range hosts {\n\t\t\tif ipam != true {\n\t\t\t\tif err := device42.ReadConfig(environment); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Get a new IP\n\t\t\t\tnewIP, err := device42.GetNextIP(device42.IPRange)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tipAddress = newIP\n\t\t\t\t\/\/ Create the Device\n\t\t\t\tif err := device42.CreateDevice(host, serviceLevel); err != nil {\n\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Reserve IP\n\t\t\t\tif err := device42.ReserveIP(newIP, host); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(newIP, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Update custom fields\n\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(newIP, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch platform {\n\t\t\tcase \"vmware\":\n\t\t\t\tvar vsphere vsphere.ProfileConfig\n\t\t\t\tif err := vsphere.Prepare(platform, environment, instancetype, osTemplate, ipAddress); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Generate(); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := salt.Provision(profile, host); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Remove(); err 
!= nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Die(\"I don't recognize this platform!\")\n\t\t\t}\n\t\t}\n\t},\n}\nKeep variable consistentpackage cmd\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/device42\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/log\"\n\t\"github.com\/nextgearcapital\/pepper\/pkg\/salt\"\n\t\"github.com\/nextgearcapital\/pepper\/template\/vsphere\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tprofile string\n\troles string\n\tosTemplate string\n\tipam bool\n)\n\nfunc init() {\n\tRootCmd.AddCommand(deployCmd)\n\n\tdeployCmd.Flags().StringVarP(&profile, \"profile\", \"p\", \"\", \"Profile to generate and output to \/etc\/salt\/cloud.profiles.d for salt-cloud to use\")\n\tdeployCmd.Flags().StringVarP(&roles, \"roles\", \"r\", \"\", \"List of roles to assign to the host in D42 [eg: dcos,dcos-master]\")\n\tdeployCmd.Flags().StringVarP(&osTemplate, \"template\", \"t\", \"\", \"Which OS template you want to use [eg: Ubuntu, CentOS, someothertemplatename]\")\n\tdeployCmd.Flags().BoolVarP(&ipam, \"no-ipam\", \"\", false, \"Whether or not to use Device42 IPAM [This is only used internally]\")\n\tdeployCmd.Flags().BoolVarP(&log.IsDebugging, \"debug\", \"d\", false, \"Turn debugging on\")\n}\n\nvar deployCmd = &cobra.Command{\n\tUse: \"deploy\",\n\tShort: \"Deploy VMs via salt-cloud\",\n\tLong: `pepper is a wrapper around salt-cloud that will generate salt-cloud profiles based on information you provide in profile configs.\nProfile configs live in \"\/etc\/pepper\/config.d\/{platform}\/{environment}\". Pepper is opinionated and looks at the profile you pass in as its source\nof truth. For example: If you pass in \"vmware-dev-large\" as the profile, it will look for your profile config in \"\/etc\/pepper\/config.d\/vmware\/large.yaml\".\nThis allows for maximum flexibility due to the fact that everyone has different environments and may have some sort of naming scheme associated with them,\nso Pepper makes no assumptions on that. Pepper does, however, make assumptions about your instance type. 
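The instance type is the final segment of the profile name 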
[eg: nano, micro, small, medium, etc] Although these\noptions are available to you, you are free to override them as you see fit.\nFor example:\n\nProvision new host web01 (Ubuntu) in the dev environment from the nano profile using vmware as a provider:\n\n$ pepper deploy -p vmware-dev-nano -t Ubuntu web01\n\nOr alternatively:\n\n$ pepper deploy --profile vmware-dev-nano --template Ubuntu web01\n\nProvision new host web02 (CentOS) in the prd environment from the large profile using vmware as a provider:\n\n$ pepper deploy -p vmware-prd-large -t CentOS web02\n\nProvision new host web03 (Ubuntu) in the uat environment from the hyper profile using vmware as a provider:\n\n$ pepper deploy -p vmware-uat-hyper -t Ubuntu web03\n\nAre you getting this yet?\n\n$ pepper deploy -p vmware-prd-mid -t Ubuntu -r dcos,dcos-master dcos01 dcos02 dcos03`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif profile == \"\" {\n\t\t\tlog.Die(\"You didn't specify a profile.\")\n\t\t} else if osTemplate == \"\" {\n\t\t\tlog.Die(\"You didn't specify an OS template.\")\n\t\t} else if len(args) == 0 {\n\t\t\tlog.Die(\"You didn't specify any hosts.\")\n\t\t}\n\n\t\tsplitProfile := strings.Split(profile, \"-\")\n\n\t\t\/\/ These will be the basis for how the profile gets generated.\n\t\tplatform := splitProfile[0]\n\t\tenvironment := splitProfile[1]\n\t\tinstancetype := splitProfile[2]\n\n\t\t\/\/ Nothing really gained here it just makes the code more readable.\n\t\thosts := args\n\n\t\tvar ipAddress string\n\t\tvar serviceLevel string\n\n\t\tfor _, host := range hosts {\n\t\t\tif ipam != true {\n\t\t\t\tif err := device42.ReadConfig(environment); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Get a new IP\n\t\t\t\tnewIP, err := device42.GetNextIP(device42.IPRange)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tipAddress = newIP\n\t\t\t\t\/\/ Create the Device\n\t\t\t\tif err := device42.CreateDevice(host, serviceLevel); err != nil {\n\t\t\t\t\tif err = device42.DeleteDevice(host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Reserve IP\n\t\t\t\tif err := device42.ReserveIP(ipAddress, host); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\t\/\/ Update custom fields\n\t\t\t\tif err := device42.UpdateCustomFields(host, \"roles\", roles); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch platform {\n\t\t\tcase \"vmware\":\n\t\t\t\tvar vsphere vsphere.ProfileConfig\n\t\t\t\tif err := vsphere.Prepare(platform, environment, instancetype, osTemplate, ipAddress); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := vsphere.Generate(); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := salt.Provision(profile, host); err != nil {\n\t\t\t\t\tif err = device42.CleanDeviceAndIP(ipAddress, host); err != nil {\n\t\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\t\tif err := 
vsphere.Remove(); err != nil {\n\t\t\t\t\tlog.Die(\"%s\", err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Die(\"I don't recognize this platform!\")\n\t\t\t}\n\t\t}\n\t},\n}\n<|endoftext|>"} {"text":"package lwcache\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestSetAndGet(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-1\")\n\n\tkey := \"test-1\"\n\texpect := \"test-1-value\"\n\n\tc.Set(key, expect, 500*time.Millisecond)\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\n\t\/\/ after expire\n\ttime.Sleep(550 * time.Millisecond)\n\tactual, ok = c.Get(key)\n\tassert.False(ok)\n\tassert.Equal(nil, actual)\n}\n\nfunc TestSetExpire(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-2\")\n\n\tkey := \"test-2\"\n\texpect := \"test-1-value\"\n\n\tc.Set(key, expect, 500*time.Millisecond)\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\n\tc.SetExpire(key, 1*time.Second)\n\t\/\/ after first expiration\n\ttime.Sleep(550 * time.Millisecond)\n\tactual, ok = c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n}\n\nfunc TestSetRefresher(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-4\")\n\n\tkey := \"test-4\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\tnum, ok := currentValue.(int)\n\t\tif ok {\n\t\t\treturn num + 1, nil\n\t\t}\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 10*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tfor i := 0; i < 5; i++ {\n\t\tactual, ok := c.Get(key)\n\t\tassert.True(ok)\n\t\tassert.Equal(expect, actual, fmt.Sprintf(\"Test No. %d\", i+1))\n\t\ttime.Sleep(1050 * time.Millisecond)\n\t\texpect++\n\t}\n}\n\nfunc TestSetRefresher_OnExpired(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-5\")\n\n\tkey := \"test-5\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\tnum, ok := currentValue.(int)\n\t\tif ok {\n\t\t\treturn num + 1, nil\n\t\t}\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 2*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\ttime.Sleep(1500 * time.Millisecond)\n\n\t\/\/ refresh\n\tactual, ok = c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect+1, actual)\n\ttime.Sleep(1500 * time.Millisecond)\n\n\t\/\/ expire\n\tactual, ok = c.Get(key)\n\tassert.False(ok)\n\tassert.Equal(nil, actual)\n}\n\nfunc TestSetRefresher_OnRefreshError(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-3\")\n\n\tkey := \"test-3\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 10*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tfor i := 0; i < 5; i++ {\n\t\tactual, ok := c.Get(key)\n\t\tassert.True(ok)\n\t\tassert.Equal(expect, actual, fmt.Sprintf(\"Test No. 
%d\", i+1))\n\t\ttime.Sleep(1050 * time.Millisecond)\n\t}\n}\nmodify testpackage lwcache\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestSetAndGet(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-1\")\n\n\tkey := \"test-1\"\n\texpect := \"test-1-value\"\n\n\tc.Set(key, expect, 500*time.Millisecond)\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\n\t\/\/ after expire\n\ttime.Sleep(550 * time.Millisecond)\n\tactual, ok = c.Get(key)\n\tassert.False(ok)\n\tassert.Equal(nil, actual)\n}\n\nfunc TestSetExpire(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-2\")\n\n\tkey := \"test-2\"\n\texpect := \"test-1-value\"\n\n\tc.Set(key, expect, 500*time.Millisecond)\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\n\tc.SetExpire(key, 1*time.Second)\n\t\/\/ after first expiration\n\ttime.Sleep(550 * time.Millisecond)\n\tactual, ok = c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n}\n\nfunc TestSetRefresher(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-4\")\n\n\tkey := \"test-4\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\tnum, ok := currentValue.(int)\n\t\tif ok {\n\t\t\treturn num + 1, nil\n\t\t}\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 10*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tfor i := 0; i < 5; i++ {\n\t\tactual, ok := c.Get(key)\n\t\tassert.True(ok)\n\t\tassert.Equal(expect, actual, fmt.Sprintf(\"Test No. %d\", i+1))\n\t\ttime.Sleep(1050 * time.Millisecond)\n\t\texpect++\n\t}\n\n\tc.StopRefresher(key)\n\tfor i := 0; i < 2; i++ {\n\t\ttime.Sleep(1050 * time.Millisecond)\n\t\tactual, ok := c.Get(key)\n\t\tassert.True(ok)\n\t\tassert.Equal(expect, actual) \/\/ expect not changed\n\t}\n}\n\nfunc TestSetRefresher_OnExpired(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-5\")\n\n\tkey := \"test-5\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\tnum, ok := currentValue.(int)\n\t\tif ok {\n\t\t\treturn num + 1, nil\n\t\t}\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 2*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tactual, ok := c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect, actual)\n\ttime.Sleep(1500 * time.Millisecond)\n\n\t\/\/ refresh\n\tactual, ok = c.Get(key)\n\tassert.True(ok)\n\tassert.Equal(expect+1, actual)\n\ttime.Sleep(1500 * time.Millisecond)\n\n\t\/\/ expire\n\tactual, ok = c.Get(key)\n\tassert.False(ok)\n\tassert.Equal(nil, actual)\n}\n\nfunc TestSetRefresher_OnRefreshError(t *testing.T) {\n\tassert := assert.New(t)\n\tc := New(\"test-3\")\n\n\tkey := \"test-3\"\n\texpect := 0\n\trefresher := func(c Cache, key interface{}, currentValue interface{}) (interface{}, error) {\n\t\treturn 0, errors.New(\"refresh failed\")\n\t}\n\n\tc.Set(key, expect, 10*time.Second)\n\tc.SetRefresher(refresher)\n\tc.StartRefresher(key, 1*time.Second)\n\n\tfor i := 0; i < 5; i++ {\n\t\tactual, ok := c.Get(key)\n\t\tassert.True(ok)\n\t\tassert.Equal(expect, actual, fmt.Sprintf(\"Test No. 
%d\", i+1))\n\t\ttime.Sleep(1050 * time.Millisecond)\n\t}\n}\n<|endoftext|>"} {"text":"package chip8\n\nimport \"testing\"\n\nfunc checkHex(t *testing.T, subject string, got, want uint16) {\n\tif got != want {\n\t\tt.Errorf(\"%s => 0x%04X; want 0x%04X\", subject, got, want)\n\t}\n}\n\nfunc TestCPU_Step(t *testing.T) {\n\tc := NewCPU(nil)\n\tc.Memory[200] = 0xA1\n\tc.Memory[201] = 0x00\n\n\tif err := c.Step(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckHex(t, \"PC\", c.PC, uint16(202))\n}\n\nfunc TestCPU_Dispatch(t *testing.T) {\n\ttests := []struct {\n\t\top uint16\n\t\tbefore func(*CPU)\n\t\tcheck func(*CPU)\n\t}{\n\t\t{\n\t\t\tuint16(0x2100),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"Stack[0]\", c.Stack[0], uint16(0xC8))\n\t\t\t\tcheckHex(t, \"SP\", uint16(c.SP), uint16(0x1))\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(0x100))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x3123),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x3103),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x4123),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x4103),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x5120),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x04\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x5120),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x6102),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x02))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x7102),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x02))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x7102),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x03))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8120),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x01))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8121),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x10\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x11))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8122),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x10\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x00))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8123),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x00))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8124),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8124),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0xFF\n\t\t\t\tc.V[2] = 
0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8125),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0xFF\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFC))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8125),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x02\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFF))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8126),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x1))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8126),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x02\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x1))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8127),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0xFF\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFC))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8127),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x02\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFF))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x812E),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x812E),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x81\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x9120),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x02\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x9120),\n\t\t\tfunc(c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0xA100),\n\t\t\tnil,\n\t\t\tfunc(c *CPU) {\n\t\t\t\tcheckHex(t, \"I\", c.I, uint16(0x100))\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc := NewCPU(nil)\n\t\tif tt.before != nil {\n\t\t\ttt.before(c)\n\t\t}\n\t\tc.Dispatch(tt.op)\n\t\ttt.check(c)\n\n\t\tif t.Failed() {\n\t\t\tt.Logf(\"==============\")\n\t\t\tt.Logf(\"Opcode: 0x%04X\", tt.op)\n\t\t\tt.Logf(\"CPU: %v\", c)\n\t\t\tt.Logf(\"==============\")\n\t\t\tt.FailNow()\n\t\t}\n\t}\n}\n\nfunc TestCPU_op(t *testing.T) {\n\tc := NewCPU(nil)\n\tc.Memory[200] = 0xA2\n\tc.Memory[201] = 0xF0\n\n\tcheckHex(t, \"op\", c.op(), uint16(0xA2F0))\n}\nCleanup tests.package chip8\n\nimport \"testing\"\n\nvar opcodeTests = map[string][]struct {\n\top uint16\n\tbefore func(*testing.T, *CPU)\n\tcheck func(*testing.T, *CPU)\n}{\n\t\"2nnn - CALL addr\": {\n\t\t{\n\t\t\tuint16(0x2100),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c 
*CPU) {\n\t\t\t\tcheckHex(t, \"Stack[0]\", c.Stack[0], uint16(0xC8))\n\t\t\t\tcheckHex(t, \"SP\", uint16(c.SP), uint16(0x1))\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(0x100))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"3xkk - SE Vx, byte\": {\n\t\t{\n\t\t\tuint16(0x3123),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x3103),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"4xkk - SNE Vx, byte\": {\n\t\t{\n\t\t\tuint16(0x4123),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x4103),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"5xy0 - SE Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x5120),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x04\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x5120),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"6xkk - LD Vx, byte\": {\n\t\t{\n\t\t\tuint16(0x6102),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x02))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"7xkk - ADD Vx, byte\": {\n\t\t{\n\t\t\tuint16(0x7102),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x02))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x7102),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x03))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy0 - LD Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8120),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x01))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy1 - OR Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8121),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x10\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x11))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy2 - AND Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8122),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x10\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x00))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy3 - XOR Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8123),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x00))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy4 - ADD Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8124),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8124),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 
0xFF\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy5 - SUB Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8125),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0xFF\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFC))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8125),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x02\n\t\t\t\tc.V[2] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFF))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy6 - SHR Vx {, Vy}\": {\n\t\t{\n\t\t\tuint16(0x8126),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x1))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8126),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x02\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x1))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xy7 - SUBN Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x8127),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0xFF\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFC))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x8127),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x03\n\t\t\t\tc.V[2] = 0x02\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0xFF))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"8xyE - SHL Vx {, Vy}\": {\n\t\t{\n\t\t\tuint16(0x812E),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x0))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x812E),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x81\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"VF\", uint16(c.V[0xF]), uint16(0x1))\n\t\t\t\tcheckHex(t, \"V[1]\", uint16(c.V[1]), uint16(0x2))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"9xy0 - SNE Vx, Vy\": {\n\t\t{\n\t\t\tuint16(0x9120),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x02\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(202))\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tuint16(0x9120),\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tc.V[1] = 0x01\n\t\t\t\tc.V[2] = 0x01\n\t\t\t},\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"PC\", c.PC, uint16(200))\n\t\t\t},\n\t\t},\n\t},\n\n\t\"Annn - LD I, addr\": {\n\t\t{\n\t\t\tuint16(0xA100),\n\t\t\tnil,\n\t\t\tfunc(t *testing.T, c *CPU) {\n\t\t\t\tcheckHex(t, \"I\", c.I, uint16(0x100))\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc checkHex(t *testing.T, subject string, got, want uint16) {\n\tif got != want {\n\t\tt.Errorf(\"%s => 0x%04X; want 0x%04X\", subject, got, want)\n\t}\n}\n\nfunc TestCPU_Step(t *testing.T) {\n\tc := 
NewCPU(nil)\n\tc.Memory[200] = 0xA1\n\tc.Memory[201] = 0x00\n\n\tif err := c.Step(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcheckHex(t, \"PC\", c.PC, uint16(202))\n}\n\nfunc TestOpcodes(t *testing.T) {\n\tfor i, tests := range opcodeTests {\n\t\tfor _, tt := range tests {\n\t\t\tc := NewCPU(nil)\n\t\t\tif tt.before != nil {\n\t\t\t\ttt.before(t, c)\n\t\t\t}\n\t\t\tc.Dispatch(tt.op)\n\t\t\ttt.check(t, c)\n\n\t\t\tif t.Failed() {\n\t\t\t\tt.Logf(\"==============\")\n\t\t\t\tt.Logf(\"Instruction: %s\", i)\n\t\t\t\tt.Logf(\"Opcode: 0x%04X\", tt.op)\n\t\t\t\tt.Logf(\"CPU: %v\", c)\n\t\t\t\tt.Logf(\"==============\")\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCPU_op(t *testing.T) {\n\tc := NewCPU(nil)\n\tc.Memory[200] = 0xA2\n\tc.Memory[201] = 0xF0\n\n\tcheckHex(t, \"op\", c.op(), uint16(0xA2F0))\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n\/*\nPackage stats contains support for OpenCensus stats collection.\n\nOpenCensus allows users to create typed measures, record measurements,\naggregate the collected data, and export the aggregated data.\n\nMeasures\n\nA measure represents a type of metric to be tracked and recorded.\nFor example, latency, request Mb\/s, and response Mb\/s are measures\nto collect from a server.\n\nEach measure needs to be registered before being used. Measure\nconstructors such as NewMeasureInt64 and NewMeasureFloat64 automatically\nregisters the measure by the given name. Each registered measure needs\nto be unique by name. Measures also have a description and a unit.\n\nLibraries can define and export measures for their end users to\ncreate views and collect instrumentation data.\n\nRecording measurements\n\nMeasurement is a data point to be collected for a measure. For example,\nfor a latency (ms) measure, 100 is a measurement that represents a 100ms\nlatency event. Users collect data points on the existing measures with\nthe current context. Tags from the current context is recorded with the\nmeasurements if they are any.\n\nRecorded measurements are dropped immediately if user is not aggregating\nthem via views. Users don't necessarily need to conditionally enable\/disable\nrecording to reduce cost. Recording of measurements is cheap.\n\nLibraries can always record measurements, and end-user can later decide\non which measurements they want to collect by registering views. This allows\nlibraries to turn on the instrumentation by default.\n\nViews\n\nIn order to collect measurements, views need to be defined and registered.\nA view allows recorded measurements to be filtered and aggregated over a time window.\n\nAll recorded measurements can be filtered by a list of tags.\n\nOpenCensus provides several aggregation methods: count, distribution, sum and mean.\nCount aggregation only counts the number of measurement points. Distribution\naggregation provides statistical summary of the aggregated data. 
Sum aggregation\nsums up the measurement points. Mean provides the mean of the recorded measurements.\nAggregations can either happen cumulatively or over an interval.\n\nUsers can dynamically create and delete views.\n\nLibraries can export their own views and claim the view names\nby registering them themselves.\n\nExporting\n\nCollected and aggregated data can be exported to a metric collection\nbackend by registering its exporter.\n\nMultiple exporters can be registered to upload the data to various\ndifferent backends. Users need to unregister the exporters once they\nno longer are needed.\n*\/\npackage stats \/\/ import \"go.opencensus.io\/stats\"\n\n\/\/ TODO(acetechnologist): Add a link to the language independent OpenCensus\n\/\/ spec when it is available.\nFix minor stats\/doc.go typos (#266)\/\/ Copyright 2017, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\n\/*\nPackage stats contains support for OpenCensus stats collection.\n\nOpenCensus allows users to create typed measures, record measurements,\naggregate the collected data, and export the aggregated data.\n\nMeasures\n\nA measure represents a type of metric to be tracked and recorded.\nFor example, latency, request Mb\/s, and response Mb\/s are measures\nto collect from a server.\n\nEach measure needs to be registered before being used. Measure\nconstructors such as NewMeasureInt64 and NewMeasureFloat64 automatically\nregister the measure by the given name. Each registered measure needs\nto be unique by name. Measures also have a description and a unit.\n\nLibraries can define and export measures for their end users to\ncreate views and collect instrumentation data.\n\nRecording measurements\n\nMeasurement is a data point to be collected for a measure. For example,\nfor a latency (ms) measure, 100 is a measurement that represents a 100ms\nlatency event. Users collect data points on the existing measures with\nthe current context. Tags from the current context are recorded with the\nmeasurements if there are any.\n\nRecorded measurements are dropped immediately if the user is not aggregating\nthem via views. Users don't necessarily need to conditionally enable\/disable\nrecording to reduce cost. Recording of measurements is cheap.\n\nLibraries can always record measurements, and end-users can later decide\non which measurements they want to collect by registering views. This allows\nlibraries to turn on the instrumentation by default.\n\nViews\n\nIn order to collect measurements, views need to be defined and registered.\nA view allows recorded measurements to be filtered and aggregated over a time window.\n\nAll recorded measurements can be filtered by a list of tags.\n\nOpenCensus provides several aggregation methods: count, distribution, sum and mean.\nCount aggregation only counts the number of measurement points. Distribution\naggregation provides statistical summary of the aggregated data. Sum aggregation\nsums up the measurement points. 
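For example, recording the\nmeasurements 1, 2 and 3 yields a sum of 6. 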
Mean provides the mean of the recorded measurements.\nAggregations can either happen cumulatively or over an interval.\n\nUsers can dynamically create and delete views.\n\nLibraries can export their own views and claim the view names\nby registering them themselves.\n\nExporting\n\nCollected and aggregated data can be exported to a metric collection\nbackend by registering its exporter.\n\nMultiple exporters can be registered to upload the data to various\ndifferent backends. Users need to unregister the exporters once they\nno longer are needed.\n*\/\npackage stats \/\/ import \"go.opencensus.io\/stats\"\n\n\/\/ TODO(acetechnologist): Add a link to the language independent OpenCensus\n\/\/ spec when it is available.\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/kbfs\/kbfscrypto\"\n\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ journalMDOps is an implementation of MDOps that delegates to a\n\/\/ TLF's mdJournal, if one exists. Specifically, it intercepts put\n\/\/ calls to write to the journal instead of the MDServer, where\n\/\/ something else is presumably flushing the journal to the MDServer.\n\/\/\n\/\/ It then intercepts get calls to provide a combined view of the MDs\n\/\/ from the journal and the server when the journal is\n\/\/ non-empty. Specifically, if rev is the earliest revision in the\n\/\/ journal, and BID is the branch ID of the journal (which can only\n\/\/ have one), then any requests for revisions >= rev on BID will be\n\/\/ served from the journal instead of the server. If BID is empty,\n\/\/ i.e. the journal is holding merged revisions, then this means that\n\/\/ all merged revisions on the server from rev are hidden.\n\/\/\n\/\/ TODO: This makes server updates meaningless for revisions >=\n\/\/ rev. Fix this.\ntype journalMDOps struct {\n\tMDOps\n\tjServer *JournalServer\n}\n\nvar _ MDOps = journalMDOps{}\n\n\/\/ convertImmutableBareRMDToIRMD decrypts the bare MD into a\n\/\/ full-fledged RMD.\nfunc (j journalMDOps) convertImmutableBareRMDToIRMD(ctx context.Context,\n\tibrmd ImmutableBareRootMetadata, handle *TlfHandle,\n\tuid keybase1.UID, key kbfscrypto.VerifyingKey) (\n\tImmutableRootMetadata, error) {\n\t\/\/ TODO: Avoid having to do this type assertion.\n\tbrmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata)\n\tif !ok {\n\t\treturn ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{}\n\t}\n\n\trmd := makeRootMetadata(brmd, ibrmd.extra, handle)\n\n\tconfig := j.jServer.config\n\tpmd, err := decryptMDPrivateData(ctx, config.Codec(), config.Crypto(),\n\t\tconfig.BlockCache(), config.BlockOps(), config.KeyManager(),\n\t\tuid, rmd.GetSerializedPrivateMetadata(), rmd, rmd)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\trmd.data = pmd\n\tirmd := MakeImmutableRootMetadata(\n\t\trmd, key, ibrmd.mdID, ibrmd.localTimestamp)\n\treturn irmd, nil\n}\n\n\/\/ getHeadFromJournal returns the head RootMetadata for the TLF with\n\/\/ the given ID stored in the journal, assuming it exists and matches\n\/\/ the given branch ID and merge status. 
As a special case, if bid is\n\/\/ NullBranchID and mStatus is Unmerged, the branch ID check is\n\/\/ skipped.\nfunc (j journalMDOps) getHeadFromJournal(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\thandle *TlfHandle) (\n\tImmutableRootMetadata, error) {\n\ttlfJournal, ok := j.jServer.getTLFJournal(id)\n\tif !ok {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\thead, err := tlfJournal.getMDHead(ctx)\n\tif err == errTLFJournalDisabled {\n\t\treturn ImmutableRootMetadata{}, nil\n\t} else if err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\tif head == (ImmutableBareRootMetadata{}) {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\tif head.MergedStatus() != mStatus {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\tif mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {\n\t\t\/\/ The given branch ID doesn't match the one in the\n\t\t\/\/ journal, which can only be an error.\n\t\treturn ImmutableRootMetadata{},\n\t\t\tfmt.Errorf(\"Expected branch ID %s, got %s\",\n\t\t\t\tbid, head.BID())\n\t}\n\n\theadBareHandle, err := head.MakeBareTlfHandleWithExtra()\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\tif handle == nil {\n\t\thandle, err = MakeTlfHandle(\n\t\t\tctx, headBareHandle, j.jServer.config.KBPKI())\n\t\tif err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\t} else {\n\t\t\/\/ Check for mutual handle resolution.\n\t\theadHandle, err := MakeTlfHandle(ctx, headBareHandle,\n\t\t\tj.jServer.config.KBPKI())\n\t\tif err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\n\t\tif err := headHandle.MutuallyResolvesTo(ctx, j.jServer.config.Codec(),\n\t\t\tj.jServer.config.KBPKI(), *handle, head.RevisionNumber(),\n\t\t\thead.TlfID(), j.jServer.log); err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\t}\n\n\tirmd, err := j.convertImmutableBareRMDToIRMD(\n\t\tctx, head, handle, tlfJournal.uid, tlfJournal.key)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\treturn irmd, nil\n}\n\nfunc (j journalMDOps) getRangeFromJournal(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tstart, stop MetadataRevision) (\n\t[]ImmutableRootMetadata, error) {\n\ttlfJournal, ok := j.jServer.getTLFJournal(id)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\tibrmds, err := tlfJournal.getMDRange(ctx, start, stop)\n\tif err == errTLFJournalDisabled {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(ibrmds) == 0 {\n\t\treturn nil, nil\n\t}\n\n\thead := ibrmds[len(ibrmds)-1]\n\n\tif head.MergedStatus() != mStatus {\n\t\treturn nil, nil\n\t}\n\n\tif mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {\n\t\t\/\/ The given branch ID doesn't match the one in the\n\t\t\/\/ journal, which can only be an error.\n\t\treturn nil, fmt.Errorf(\"Expected branch ID %s, got %s\",\n\t\t\tbid, head.BID())\n\t}\n\n\tbareHandle, err := head.MakeBareTlfHandleWithExtra()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thandle, err := MakeTlfHandle(ctx, bareHandle, j.jServer.config.KBPKI())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tirmds := make([]ImmutableRootMetadata, 0, len(ibrmds))\n\n\tfor _, ibrmd := range ibrmds {\n\t\tirmd, err := j.convertImmutableBareRMDToIRMD(\n\t\t\tctx, ibrmd, handle, tlfJournal.uid, tlfJournal.key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tirmds = append(irmds, irmd)\n\t}\n\n\treturn irmds, nil\n}\n\nfunc (j journalMDOps) GetForHandle(\n\tctx context.Context, handle *TlfHandle, mStatus 
MergeStatus) (\n\ttlf.ID, ImmutableRootMetadata, error) {\n\t\/\/ Need to always consult the server to get the tlfID. No need to\n\t\/\/ optimize this, since all subsequent lookups will be by\n\t\/\/ TLF. Although if we did want to, we could store a handle -> TLF\n\t\/\/ ID mapping with the journals. If we are looking for an\n\t\/\/ unmerged head, that exists only in the journal, so check the\n\t\/\/ remote server only to get the TLF ID.\n\tremoteMStatus := mStatus\n\tif mStatus == Unmerged {\n\t\tremoteMStatus = Merged\n\t}\n\ttlfID, rmd, err := j.MDOps.GetForHandle(ctx, handle, remoteMStatus)\n\tif err != nil {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{}, err\n\t}\n\n\tif rmd != (ImmutableRootMetadata{}) && (rmd.TlfID() != tlfID) {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{},\n\t\t\tfmt.Errorf(\"Expected RMD to have TLF ID %s, but got %s\",\n\t\t\t\ttlfID, rmd.TlfID())\n\t}\n\n\t\/\/ If the journal has a head, use that.\n\tirmd, err := j.getHeadFromJournal(\n\t\tctx, tlfID, NullBranchID, mStatus, handle)\n\tif err != nil {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{}, err\n\t}\n\tif irmd != (ImmutableRootMetadata{}) {\n\t\treturn tlf.ID{}, irmd, nil\n\t}\n\tif remoteMStatus != mStatus {\n\t\treturn tlfID, ImmutableRootMetadata{}, nil\n\t}\n\n\t\/\/ Otherwise, use the server's head.\n\treturn tlfID, rmd, nil\n}\n\n\/\/ TODO: Combine the two GetForTLF functions in MDOps to avoid the\n\/\/ need for this helper function.\nfunc (j journalMDOps) getForTLF(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tdelegateFn func(context.Context, tlf.ID) (ImmutableRootMetadata, error)) (\n\tImmutableRootMetadata, error) {\n\t\/\/ If the journal has a head, use that.\n\tirmd, err := j.getHeadFromJournal(ctx, id, bid, mStatus, nil)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\tif irmd != (ImmutableRootMetadata{}) {\n\t\treturn irmd, nil\n\t}\n\n\t\/\/ Otherwise, consult the server instead.\n\treturn delegateFn(ctx, id)\n}\n\nfunc (j journalMDOps) GetForTLF(\n\tctx context.Context, id tlf.ID) (ImmutableRootMetadata, error) {\n\treturn j.getForTLF(ctx, id, NullBranchID, Merged, j.MDOps.GetForTLF)\n}\n\nfunc (j journalMDOps) GetUnmergedForTLF(\n\tctx context.Context, id tlf.ID, bid BranchID) (\n\tImmutableRootMetadata, error) {\n\tdelegateFn := func(ctx context.Context, id tlf.ID) (\n\t\tImmutableRootMetadata, error) {\n\t\treturn j.MDOps.GetUnmergedForTLF(ctx, id, bid)\n\t}\n\treturn j.getForTLF(ctx, id, bid, Unmerged, delegateFn)\n}\n\n\/\/ TODO: Combine the two GetRange functions in MDOps to avoid the need\n\/\/ for this helper function.\nfunc (j journalMDOps) getRange(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tstart, stop MetadataRevision,\n\tdelegateFn func(ctx context.Context, id tlf.ID,\n\t\tstart, stop MetadataRevision) (\n\t\t[]ImmutableRootMetadata, error)) (\n\t[]ImmutableRootMetadata, error) {\n\t\/\/ Grab the range from the journal first.\n\tjirmds, err := j.getRangeFromJournal(ctx, id, bid, mStatus, start, stop)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If it's empty or disabled, just fall back to the server.\n\tif len(jirmds) == 0 || err == errTLFJournalDisabled {\n\t\treturn delegateFn(ctx, id, start, stop)\n\t}\n\n\t\/\/ If the first revision from the journal is the first\n\t\/\/ revision we asked for, then just return the range from the\n\t\/\/ journal.\n\tif jirmds[0].Revision() == start {\n\t\treturn jirmds, nil\n\t}\n\n\t\/\/ Otherwise, fetch the rest from the server and prepend 
them.\n\tserverStop := jirmds[0].Revision() - 1\n\tirmds, err := delegateFn(ctx, id, start, serverStop)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(irmds) == 0 {\n\t\treturn jirmds, nil\n\t}\n\n\tlastRev := irmds[len(irmds)-1].Revision()\n\tif lastRev != serverStop {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Expected last server rev %d, got %d\",\n\t\t\tserverStop, lastRev)\n\t}\n\n\treturn append(irmds, jirmds...), nil\n}\n\nfunc (j journalMDOps) GetRange(\n\tctx context.Context, id tlf.ID, start, stop MetadataRevision) (\n\t[]ImmutableRootMetadata, error) {\n\treturn j.getRange(ctx, id, NullBranchID, Merged, start, stop,\n\t\tj.MDOps.GetRange)\n}\n\nfunc (j journalMDOps) GetUnmergedRange(\n\tctx context.Context, id tlf.ID, bid BranchID,\n\tstart, stop MetadataRevision) ([]ImmutableRootMetadata, error) {\n\tdelegateFn := func(ctx context.Context, id tlf.ID,\n\t\tstart, stop MetadataRevision) (\n\t\t[]ImmutableRootMetadata, error) {\n\t\treturn j.MDOps.GetUnmergedRange(ctx, id, bid, start, stop)\n\t}\n\treturn j.getRange(ctx, id, bid, Unmerged, start, stop,\n\t\tdelegateFn)\n}\n\nfunc (j journalMDOps) Put(ctx context.Context, rmd *RootMetadata) (\n\tMdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {\n\t\t\/\/ Just route to the journal.\n\t\tmdID, err := tlfJournal.putMD(ctx, rmd)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.Put(ctx, rmd)\n}\n\nfunc (j journalMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (\n\tMdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {\n\t\trmd.SetUnmerged()\n\t\tmdID, err := tlfJournal.putMD(ctx, rmd)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.PutUnmerged(ctx, rmd)\n}\n\nfunc (j journalMDOps) PruneBranch(\n\tctx context.Context, id tlf.ID, bid BranchID) error {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(id); ok {\n\t\t\/\/ Prune the journal, too.\n\t\terr := tlfJournal.clearMDs(ctx, bid)\n\t\tif err != nil && err != errTLFJournalDisabled {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn j.MDOps.PruneBranch(ctx, id, bid)\n}\n\nfunc (j journalMDOps) ResolveBranch(\n\tctx context.Context, id tlf.ID, bid BranchID,\n\tblocksToDelete []BlockID, rmd *RootMetadata) (MdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(id); ok {\n\t\tmdID, err := tlfJournal.resolveBranch(\n\t\t\tctx, bid, blocksToDelete, rmd, rmd.extra)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.ResolveBranch(ctx, id, bid, blocksToDelete, rmd)\n}\njournal_md_ops: don't return an err if journal is disabled\/\/ Copyright 2016 Keybase Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD\n\/\/ license that can be found in the LICENSE file.\n\npackage libkbfs\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/kbfs\/kbfscrypto\"\n\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ journalMDOps is an implementation of MDOps that delegates to a\n\/\/ TLF's mdJournal, if one exists. Specifically, it intercepts put\n\/\/ calls to write to the journal instead of the MDServer, where\n\/\/ something else is presumably flushing the journal to the MDServer.\n\/\/\n\/\/ It then intercepts get calls to provide a combined view of the MDs\n\/\/ from the journal and the server when the journal is\n\/\/ non-empty. 
Specifically, if rev is the earliest revision in the\n\/\/ journal, and BID is the branch ID of the journal (which can only\n\/\/ have one), then any requests for revisions >= rev on BID will be\n\/\/ served from the journal instead of the server. If BID is empty,\n\/\/ i.e. the journal is holding merged revisions, then this means that\n\/\/ all merged revisions on the server from rev are hidden.\n\/\/\n\/\/ TODO: This makes server updates meaningless for revisions >=\n\/\/ rev. Fix this.\ntype journalMDOps struct {\n\tMDOps\n\tjServer *JournalServer\n}\n\nvar _ MDOps = journalMDOps{}\n\n\/\/ convertImmutableBareRMDToIRMD decrypts the bare MD into a\n\/\/ full-fledged RMD.\nfunc (j journalMDOps) convertImmutableBareRMDToIRMD(ctx context.Context,\n\tibrmd ImmutableBareRootMetadata, handle *TlfHandle,\n\tuid keybase1.UID, key kbfscrypto.VerifyingKey) (\n\tImmutableRootMetadata, error) {\n\t\/\/ TODO: Avoid having to do this type assertion.\n\tbrmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata)\n\tif !ok {\n\t\treturn ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{}\n\t}\n\n\trmd := makeRootMetadata(brmd, ibrmd.extra, handle)\n\n\tconfig := j.jServer.config\n\tpmd, err := decryptMDPrivateData(ctx, config.Codec(), config.Crypto(),\n\t\tconfig.BlockCache(), config.BlockOps(), config.KeyManager(),\n\t\tuid, rmd.GetSerializedPrivateMetadata(), rmd, rmd)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\trmd.data = pmd\n\tirmd := MakeImmutableRootMetadata(\n\t\trmd, key, ibrmd.mdID, ibrmd.localTimestamp)\n\treturn irmd, nil\n}\n\n\/\/ getHeadFromJournal returns the head RootMetadata for the TLF with\n\/\/ the given ID stored in the journal, assuming it exists and matches\n\/\/ the given branch ID and merge status. As a special case, if bid is\n\/\/ NullBranchID and mStatus is Unmerged, the branch ID check is\n\/\/ skipped.\nfunc (j journalMDOps) getHeadFromJournal(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\thandle *TlfHandle) (\n\tImmutableRootMetadata, error) {\n\ttlfJournal, ok := j.jServer.getTLFJournal(id)\n\tif !ok {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\thead, err := tlfJournal.getMDHead(ctx)\n\tif err == errTLFJournalDisabled {\n\t\treturn ImmutableRootMetadata{}, nil\n\t} else if err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\tif head == (ImmutableBareRootMetadata{}) {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\tif head.MergedStatus() != mStatus {\n\t\treturn ImmutableRootMetadata{}, nil\n\t}\n\n\tif mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {\n\t\t\/\/ The given branch ID doesn't match the one in the\n\t\t\/\/ journal, which can only be an error.\n\t\treturn ImmutableRootMetadata{},\n\t\t\tfmt.Errorf(\"Expected branch ID %s, got %s\",\n\t\t\t\tbid, head.BID())\n\t}\n\n\theadBareHandle, err := head.MakeBareTlfHandleWithExtra()\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\tif handle == nil {\n\t\thandle, err = MakeTlfHandle(\n\t\t\tctx, headBareHandle, j.jServer.config.KBPKI())\n\t\tif err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\t} else {\n\t\t\/\/ Check for mutual handle resolution.\n\t\theadHandle, err := MakeTlfHandle(ctx, headBareHandle,\n\t\t\tj.jServer.config.KBPKI())\n\t\tif err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\n\t\tif err := headHandle.MutuallyResolvesTo(ctx, j.jServer.config.Codec(),\n\t\t\tj.jServer.config.KBPKI(), *handle, head.RevisionNumber(),\n\t\t\thead.TlfID(), j.jServer.log); 
err != nil {\n\t\t\treturn ImmutableRootMetadata{}, err\n\t\t}\n\t}\n\n\tirmd, err := j.convertImmutableBareRMDToIRMD(\n\t\tctx, head, handle, tlfJournal.uid, tlfJournal.key)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\n\treturn irmd, nil\n}\n\nfunc (j journalMDOps) getRangeFromJournal(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tstart, stop MetadataRevision) (\n\t[]ImmutableRootMetadata, error) {\n\ttlfJournal, ok := j.jServer.getTLFJournal(id)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\tibrmds, err := tlfJournal.getMDRange(ctx, start, stop)\n\tif err == errTLFJournalDisabled {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(ibrmds) == 0 {\n\t\treturn nil, nil\n\t}\n\n\thead := ibrmds[len(ibrmds)-1]\n\n\tif head.MergedStatus() != mStatus {\n\t\treturn nil, nil\n\t}\n\n\tif mStatus == Unmerged && bid != NullBranchID && bid != head.BID() {\n\t\t\/\/ The given branch ID doesn't match the one in the\n\t\t\/\/ journal, which can only be an error.\n\t\treturn nil, fmt.Errorf(\"Expected branch ID %s, got %s\",\n\t\t\tbid, head.BID())\n\t}\n\n\tbareHandle, err := head.MakeBareTlfHandleWithExtra()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thandle, err := MakeTlfHandle(ctx, bareHandle, j.jServer.config.KBPKI())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tirmds := make([]ImmutableRootMetadata, 0, len(ibrmds))\n\n\tfor _, ibrmd := range ibrmds {\n\t\tirmd, err := j.convertImmutableBareRMDToIRMD(\n\t\t\tctx, ibrmd, handle, tlfJournal.uid, tlfJournal.key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tirmds = append(irmds, irmd)\n\t}\n\n\treturn irmds, nil\n}\n\nfunc (j journalMDOps) GetForHandle(\n\tctx context.Context, handle *TlfHandle, mStatus MergeStatus) (\n\ttlf.ID, ImmutableRootMetadata, error) {\n\t\/\/ Need to always consult the server to get the tlfID. No need to\n\t\/\/ optimize this, since all subsequent lookups will be by\n\t\/\/ TLF. Although if we did want to, we could store a handle -> TLF\n\t\/\/ ID mapping with the journals. 
If we are looking for an\n\t\/\/ unmerged head, that exists only in the journal, so check the\n\t\/\/ remote server only to get the TLF ID.\n\tremoteMStatus := mStatus\n\tif mStatus == Unmerged {\n\t\tremoteMStatus = Merged\n\t}\n\ttlfID, rmd, err := j.MDOps.GetForHandle(ctx, handle, remoteMStatus)\n\tif err != nil {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{}, err\n\t}\n\n\tif rmd != (ImmutableRootMetadata{}) && (rmd.TlfID() != tlfID) {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{},\n\t\t\tfmt.Errorf(\"Expected RMD to have TLF ID %s, but got %s\",\n\t\t\t\ttlfID, rmd.TlfID())\n\t}\n\n\t\/\/ If the journal has a head, use that.\n\tirmd, err := j.getHeadFromJournal(\n\t\tctx, tlfID, NullBranchID, mStatus, handle)\n\tif err != nil {\n\t\treturn tlf.ID{}, ImmutableRootMetadata{}, err\n\t}\n\tif irmd != (ImmutableRootMetadata{}) {\n\t\treturn tlf.ID{}, irmd, nil\n\t}\n\tif remoteMStatus != mStatus {\n\t\treturn tlfID, ImmutableRootMetadata{}, nil\n\t}\n\n\t\/\/ Otherwise, use the server's head.\n\treturn tlfID, rmd, nil\n}\n\n\/\/ TODO: Combine the two GetForTLF functions in MDOps to avoid the\n\/\/ need for this helper function.\nfunc (j journalMDOps) getForTLF(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tdelegateFn func(context.Context, tlf.ID) (ImmutableRootMetadata, error)) (\n\tImmutableRootMetadata, error) {\n\t\/\/ If the journal has a head, use that.\n\tirmd, err := j.getHeadFromJournal(ctx, id, bid, mStatus, nil)\n\tif err != nil {\n\t\treturn ImmutableRootMetadata{}, err\n\t}\n\tif irmd != (ImmutableRootMetadata{}) {\n\t\treturn irmd, nil\n\t}\n\n\t\/\/ Otherwise, consult the server instead.\n\treturn delegateFn(ctx, id)\n}\n\nfunc (j journalMDOps) GetForTLF(\n\tctx context.Context, id tlf.ID) (ImmutableRootMetadata, error) {\n\treturn j.getForTLF(ctx, id, NullBranchID, Merged, j.MDOps.GetForTLF)\n}\n\nfunc (j journalMDOps) GetUnmergedForTLF(\n\tctx context.Context, id tlf.ID, bid BranchID) (\n\tImmutableRootMetadata, error) {\n\tdelegateFn := func(ctx context.Context, id tlf.ID) (\n\t\tImmutableRootMetadata, error) {\n\t\treturn j.MDOps.GetUnmergedForTLF(ctx, id, bid)\n\t}\n\treturn j.getForTLF(ctx, id, bid, Unmerged, delegateFn)\n}\n\n\/\/ TODO: Combine the two GetRange functions in MDOps to avoid the need\n\/\/ for this helper function.\nfunc (j journalMDOps) getRange(\n\tctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus,\n\tstart, stop MetadataRevision,\n\tdelegateFn func(ctx context.Context, id tlf.ID,\n\t\tstart, stop MetadataRevision) (\n\t\t[]ImmutableRootMetadata, error)) (\n\t[]ImmutableRootMetadata, error) {\n\t\/\/ Grab the range from the journal first.\n\tjirmds, err := j.getRangeFromJournal(ctx, id, bid, mStatus, start, stop)\n\tif err != nil && err != errTLFJournalDisabled {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If it's empty or disabled, just fall back to the server.\n\tif len(jirmds) == 0 || err == errTLFJournalDisabled {\n\t\treturn delegateFn(ctx, id, start, stop)\n\t}\n\n\t\/\/ If the first revision from the journal is the first\n\t\/\/ revision we asked for, then just return the range from the\n\t\/\/ journal.\n\tif jirmds[0].Revision() == start {\n\t\treturn jirmds, nil\n\t}\n\n\t\/\/ Otherwise, fetch the rest from the server and prepend them.\n\tserverStop := jirmds[0].Revision() - 1\n\tirmds, err := delegateFn(ctx, id, start, serverStop)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(irmds) == 0 {\n\t\treturn jirmds, nil\n\t}\n\n\tlastRev := irmds[len(irmds)-1].Revision()\n\tif lastRev != serverStop 
{\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Expected last server rev %d, got %d\",\n\t\t\tserverStop, lastRev)\n\t}\n\n\treturn append(irmds, jirmds...), nil\n}\n\nfunc (j journalMDOps) GetRange(\n\tctx context.Context, id tlf.ID, start, stop MetadataRevision) (\n\t[]ImmutableRootMetadata, error) {\n\treturn j.getRange(ctx, id, NullBranchID, Merged, start, stop,\n\t\tj.MDOps.GetRange)\n}\n\nfunc (j journalMDOps) GetUnmergedRange(\n\tctx context.Context, id tlf.ID, bid BranchID,\n\tstart, stop MetadataRevision) ([]ImmutableRootMetadata, error) {\n\tdelegateFn := func(ctx context.Context, id tlf.ID,\n\t\tstart, stop MetadataRevision) (\n\t\t[]ImmutableRootMetadata, error) {\n\t\treturn j.MDOps.GetUnmergedRange(ctx, id, bid, start, stop)\n\t}\n\treturn j.getRange(ctx, id, bid, Unmerged, start, stop,\n\t\tdelegateFn)\n}\n\nfunc (j journalMDOps) Put(ctx context.Context, rmd *RootMetadata) (\n\tMdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {\n\t\t\/\/ Just route to the journal.\n\t\tmdID, err := tlfJournal.putMD(ctx, rmd)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.Put(ctx, rmd)\n}\n\nfunc (j journalMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata) (\n\tMdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(rmd.TlfID()); ok {\n\t\trmd.SetUnmerged()\n\t\tmdID, err := tlfJournal.putMD(ctx, rmd)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.PutUnmerged(ctx, rmd)\n}\n\nfunc (j journalMDOps) PruneBranch(\n\tctx context.Context, id tlf.ID, bid BranchID) error {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(id); ok {\n\t\t\/\/ Prune the journal, too.\n\t\terr := tlfJournal.clearMDs(ctx, bid)\n\t\tif err != nil && err != errTLFJournalDisabled {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn j.MDOps.PruneBranch(ctx, id, bid)\n}\n\nfunc (j journalMDOps) ResolveBranch(\n\tctx context.Context, id tlf.ID, bid BranchID,\n\tblocksToDelete []BlockID, rmd *RootMetadata) (MdID, error) {\n\tif tlfJournal, ok := j.jServer.getTLFJournal(id); ok {\n\t\tmdID, err := tlfJournal.resolveBranch(\n\t\t\tctx, bid, blocksToDelete, rmd, rmd.extra)\n\t\tif err != errTLFJournalDisabled {\n\t\t\treturn mdID, err\n\t\t}\n\t}\n\n\treturn j.MDOps.ResolveBranch(ctx, id, bid, blocksToDelete, rmd)\n}\n<|endoftext|>"} {"text":"Change the S종목별_일일_가격정보_모음 function<|endoftext|>"} {"text":"\/\/ Copyright 2012 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nHist shows the history of a given file, using Arq backups.\n\n usage: hist [-d] [-h host] [-m mtpt] [-s yyyy\/mmdd] file ...\n\nThe -d flag causes it to show diffs between successive versions.\n\nBy default, hist assumes backups are mounted at mtpt\/host, where\nmtpt defaults to \/mnt\/arq and host is the first element of the local host name.\nHist starts the file list with the present copy of the file.\n\nThe -h and -s flags override these assumptions.\n\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar usageString = `usage: hist [-d] [-h host] [-m mtpt] [-s yyyy\/mmdd] file ...\n\nHist lists the known versions of the given file.\nThe -d flag causes it to show diffs between successive versions.\n\nBy default, hist assumes backups are mounted at mtpt\/host, where\nmtpt defaults to \/mnt\/arq and host is the first element of the local host name.\nHist starts the file list with the present copy of the file.\n\nThe -h and -s flags override these assumptions.\n`\n\nvar (\n\tdiff = flag.Bool(\"d\", false, \"diff\")\n\thost = flag.String(\"h\", defaultHost(), \"host name\")\n\tmtpt = flag.String(\"m\", \"\/mnt\/arq\", \"mount point\")\n\tvers = flag.String(\"s\", \"\", \"version\")\n)\n\nfunc defaultHost() string {\n\tname, _ := os.Hostname()\n\tif name == \"\" {\n\t\tname = \"gnot\"\n\t}\n\tif i := strings.Index(name, \".\"); i >= 0 {\n\t\tname = name[:i]\n\t}\n\treturn name\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, usageString)\n\t\tos.Exit(2)\n\t}\n\t\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tflag.Usage()\n\t}\n\t\n\tdates := loadDates()\n\tfor _, file := range args {\n\t\tlist(dates, file)\n\t}\n}\n\nvar (\n\tyyyy = regexp.MustCompile(`^\\d{4}$`)\n\tmmdd = regexp.MustCompile(`^\\d{4}(\\.\\d+)?$`)\n)\n\nfunc loadDates() []string {\n\tvar all []string\n\tydir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(3)\n\t}\n\tfor _, y := range ydir {\n\t\tif !y.IsDir() || !yyyy.MatchString(y.Name()) {\n\t\t\tcontinue\n\t\t}\n\t\tddir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host, y.Name()))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, d := range ddir {\n\t\t\tif !d.IsDir() || !mmdd.MatchString(d.Name()) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdate := y.Name() + \"\/\" + d.Name()\n\t\t\tif *vers > date {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tall = append(all, filepath.Join(*mtpt, *host, date))\n\t\t}\n\t}\n\treturn all\n}\t\t\n\nconst timeFormat = \"Jan 02 15:04:05 MST 2006\"\n\nfunc list(dates []string, file string) {\n\tvar (\n\t\tlast os.FileInfo\n\t\tlastPath string\n\t)\n\n\tfi, err := os.Stat(file)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"hist: warning: %s: %v\\n\", file, err)\n\t} else {\n\t\tfmt.Printf(\"%s %s %d\\n\", fi.ModTime().Format(timeFormat), file, fi.Size())\n\t\tlast = fi\n\t\tlastPath = file\n\t}\n\t\n\tfile, err = filepath.Abs(file)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"hist: abs: %v\\n\", err)\n\t\treturn\n\t}\n\n\tfor i := len(dates)-1; i >= 0; i-- {\n\t\tp := filepath.Join(dates[i], file)\n\t\tfi, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif last != nil && fi.ModTime() == last.ModTime() && fi.Size() == last.Size() {\n\t\t\tcontinue\n\t\t}\n\t\tif *diff 
{\n\t\t\tcmd := exec.Command(\"diff\", lastPath, p)\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tif err := cmd.Start(); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\t}\n\t\t\tcmd.Wait()\n\t\t}\n\t\tfmt.Printf(\"%s %s %d\\n\", fi.ModTime().Format(timeFormat), file, fi.Size())\n\t\tlast = fi\n\t\tlastPath = p\n\t}\n}\n\narq\/hist: fix print\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nHist shows the history of a given file, using Arq backups.\n\n usage: hist [-d] [-h host] [-m mtpt] [-s yyyy\/mmdd] file ...\n\nThe -d flag causes it to show diffs between successive versions.\n\nBy default, hist assumes backups are mounted at mtpt\/host, where\nmtpt defaults to \/mnt\/arq and host is the first element of the local host name.\nHist starts the file list with the present copy of the file.\n\nThe -h and -s flags override these assumptions.\n\n*\/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar usageString = `usage: hist [-d] [-h host] [-m mtpt] [-s yyyy\/mmdd] file ...\n\nHist lists the known versions of the given file.\nThe -d flag causes it to show diffs between successive versions.\n\nBy default, hist assumes backups are mounted at mtpt\/host, where\nmtpt defaults to \/mnt\/arq and host is the first element of the local host name.\nHist starts the file list with the present copy of the file.\n\nThe -h and -s flags override these assumptions.\n`\n\nvar (\n\tdiff = flag.Bool(\"d\", false, \"diff\")\n\thost = flag.String(\"h\", defaultHost(), \"host name\")\n\tmtpt = flag.String(\"m\", \"\/mnt\/arq\", \"mount point\")\n\tvers = flag.String(\"s\", \"\", \"version\")\n)\n\nfunc defaultHost() string {\n\tname, _ := os.Hostname()\n\tif name == \"\" {\n\t\tname = \"gnot\"\n\t}\n\tif i := strings.Index(name, \".\"); i >= 0 {\n\t\tname = name[:i]\n\t}\n\treturn name\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprint(os.Stderr, usageString)\n\t\tos.Exit(2)\n\t}\n\t\n\tflag.Parse()\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tflag.Usage()\n\t}\n\t\n\tdates := loadDates()\n\tfor _, file := range args {\n\t\tlist(dates, file)\n\t}\n}\n\nvar (\n\tyyyy = regexp.MustCompile(`^\\d{4}$`)\n\tmmdd = regexp.MustCompile(`^\\d{4}(\\.\\d+)?$`)\n)\n\nfunc loadDates() []string {\n\tvar all []string\n\tydir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(3)\n\t}\n\tfor _, y := range ydir {\n\t\tif !y.IsDir() || !yyyy.MatchString(y.Name()) {\n\t\t\tcontinue\n\t\t}\n\t\tddir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host, y.Name()))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, d := range ddir {\n\t\t\tif !d.IsDir() || !mmdd.MatchString(d.Name()) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdate := y.Name() + \"\/\" + d.Name()\n\t\t\tif *vers > date {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tall = append(all, filepath.Join(*mtpt, *host, date))\n\t\t}\n\t}\n\treturn all\n}\t\t\n\nconst timeFormat = \"Jan 02 15:04:05 MST 2006\"\n\nfunc list(dates []string, file string) {\n\tvar (\n\t\tlast os.FileInfo\n\t\tlastPath string\n\t)\n\n\tfi, err := os.Stat(file)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"hist: warning: %s: %v\\n\", file, err)\n\t} else {\n\t\tfmt.Printf(\"%s %s %d\\n\", fi.ModTime().Format(timeFormat), file, fi.Size())\n\t\tlast = 
fi\n\t\tlastPath = file\n\t}\n\t\n\tfile, err = filepath.Abs(file)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"hist: abs: %v\\n\", err)\n\t\treturn\n\t}\n\n\tfor i := len(dates)-1; i >= 0; i-- {\n\t\tp := filepath.Join(dates[i], file)\n\t\tfi, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif last != nil && fi.ModTime() == last.ModTime() && fi.Size() == last.Size() {\n\t\t\tcontinue\n\t\t}\n\t\tif *diff {\n\t\t\tcmd := exec.Command(\"diff\", lastPath, p)\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tif err := cmd.Start(); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\t}\n\t\t\tcmd.Wait()\n\t\t}\n\t\tfmt.Printf(\"%s %s %d\\n\", fi.ModTime().Format(timeFormat), p, fi.Size())\n\t\tlast = fi\n\t\tlastPath = p\n\t}\n}\n\n<|endoftext|>"} {"text":"package main\n\nfunc main() {}\nfunc sink(id int, v interface{}) {}\n\nfunc link(from interface{}, into interface{}) {}\n\nfunc newSource(id int) interface{} {\n\treturn nil\n}\nFix go autoformatpackage main\n\nfunc main() {}\nfunc sink(id int, v interface{}) {}\n\nfunc link(from interface{}, into interface{}) {}\n\nfunc newSource(id int) interface{} {\n\treturn nil\n}\n<|endoftext|>"} {"text":"package make\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\ntype SolrCore struct {\n\tAddress string\n\tName string\n\tTemplate string\n\tPath string\n\tLegacy bool\n}\n\nfunc logSolrInstall() bool {\n\tif verifySolrInstall() {\n\t\tlog.Infoln(\"Found Solr installation\")\n\t\treturn true\n\t} else {\n\t\tlog.Errorln(\"Could not find Solr installation\")\n\t\treturn false\n\t}\n}\nfunc logSolrCLI() bool {\n\tif verifySolrCLI() {\n\t\tlog.Infoln(\"Found Solr command-line tools\")\n\t\treturn true\n\t} else {\n\t\tlog.Warnln(\"Could not find Solr command-line tools\")\n\t\treturn false\n\t}\n}\nfunc logResources(Template string) bool {\n\tif verifyResources(Template) {\n\t\tlog.Infoln(\"Found configuration folder\")\n\t\treturn true\n\t} else {\n\t\tlog.Errorln(\"Could not find configuration folder\")\n\t\treturn false\n\t}\n}\nfunc logSolrCore(SolrCore *SolrCore) bool {\n\tif verifySolrCore(SolrCore) {\n\t\tlog.Infoln(\"Solr core is installed.\")\n\t\treturn true\n\t} else {\n\t\tlog.Warnln(\"Solr core is not installed.\")\n\t\treturn false\n\t}\n}\nfunc verifySolrInstall() bool {\n\t_, err := os.Stat(\"\/opt\/solr\")\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifySolrCLI() bool {\n\t_, err := os.Stat(\"\/opt\/solr\/bin\/solr\")\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifyResources(Template string) bool {\n\t_, err := os.Stat(Template)\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifySolrCore(SolrCore *SolrCore) bool {\n\tcurlResponse, err := exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=STATUS\").Output()\n\tif err == nil {\n\t\tif strings.Contains(string(curlResponse), ``+SolrCore.Name+`<\/str>`) == true {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tlog.Errorln(\"Solr could not be accessed using CURL:\", err.Error())\n\t}\n\treturn false\n}\n\nfunc NewCore(Address, Name, Template, Path string) SolrCore {\n\treturn SolrCore{Address, Name, Template, Path, false}\n}\n\nfunc (SolrCore *SolrCore) Install() {\n\tif logSolrInstall() && logResources(SolrCore.Template) {\n\t\tlog.Infoln(\"All checks have passed.\")\n\t\tdataDir := \"\"\n\t\tif SolrCore.Legacy 
{\n\t\t\tlog.Infoln(\"Installing legacy file system for Solr < 5.0\")\n\t\t\tdataDir = SolrCore.Path + \"\/\" + SolrCore.Name + \"\/conf\/\"\n\t\t} else {\n\t\t\tdataDir = SolrCore.Path + \"\/data\/\" + SolrCore.Name + \"\/conf\/\"\n\t\t}\n\n\t\t\/\/ Create data directories\n\t\terr := os.MkdirAll(dataDir, 0777)\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Directory has been created.\", dataDir)\n\t\t} else {\n\t\t\tlog.Errorln(\"Directory has not been created:\", err.Error())\n\t\t}\n\n\t\t\/\/ Sync\n\t\t_, err = exec.Command(\"rsync\", \"-a\", SolrCore.Template+\"\/\", dataDir).Output()\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Configuration has been synced with boilerplate resources.\")\n\t\t} else {\n\t\t\tlog.Errorln(\"Configuration could not be synced with boilerplate resources:\", err.Error())\n\t\t}\n\n\t\tif logSolrCLI() == true {\n\t\t\t\/\/ Install core via CLI\n\t\t\tcliOut, err := exec.Command(\"\/opt\/solr\/bin\/solr\", \"create\", \"-c\", SolrCore.Name).Output()\n\t\t\tif err == nil && strings.Contains(string(cliOut), \"Unable to create core\") == false {\n\t\t\t\tif verifySolrCore(SolrCore) {\n\t\t\t\t\tlog.Infoln(\"Core has been installed and verified successfully.\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorln(\"Core could not be installed, check logs for details.\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Errorln(\"Core could not be installed:\", err.Error(), string(cliOut))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Install core via CURL\n\t\t\t_, err = exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=CREATE&name=\"+SolrCore.Name+\"&instanceDir=\"+SolrCore.Name+\"&dataDir=data&config=solrconfig.xml&schema=schema.xml\").Output()\n\t\t\tif err == nil {\n\t\t\t\tlog.Infoln(\"Core has been successfully installed.\")\n\t\t\t} else {\n\t\t\t\tlog.Errorln(\"Core could not be installed:\", err)\n\t\t\t}\n\t\t}\n\t}\n\tverifySolrCore(SolrCore)\n}\n\nfunc (SolrCore *SolrCore) Uninstall() {\n\tif verifySolrInstall() && verifySolrCLI() {\n\t\t_, err := exec.Command(\"sh\", \"-c\", \"\/opt\/solr\/bin\/solr\", \"delete\", \"-c\", SolrCore.Name).Output()\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Core has been successfully uninstalled.\")\n\t\t} else {\n\t\t\tlog.Errorln(\"Core could not be uninstalled:\", err)\n\t\t}\n\t} else if verifySolrInstall() && !verifySolrCLI() {\n\t\t_, err := exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=UNLOAD&core=\"+SolrCore.Name).Output()\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Core has been successfully uninstalled.\")\n\t\t} else {\n\t\t\tlog.Errorln(\"Core could not be uninstalled:\", err)\n\t\t}\n\t}\n\terr := os.RemoveAll(SolrCore.Path + \"\/\" + SolrCore.Name)\n\tif err == nil {\n\t\tlog.Infoln(\"Core resources have been removed.\")\n\t} else {\n\t\tlog.Errorln(\"Core resources could not be removed:\", err)\n\t}\n}\nFallback to curl as a fixed solution for creating and deleting cores.package make\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\ntype SolrCore struct {\n\tAddress string\n\tName string\n\tTemplate string\n\tPath string\n\tLegacy bool\n}\n\nfunc logSolrInstall() bool {\n\tif verifySolrInstall() {\n\t\tlog.Infoln(\"Found Solr installation\")\n\t\treturn true\n\t} else {\n\t\tlog.Errorln(\"Could not find Solr installation\")\n\t\treturn false\n\t}\n}\nfunc logSolrCLI() bool {\n\tif verifySolrCLI() {\n\t\tlog.Infoln(\"Found Solr command-line tools\")\n\t\treturn true\n\t} else {\n\t\tlog.Warnln(\"Could not find Solr command-line tools\")\n\t\treturn 
false\n\t}\n}\nfunc logResources(Template string) bool {\n\tif verifyResources(Template) {\n\t\tlog.Infoln(\"Found configuration folder\")\n\t\treturn true\n\t} else {\n\t\tlog.Errorln(\"Could not find configuration folder\")\n\t\treturn false\n\t}\n}\nfunc logSolrCore(SolrCore *SolrCore) bool {\n\tif verifySolrCore(SolrCore) {\n\t\tlog.Infoln(\"Solr core is installed.\")\n\t\treturn true\n\t} else {\n\t\tlog.Warnln(\"Solr core is not installed.\")\n\t\treturn false\n\t}\n}\nfunc verifySolrInstall() bool {\n\t_, err := os.Stat(\"\/opt\/solr\")\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifySolrCLI() bool {\n\t_, err := os.Stat(\"\/opt\/solr\/bin\/solr\")\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifyResources(Template string) bool {\n\t_, err := os.Stat(Template)\n\tif err == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\nfunc verifySolrCore(SolrCore *SolrCore) bool {\n\tcurlResponse, err := exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=STATUS\").Output()\n\tif err == nil {\n\t\tif strings.Contains(string(curlResponse), ``+SolrCore.Name+`<\/str>`) == true {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tlog.Errorln(\"Solr could not be accessed using CURL:\", err.Error())\n\t}\n\treturn false\n}\n\nfunc NewCore(Address, Name, Template, Path string) SolrCore {\n\treturn SolrCore{Address, Name, Template, Path, false}\n}\n\nfunc (SolrCore *SolrCore) Install() {\n\tif logSolrInstall() && logResources(SolrCore.Template) {\n\t\tlog.Infoln(\"All checks have passed.\")\n\t\tdataDir := \"\"\n\t\tif SolrCore.Legacy {\n\t\t\tlog.Infoln(\"Installing legacy file system for Solr < 5.0\")\n\t\t\tdataDir = SolrCore.Path + \"\/\" + SolrCore.Name + \"\/conf\/\"\n\t\t} else {\n\t\t\tdataDir = SolrCore.Path + \"\/data\/\" + SolrCore.Name + \"\/conf\/\"\n\t\t}\n\n\t\t\/\/ Create data directories\n\t\terr := os.MkdirAll(dataDir, 0777)\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Directory has been created.\", dataDir)\n\t\t} else {\n\t\t\tlog.Errorln(\"Directory has not been created:\", err.Error())\n\t\t}\n\n\t\t\/\/ Sync\n\t\t_, err = exec.Command(\"rsync\", \"-a\", SolrCore.Template+\"\/\", dataDir).Output()\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Configuration has been synced with boilerplate resources.\")\n\t\t} else {\n\t\t\tlog.Errorln(\"Configuration could not be synced with boilerplate resources:\", err.Error())\n\t\t}\n\n\t\t_, err = exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=CREATE&name=\"+SolrCore.Name+\"&instanceDir=\"+SolrCore.Name+\"&dataDir=data&config=solrconfig.xml&schema=schema.xml\").Output()\n\t\tif err == nil {\n\t\t\tlog.Infoln(\"Core has been successfully installed.\")\n\t\t} else {\n\t\t\tlog.Errorln(\"Core could not be installed:\", err)\n\t\t}\n\t}\n\tverifySolrCore(SolrCore)\n}\n\nfunc (SolrCore *SolrCore) Uninstall() {\n\t_, err := exec.Command(\"curl\", SolrCore.Address+\"\/solr\/admin\/cores?action=UNLOAD&core=\"+SolrCore.Name).Output()\n\tif err == nil {\n\t\tlog.Infoln(\"Core has been successfully uninstalled.\")\n\t} else {\n\t\tlog.Errorln(\"Core could not be uninstalled:\", err)\n\t}\n\terr = os.RemoveAll(SolrCore.Path + \"\/\" + SolrCore.Name)\n\tif err == nil {\n\t\tlog.Infoln(\"Core resources have been removed.\")\n\t} else {\n\t\tlog.Errorln(\"Core resources could not be removed:\", err)\n\t}\n}\n<|endoftext|>"} {"text":"package libcontainer\n\nimport 
(\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\n\tsecurejoin \"github.com\/cyphar\/filepath-securejoin\"\n\t\"github.com\/moby\/sys\/mountinfo\"\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/manager\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\/validate\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/intelrdt\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tstateFilename = \"state.json\"\n\texecFifoFilename = \"exec.fifo\"\n)\n\nvar idRegex = regexp.MustCompile(`^[\\w+-\\.]+$`)\n\n\/\/ InitArgs returns an options func to configure a LinuxFactory with the\n\/\/ provided init binary path and arguments.\nfunc InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) (err error) {\n\t\tif len(args) > 0 {\n\t\t\t\/\/ Resolve relative paths to ensure that its available\n\t\t\t\/\/ after directory changes.\n\t\t\tif args[0], err = filepath.Abs(args[0]); err != nil {\n\t\t\t\t\/\/ The only error returned from filepath.Abs is\n\t\t\t\t\/\/ the one from os.Getwd, i.e. a system error.\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tl.InitArgs = args\n\t\treturn nil\n\t}\n}\n\n\/\/ IntelRdtfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the Intel RDT \"resource control\" filesystem to\n\/\/ create and manage Intel RDT resources (e.g., L3 cache, memory bandwidth).\nfunc IntelRdtFs(l *LinuxFactory) error {\n\tif !intelrdt.IsCATEnabled() && !intelrdt.IsMBAEnabled() {\n\t\tl.NewIntelRdtManager = nil\n\t} else {\n\t\tl.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager {\n\t\t\treturn intelrdt.NewManager(config, id, path)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.\nfunc TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mountinfo.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := mount(\"tmpfs\", l.Root, \"\", \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CriuPath returns an option func to configure a LinuxFactory with the\n\/\/ provided criupath\nfunc CriuPath(criupath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.CriuPath = criupath\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns a linux based container factory based in the root directory and\n\/\/ configures the factory with the provided option funcs.\nfunc New(root string, options ...func(*LinuxFactory) error) (Factory, error) {\n\tif root != \"\" {\n\t\tif err := os.MkdirAll(root, 0o700); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tl := &LinuxFactory{\n\t\tRoot: root,\n\t\tInitPath: \"\/proc\/self\/exe\",\n\t\tInitArgs: []string{os.Args[0], \"init\"},\n\t\tValidator: validate.New(),\n\t\tCriuPath: \"criu\",\n\t}\n\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn l, nil\n}\n\n\/\/ LinuxFactory implements the default factory interface for linux based systems.\ntype LinuxFactory struct {\n\t\/\/ Root directory for the factory to store state.\n\tRoot string\n\n\t\/\/ InitPath is the path for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitPath 
string\n\n\t\/\/ InitArgs are arguments for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitArgs []string\n\n\t\/\/ CriuPath is the path to the criu binary used for checkpoint and restore of\n\t\/\/ containers.\n\tCriuPath string\n\n\t\/\/ New{u,g}idmapPath is the path to the binaries used for mapping with\n\t\/\/ rootless containers.\n\tNewuidmapPath string\n\tNewgidmapPath string\n\n\t\/\/ Validator provides validation to container configurations.\n\tValidator validate.Validator\n\n\t\/\/ NewIntelRdtManager returns an initialized Intel RDT manager for a single container.\n\tNewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager\n}\n\nfunc (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, errors.New(\"root not set\")\n\t}\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := l.Validator.Validate(config); err != nil {\n\t\treturn nil, err\n\t}\n\tcontainerRoot, err := securejoin.SecureJoin(l.Root, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := os.Stat(containerRoot); err == nil {\n\t\treturn nil, ErrExist\n\t} else if !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\tcm, err := manager.New(config.Cgroups)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check that cgroup does not exist or empty (no processes).\n\t\/\/ Note for cgroup v1 this check is not thorough, as there are multiple\n\t\/\/ separate hierarchies, while both Exists() and GetAllPids() only use\n\t\/\/ one for \"devices\" controller (assuming others are the same, which is\n\t\/\/ probably true in almost all scenarios). Checking all the hierarchies\n\t\/\/ would be too expensive.\n\tif cm.Exists() {\n\t\tpids, err := cm.GetAllPids()\n\t\t\/\/ Reading PIDs can race with cgroups removal, so ignore ENOENT and ENODEV.\n\t\tif err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, unix.ENODEV) {\n\t\t\treturn nil, fmt.Errorf(\"unable to get cgroup PIDs: %w\", err)\n\t\t}\n\t\tif len(pids) != 0 {\n\t\t\t\/\/ TODO: return an error.\n\t\t\tlogrus.Warnf(\"container's cgroup is not empty: %d process(es) found\", len(pids))\n\t\t\tlogrus.Warn(\"DEPRECATED: running container in a non-empty cgroup won't be supported in runc 1.2; https:\/\/github.com\/opencontainers\/runc\/issues\/3132\")\n\t\t}\n\t}\n\n\t\/\/ Check that cgroup is not frozen. 
Do not use Exists() here\n\t\/\/ since in cgroup v1 it only checks \"devices\" controller.\n\tst, err := cm.GetFreezerState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get cgroup freezer state: %w\", err)\n\t}\n\tif st == configs.Frozen {\n\t\treturn nil, errors.New(\"container's cgroup unexpectedly frozen\")\n\t}\n\n\tif err := os.MkdirAll(containerRoot, 0o711); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil {\n\t\treturn nil, err\n\t}\n\tc := &linuxContainer{\n\t\tid: id,\n\t\troot: containerRoot,\n\t\tconfig: config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: cm,\n\t}\n\tif l.NewIntelRdtManager != nil {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(config, id, \"\")\n\t}\n\tc.state = &stoppedState{c: c}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Load(id string) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, errors.New(\"root not set\")\n\t}\n\t\/\/ when load, we need to check id is valid or not.\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tcontainerRoot, err := securejoin.SecureJoin(l.Root, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstate, err := l.loadState(containerRoot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &nonChildProcess{\n\t\tprocessPid: state.InitProcessPid,\n\t\tprocessStartTime: state.InitProcessStartTime,\n\t\tfds: state.ExternalDescriptors,\n\t}\n\tcm, err := manager.NewWithPaths(state.Config.Cgroups, state.CgroupPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &linuxContainer{\n\t\tinitProcess: r,\n\t\tinitProcessStartTime: state.InitProcessStartTime,\n\t\tid: id,\n\t\tconfig: &state.Config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: cm,\n\t\troot: containerRoot,\n\t\tcreated: state.Created,\n\t}\n\tif l.NewIntelRdtManager != nil {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath)\n\t}\n\tc.state = &loadedState{c: c}\n\tif err := c.refreshState(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Type() string {\n\treturn \"libcontainer\"\n}\n\n\/\/ StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state\n\/\/ This is a low level implementation detail of the reexec and should not be consumed externally\nfunc (l *LinuxFactory) StartInitialization() (err error) {\n\t\/\/ Get the INITPIPE.\n\tenvInitPipe := os.Getenv(\"_LIBCONTAINER_INITPIPE\")\n\tpipefd, err := strconv.Atoi(envInitPipe)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to convert _LIBCONTAINER_INITPIPE: %w\", err)\n\t\tlogrus.Error(err)\n\t\treturn err\n\t}\n\tpipe := os.NewFile(uintptr(pipefd), \"pipe\")\n\tdefer pipe.Close()\n\n\tdefer func() {\n\t\t\/\/ We have an error during the initialization of the container's init,\n\t\t\/\/ send it back to the parent process in the form of an initError.\n\t\tif werr := writeSync(pipe, procError); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t\tif werr := utils.WriteJSON(pipe, &initError{Message: err.Error()}); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t\/\/ Only init processes have FIFOFD.\n\tfifofd := -1\n\tenvInitType := 
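// StartInitialization above repeatedly performs the same Getenv + Atoi +
// os.NewFile dance for _LIBCONTAINER_INITPIPE, _LIBCONTAINER_FIFOFD,
// _LIBCONTAINER_CONSOLE and _LIBCONTAINER_LOGPIPE: the parent passes
// inherited file descriptors to the re-exec'ed init by number through
// these variables. The pattern could be factored as below (fdFromEnv is an
// illustrative helper, not part of the original file):
func fdFromEnv(key string) (*os.File, error) {
	fd, err := strconv.Atoi(os.Getenv(key))
	if err != nil {
		return nil, fmt.Errorf("unable to convert %s: %w", key, err)
	}
	return os.NewFile(uintptr(fd), key), nil
}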
os.Getenv(\"_LIBCONTAINER_INITTYPE\")\n\tit := initType(envInitType)\n\tif it == initStandard {\n\t\tenvFifoFd := os.Getenv(\"_LIBCONTAINER_FIFOFD\")\n\t\tif fifofd, err = strconv.Atoi(envFifoFd); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_FIFOFD: %w\", err)\n\t\t}\n\t}\n\n\tvar consoleSocket *os.File\n\tif envConsole := os.Getenv(\"_LIBCONTAINER_CONSOLE\"); envConsole != \"\" {\n\t\tconsole, err := strconv.Atoi(envConsole)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_CONSOLE: %w\", err)\n\t\t}\n\t\tconsoleSocket = os.NewFile(uintptr(console), \"console-socket\")\n\t\tdefer consoleSocket.Close()\n\t}\n\n\tlogPipeFdStr := os.Getenv(\"_LIBCONTAINER_LOGPIPE\")\n\tlogPipeFd, err := strconv.Atoi(logPipeFdStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_LOGPIPE: %w\", err)\n\t}\n\n\t\/\/ Get mount files (O_PATH).\n\tmountFds, err := parseMountFds()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ clear the current process's environment to clean any libcontainer\n\t\/\/ specific env vars.\n\tos.Clearenv()\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"panic from initialization: %w, %v\", e, string(debug.Stack()))\n\t\t}\n\t}()\n\n\ti, err := newContainerInit(it, pipe, consoleSocket, fifofd, logPipeFd, mountFds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.\n\treturn i.Init()\n}\n\nfunc (l *LinuxFactory) loadState(root string) (*State, error) {\n\tstateFilePath, err := securejoin.SecureJoin(root, stateFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf, err := os.Open(stateFilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, ErrNotExist\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tvar state *State\n\tif err := json.NewDecoder(f).Decode(&state); err != nil {\n\t\treturn nil, err\n\t}\n\treturn state, nil\n}\n\nfunc (l *LinuxFactory) validateID(id string) error {\n\tif !idRegex.MatchString(id) || string(os.PathSeparator)+id != utils.CleanPath(string(os.PathSeparator)+id) {\n\t\treturn ErrInvalidID\n\t}\n\n\treturn nil\n}\n\n\/\/ NewuidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewuidmapPath = newuidmapPath\n\t\treturn nil\n\t}\n}\n\n\/\/ NewgidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewgidmapPath = newgidmapPath\n\t\treturn nil\n\t}\n}\n\nfunc parseMountFds() ([]int, error) {\n\tfdsJson := os.Getenv(\"_LIBCONTAINER_MOUNT_FDS\")\n\tif fdsJson == \"\" {\n\t\t\/\/ Always return the nil slice if no fd is present.\n\t\treturn nil, nil\n\t}\n\n\tvar mountFds []int\n\tif err := json.Unmarshal([]byte(fdsJson), &mountFds); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error unmarshalling _LIBCONTAINER_MOUNT_FDS: %w\", err)\n\t}\n\n\treturn mountFds, nil\n}\nlibct: Create: rm unneeded chownpackage libcontainer\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\n\tsecurejoin 
\"github.com\/cyphar\/filepath-securejoin\"\n\t\"github.com\/moby\/sys\/mountinfo\"\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/manager\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\/validate\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/intelrdt\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tstateFilename = \"state.json\"\n\texecFifoFilename = \"exec.fifo\"\n)\n\nvar idRegex = regexp.MustCompile(`^[\\w+-\\.]+$`)\n\n\/\/ InitArgs returns an options func to configure a LinuxFactory with the\n\/\/ provided init binary path and arguments.\nfunc InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) (err error) {\n\t\tif len(args) > 0 {\n\t\t\t\/\/ Resolve relative paths to ensure that its available\n\t\t\t\/\/ after directory changes.\n\t\t\tif args[0], err = filepath.Abs(args[0]); err != nil {\n\t\t\t\t\/\/ The only error returned from filepath.Abs is\n\t\t\t\t\/\/ the one from os.Getwd, i.e. a system error.\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tl.InitArgs = args\n\t\treturn nil\n\t}\n}\n\n\/\/ IntelRdtfs is an options func to configure a LinuxFactory to return\n\/\/ containers that use the Intel RDT \"resource control\" filesystem to\n\/\/ create and manage Intel RDT resources (e.g., L3 cache, memory bandwidth).\nfunc IntelRdtFs(l *LinuxFactory) error {\n\tif !intelrdt.IsCATEnabled() && !intelrdt.IsMBAEnabled() {\n\t\tl.NewIntelRdtManager = nil\n\t} else {\n\t\tl.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager {\n\t\t\treturn intelrdt.NewManager(config, id, path)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.\nfunc TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mountinfo.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := mount(\"tmpfs\", l.Root, \"\", \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CriuPath returns an option func to configure a LinuxFactory with the\n\/\/ provided criupath\nfunc CriuPath(criupath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.CriuPath = criupath\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns a linux based container factory based in the root directory and\n\/\/ configures the factory with the provided option funcs.\nfunc New(root string, options ...func(*LinuxFactory) error) (Factory, error) {\n\tif root != \"\" {\n\t\tif err := os.MkdirAll(root, 0o700); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tl := &LinuxFactory{\n\t\tRoot: root,\n\t\tInitPath: \"\/proc\/self\/exe\",\n\t\tInitArgs: []string{os.Args[0], \"init\"},\n\t\tValidator: validate.New(),\n\t\tCriuPath: \"criu\",\n\t}\n\n\tfor _, opt := range options {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(l); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn l, nil\n}\n\n\/\/ LinuxFactory implements the default factory interface for linux based systems.\ntype LinuxFactory struct {\n\t\/\/ Root directory for the factory to store state.\n\tRoot string\n\n\t\/\/ InitPath is the path for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitPath string\n\n\t\/\/ InitArgs are arguments for calling the init responsibilities for spawning\n\t\/\/ a container.\n\tInitArgs []string\n\n\t\/\/ CriuPath is the path 
to the criu binary used for checkpoint and restore of\n\t\/\/ containers.\n\tCriuPath string\n\n\t\/\/ New{u,g}idmapPath is the path to the binaries used for mapping with\n\t\/\/ rootless containers.\n\tNewuidmapPath string\n\tNewgidmapPath string\n\n\t\/\/ Validator provides validation to container configurations.\n\tValidator validate.Validator\n\n\t\/\/ NewIntelRdtManager returns an initialized Intel RDT manager for a single container.\n\tNewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager\n}\n\nfunc (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, errors.New(\"root not set\")\n\t}\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := l.Validator.Validate(config); err != nil {\n\t\treturn nil, err\n\t}\n\tcontainerRoot, err := securejoin.SecureJoin(l.Root, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := os.Stat(containerRoot); err == nil {\n\t\treturn nil, ErrExist\n\t} else if !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\tcm, err := manager.New(config.Cgroups)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check that cgroup does not exist or empty (no processes).\n\t\/\/ Note for cgroup v1 this check is not thorough, as there are multiple\n\t\/\/ separate hierarchies, while both Exists() and GetAllPids() only use\n\t\/\/ one for \"devices\" controller (assuming others are the same, which is\n\t\/\/ probably true in almost all scenarios). Checking all the hierarchies\n\t\/\/ would be too expensive.\n\tif cm.Exists() {\n\t\tpids, err := cm.GetAllPids()\n\t\t\/\/ Reading PIDs can race with cgroups removal, so ignore ENOENT and ENODEV.\n\t\tif err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, unix.ENODEV) {\n\t\t\treturn nil, fmt.Errorf(\"unable to get cgroup PIDs: %w\", err)\n\t\t}\n\t\tif len(pids) != 0 {\n\t\t\t\/\/ TODO: return an error.\n\t\t\tlogrus.Warnf(\"container's cgroup is not empty: %d process(es) found\", len(pids))\n\t\t\tlogrus.Warn(\"DEPRECATED: running container in a non-empty cgroup won't be supported in runc 1.2; https:\/\/github.com\/opencontainers\/runc\/issues\/3132\")\n\t\t}\n\t}\n\n\t\/\/ Check that cgroup is not frozen. 
Do not use Exists() here\n\t\/\/ since in cgroup v1 it only checks \"devices\" controller.\n\tst, err := cm.GetFreezerState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get cgroup freezer state: %w\", err)\n\t}\n\tif st == configs.Frozen {\n\t\treturn nil, errors.New(\"container's cgroup unexpectedly frozen\")\n\t}\n\n\tif err := os.MkdirAll(containerRoot, 0o711); err != nil {\n\t\treturn nil, err\n\t}\n\tc := &linuxContainer{\n\t\tid: id,\n\t\troot: containerRoot,\n\t\tconfig: config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: cm,\n\t}\n\tif l.NewIntelRdtManager != nil {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(config, id, \"\")\n\t}\n\tc.state = &stoppedState{c: c}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Load(id string) (Container, error) {\n\tif l.Root == \"\" {\n\t\treturn nil, errors.New(\"root not set\")\n\t}\n\t\/\/ when load, we need to check id is valid or not.\n\tif err := l.validateID(id); err != nil {\n\t\treturn nil, err\n\t}\n\tcontainerRoot, err := securejoin.SecureJoin(l.Root, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstate, err := l.loadState(containerRoot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &nonChildProcess{\n\t\tprocessPid: state.InitProcessPid,\n\t\tprocessStartTime: state.InitProcessStartTime,\n\t\tfds: state.ExternalDescriptors,\n\t}\n\tcm, err := manager.NewWithPaths(state.Config.Cgroups, state.CgroupPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &linuxContainer{\n\t\tinitProcess: r,\n\t\tinitProcessStartTime: state.InitProcessStartTime,\n\t\tid: id,\n\t\tconfig: &state.Config,\n\t\tinitPath: l.InitPath,\n\t\tinitArgs: l.InitArgs,\n\t\tcriuPath: l.CriuPath,\n\t\tnewuidmapPath: l.NewuidmapPath,\n\t\tnewgidmapPath: l.NewgidmapPath,\n\t\tcgroupManager: cm,\n\t\troot: containerRoot,\n\t\tcreated: state.Created,\n\t}\n\tif l.NewIntelRdtManager != nil {\n\t\tc.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath)\n\t}\n\tc.state = &loadedState{c: c}\n\tif err := c.refreshState(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\nfunc (l *LinuxFactory) Type() string {\n\treturn \"libcontainer\"\n}\n\n\/\/ StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state\n\/\/ This is a low level implementation detail of the reexec and should not be consumed externally\nfunc (l *LinuxFactory) StartInitialization() (err error) {\n\t\/\/ Get the INITPIPE.\n\tenvInitPipe := os.Getenv(\"_LIBCONTAINER_INITPIPE\")\n\tpipefd, err := strconv.Atoi(envInitPipe)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to convert _LIBCONTAINER_INITPIPE: %w\", err)\n\t\tlogrus.Error(err)\n\t\treturn err\n\t}\n\tpipe := os.NewFile(uintptr(pipefd), \"pipe\")\n\tdefer pipe.Close()\n\n\tdefer func() {\n\t\t\/\/ We have an error during the initialization of the container's init,\n\t\t\/\/ send it back to the parent process in the form of an initError.\n\t\tif werr := writeSync(pipe, procError); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t\tif werr := utils.WriteJSON(pipe, &initError{Message: err.Error()}); werr != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t\/\/ Only init processes have FIFOFD.\n\tfifofd := -1\n\tenvInitType := os.Getenv(\"_LIBCONTAINER_INITTYPE\")\n\tit := initType(envInitType)\n\tif it == initStandard {\n\t\tenvFifoFd := 
os.Getenv(\"_LIBCONTAINER_FIFOFD\")\n\t\tif fifofd, err = strconv.Atoi(envFifoFd); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_FIFOFD: %w\", err)\n\t\t}\n\t}\n\n\tvar consoleSocket *os.File\n\tif envConsole := os.Getenv(\"_LIBCONTAINER_CONSOLE\"); envConsole != \"\" {\n\t\tconsole, err := strconv.Atoi(envConsole)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_CONSOLE: %w\", err)\n\t\t}\n\t\tconsoleSocket = os.NewFile(uintptr(console), \"console-socket\")\n\t\tdefer consoleSocket.Close()\n\t}\n\n\tlogPipeFdStr := os.Getenv(\"_LIBCONTAINER_LOGPIPE\")\n\tlogPipeFd, err := strconv.Atoi(logPipeFdStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert _LIBCONTAINER_LOGPIPE: %w\", err)\n\t}\n\n\t\/\/ Get mount files (O_PATH).\n\tmountFds, err := parseMountFds()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ clear the current process's environment to clean any libcontainer\n\t\/\/ specific env vars.\n\tos.Clearenv()\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"panic from initialization: %w, %v\", e, string(debug.Stack()))\n\t\t}\n\t}()\n\n\ti, err := newContainerInit(it, pipe, consoleSocket, fifofd, logPipeFd, mountFds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.\n\treturn i.Init()\n}\n\nfunc (l *LinuxFactory) loadState(root string) (*State, error) {\n\tstateFilePath, err := securejoin.SecureJoin(root, stateFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf, err := os.Open(stateFilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, ErrNotExist\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tvar state *State\n\tif err := json.NewDecoder(f).Decode(&state); err != nil {\n\t\treturn nil, err\n\t}\n\treturn state, nil\n}\n\nfunc (l *LinuxFactory) validateID(id string) error {\n\tif !idRegex.MatchString(id) || string(os.PathSeparator)+id != utils.CleanPath(string(os.PathSeparator)+id) {\n\t\treturn ErrInvalidID\n\t}\n\n\treturn nil\n}\n\n\/\/ NewuidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewuidmapPath = newuidmapPath\n\t\treturn nil\n\t}\n}\n\n\/\/ NewgidmapPath returns an option func to configure a LinuxFactory with the\n\/\/ provided ..\nfunc NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tl.NewgidmapPath = newgidmapPath\n\t\treturn nil\n\t}\n}\n\nfunc parseMountFds() ([]int, error) {\n\tfdsJson := os.Getenv(\"_LIBCONTAINER_MOUNT_FDS\")\n\tif fdsJson == \"\" {\n\t\t\/\/ Always return the nil slice if no fd is present.\n\t\treturn nil, nil\n\t}\n\n\tvar mountFds []int\n\tif err := json.Unmarshal([]byte(fdsJson), &mountFds); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error unmarshalling _LIBCONTAINER_MOUNT_FDS: %w\", err)\n\t}\n\n\treturn mountFds, nil\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/Shopify\/themekit\/src\/cmdutil\"\n\t\"github.com\/Shopify\/themekit\/src\/colors\"\n\t\"github.com\/Shopify\/themekit\/src\/file\"\n\t\"github.com\/Shopify\/themekit\/src\/shopify\"\n)\n\nconst settingsDataKey = \"config\/settings_data.json\"\n\nvar (\n\tdeployCmd = &cobra.Command{\n\t\tUse: \"deploy \",\n\t\tShort: \"deploy files to shopify\",\n\t\tLong: `Deploy will 
overwrite specific files if provided with file names.\n If deploy is not provided with file names then it will deploy all\n the files on shopify with your local files. Any files that do not\n exist on your local machine will be removed from shopify unless the --soft\n flag is passed\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#deploy\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n\n\treplaceCmd = &cobra.Command{\n\t\tUse: \"replace \",\n\t\tShort: \"Overwrite theme file(s)\",\n\t\tLong: `Replace will overwrite specific files if provided with file names.\n If replace is not provided with file names then it will replace all\n the files on shopify with your local files. Any files that do not\n exist on your local machine will be removed from shopify.\n\n Deprecation Notice: This command is deprecated in v0.8.0 and will be removed in\n\tv0.8.1. Please use the 'deploy' command instead.\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#replace\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcolors.ColorStdOut.Printf(\"[%s] replace has been deprecated please use `deploy` instead\", colors.Yellow(\"WARN\"))\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n\n\tuploadCmd = &cobra.Command{\n\t\tUse: \"upload \",\n\t\tShort: \"Upload theme file(s) to shopify\",\n\t\tLong: `Upload will upload specific files to shopify servers if provided file names.\n If no filenames are provided then upload will upload every file in the project\n to shopify.\n\n Deprecation Notice: This command is deprecated in v0.8.0 and will be removed in\n\tv0.8.1. Please use the 'deploy' command instead.\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#upload\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcolors.ColorStdOut.Printf(\"[%s] upload has been deprecated please use `deploy` with the --nodelete flag instead\", colors.Yellow(\"WARN\"))\n\t\t\tflags.NoDelete = true\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n)\n\nfunc deploy(ctx cmdutil.Ctx) error {\n\tif ctx.Env.ReadOnly {\n\t\treturn fmt.Errorf(\"[%s] environment is readonly\", colors.Green(ctx.Env.Name))\n\t}\n\n\tassetsActions, err := generateActions(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar deployGroup sync.WaitGroup\n\tctx.StartProgress(len(assetsActions))\n\tfor path, op := range assetsActions {\n\t\tif path == settingsDataKey {\n\t\t\tdefer perform(ctx, path, op)\n\t\t\tcontinue\n\t\t}\n\t\tdeployGroup.Add(1)\n\t\tgo func(path string, op file.Op) {\n\t\t\tdefer deployGroup.Done()\n\t\t\tperform(ctx, path, op)\n\t\t}(path, op)\n\t}\n\n\tdeployGroup.Wait()\n\treturn nil\n}\n\nfunc generateActions(ctx cmdutil.Ctx) (map[string]file.Op, error) {\n\tassetsActions := map[string]file.Op{}\n\n\tif len(ctx.Args) == 0 && !ctx.Flags.NoDelete {\n\t\tremoteFiles, err := ctx.Client.GetAllAssets()\n\t\tif err != nil {\n\t\t\treturn assetsActions, err\n\t\t}\n\t\tfor _, filename := range remoteFiles {\n\t\t\tassetsActions[filename] = file.Remove\n\t\t}\n\t}\n\n\tlocalAssets, err := shopify.FindAssets(ctx.Env, ctx.Args...)\n\tif err != nil {\n\t\treturn assetsActions, err\n\t}\n\n\tfor _, path := range localAssets {\n\t\tassetsActions[path] = file.Update\n\t}\n\treturn assetsActions, nil\n}\nFixing windows deploy filepathspackage cmd\n\nimport 
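// Context for the "Fixing windows deploy filepaths" change above: the keys
// of assetsActions must agree on a single separator, otherwise on Windows
// the Remove entry (remote asset name) and the Update entry (local path)
// for the same asset would presumably occupy two different map keys, and
// the asset could be removed even though it was just uploaded.
// filepath.ToSlash rewrites the OS path separator to "/": on Windows
// `templates\index.liquid` becomes "templates/index.liquid"; on Unix it is
// a no-op because the separator is already "/", so it is safe to apply
// unconditionally:
func toSlashSketch(name string) string {
	return filepath.ToSlash(name)
}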
(\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/Shopify\/themekit\/src\/cmdutil\"\n\t\"github.com\/Shopify\/themekit\/src\/colors\"\n\t\"github.com\/Shopify\/themekit\/src\/file\"\n\t\"github.com\/Shopify\/themekit\/src\/shopify\"\n)\n\nconst settingsDataKey = \"config\/settings_data.json\"\n\nvar (\n\tdeployCmd = &cobra.Command{\n\t\tUse: \"deploy \",\n\t\tShort: \"deploy files to shopify\",\n\t\tLong: `Deploy will overwrite specific files if provided with file names.\n If deploy is not provided with file names then it will deploy all\n the files on shopify with your local files. Any files that do not\n exist on your local machine will be removed from shopify unless the --soft\n flag is passed\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#deploy\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n\n\treplaceCmd = &cobra.Command{\n\t\tUse: \"replace \",\n\t\tShort: \"Overwrite theme file(s)\",\n\t\tLong: `Replace will overwrite specific files if provided with file names.\n If replace is not provided with file names then it will replace all\n the files on shopify with your local files. Any files that do not\n exist on your local machine will be removed from shopify.\n\n Deprecation Notice: This command is deprecated in v0.8.0 and will be removed in\n\tv0.8.1. Please use the 'deploy' command instead.\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#replace\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcolors.ColorStdOut.Printf(\"[%s] replace has been deprecated please use `deploy` instead\", colors.Yellow(\"WARN\"))\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n\n\tuploadCmd = &cobra.Command{\n\t\tUse: \"upload \",\n\t\tShort: \"Upload theme file(s) to shopify\",\n\t\tLong: `Upload will upload specific files to shopify servers if provided file names.\n If no filenames are provided then upload will upload every file in the project\n to shopify.\n\n Deprecation Notice: This command is deprecated in v0.8.0 and will be removed in\n\tv0.8.1. 
Please use the 'deploy' command instead.\n\n For more documentation please see http:\/\/shopify.github.io\/themekit\/commands\/#upload\n `,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcolors.ColorStdOut.Printf(\"[%s] upload has been deprecated please use `deploy` with the --nodelete flag instead\", colors.Yellow(\"WARN\"))\n\t\t\tflags.NoDelete = true\n\t\t\treturn cmdutil.ForEachClient(flags, args, deploy)\n\t\t},\n\t}\n)\n\nfunc deploy(ctx cmdutil.Ctx) error {\n\tif ctx.Env.ReadOnly {\n\t\treturn fmt.Errorf(\"[%s] environment is readonly\", colors.Green(ctx.Env.Name))\n\t}\n\n\tassetsActions, err := generateActions(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar deployGroup sync.WaitGroup\n\tctx.StartProgress(len(assetsActions))\n\tfor path, op := range assetsActions {\n\t\tif path == settingsDataKey {\n\t\t\tdefer perform(ctx, path, op)\n\t\t\tcontinue\n\t\t}\n\t\tdeployGroup.Add(1)\n\t\tgo func(path string, op file.Op) {\n\t\t\tdefer deployGroup.Done()\n\t\t\tperform(ctx, path, op)\n\t\t}(path, op)\n\t}\n\n\tdeployGroup.Wait()\n\treturn nil\n}\n\nfunc generateActions(ctx cmdutil.Ctx) (map[string]file.Op, error) {\n\tassetsActions := map[string]file.Op{}\n\n\tif len(ctx.Args) == 0 && !ctx.Flags.NoDelete {\n\t\tremoteFiles, err := ctx.Client.GetAllAssets()\n\t\tif err != nil {\n\t\t\treturn assetsActions, err\n\t\t}\n\t\tfor _, filename := range remoteFiles {\n\t\t\tassetsActions[filepath.ToSlash(filename)] = file.Remove\n\t\t}\n\t}\n\n\tlocalAssets, err := shopify.FindAssets(ctx.Env, ctx.Args...)\n\tif err != nil {\n\t\treturn assetsActions, err\n\t}\n\n\tfor _, path := range localAssets {\n\t\tassetsActions[path] = file.Update\n\t}\n\treturn assetsActions, nil\n}\n<|endoftext|>"} {"text":"package tls\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/bosssauce\/ponzu\/system\/db\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n)\n\nvar m autocert.Manager\n\n\/\/ setup attempts to locate or create the cert cache directory and the certs for TLS encryption\nfunc setup() {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Couldn't find working directory to locate or save certificates.\")\n\t}\n\n\tcache := autocert.DirCache(filepath.Join(pwd, \"system\", \"tls\", \"certs\"))\n\tif _, err := os.Stat(string(cache)); os.IsNotExist(err) {\n\t\terr := os.MkdirAll(string(cache), os.ModePerm|os.ModeDir)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Couldn't create cert directory at\", cache)\n\t\t}\n\t}\n\n\t\/\/ get host\/domain and email from Config to use for TLS request to Let's encryption.\n\t\/\/ we will fail fatally if either are not found since Let's Encrypt will rate-limit\n\t\/\/ and sending incomplete requests is wasteful and guarenteed to fail its check\n\thost, err := db.Config(\"domain\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error identifying host\/domain during TLS set-up.\", err)\n\t}\n\n\tif host == nil {\n\t\tlog.Fatalln(\"No 'domain' field set in Configuration. Please add a domain before attempting to make certificates.\")\n\t}\n\tfmt.Println(\"Using\", host, \"as host\/domain for certificate...\")\n\tfmt.Println(\"NOTE: if the host\/domain is not configured properly or is unreachable, HTTPS set-up will fail.\")\n\n\temail, err := db.Config(\"admin_email\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error identifying admin email during TLS set-up.\", err)\n\t}\n\n\tif email == nil {\n\t\tlog.Fatalln(\"No 'admin_email' field set in Configuration. 
Please add an admin email before attempting to make certificates.\")\n\t}\n\tfmt.Println(\"Using\", email, \"as contact email for certificate...\")\n\n\tm = autocert.Manager{\n\t\tPrompt: autocert.AcceptTOS,\n\t\tCache: cache,\n\t\tHostPolicy: autocert.HostWhitelist(string(host)),\n\t\tRenewBefore: time.Hour * 24 * 30,\n\t\tEmail: string(email),\n\t}\n\n}\n\n\/\/ Enable runs the setup for creating or locating certificates and starts the TLS server\nfunc Enable() {\n\tsetup()\n\n\tserver := &http.Server{\n\t\tAddr: \":443\",\n\t\tTLSConfig: &tls.Config{GetCertificate: m.GetCertificate},\n\t}\n\n\tgo log.Fatalln(server.ListenAndServeTLS(\"\", \"\"))\n\tfmt.Println(\"Server listening for HTTPS requests...\")\n}\ncasting var to strings for printing in shellpackage tls\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/bosssauce\/ponzu\/system\/db\"\n\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n)\n\nvar m autocert.Manager\n\n\/\/ setup attempts to locate or create the cert cache directory and the certs for TLS encryption\nfunc setup() {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Couldn't find working directory to locate or save certificates.\")\n\t}\n\n\tcache := autocert.DirCache(filepath.Join(pwd, \"system\", \"tls\", \"certs\"))\n\tif _, err := os.Stat(string(cache)); os.IsNotExist(err) {\n\t\terr := os.MkdirAll(string(cache), os.ModePerm|os.ModeDir)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Couldn't create cert directory at\", cache)\n\t\t}\n\t}\n\n\t\/\/ get host\/domain and email from Config to use for TLS request to Let's encryption.\n\t\/\/ we will fail fatally if either are not found since Let's Encrypt will rate-limit\n\t\/\/ and sending incomplete requests is wasteful and guarenteed to fail its check\n\thost, err := db.Config(\"domain\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error identifying host\/domain during TLS set-up.\", err)\n\t}\n\n\tif host == nil {\n\t\tlog.Fatalln(\"No 'domain' field set in Configuration. Please add a domain before attempting to make certificates.\")\n\t}\n\tfmt.Println(\"Using\", string(host), \"as host\/domain for certificate...\")\n\tfmt.Println(\"NOTE: if the host\/domain is not configured properly or is unreachable, HTTPS set-up will fail.\")\n\n\temail, err := db.Config(\"admin_email\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error identifying admin email during TLS set-up.\", err)\n\t}\n\n\tif email == nil {\n\t\tlog.Fatalln(\"No 'admin_email' field set in Configuration. 
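// Caveat in Enable above: in `go log.Fatalln(server.ListenAndServeTLS("", ""))`
// the argument is evaluated in the calling goroutine (a go statement
// evaluates the function value and its parameters before the new goroutine
// starts), so ListenAndServeTLS blocks right there and the "listening"
// message prints only after the server has already stopped. A sketch of
// the presumably intended order (enableSketch is illustrative; server as
// built in Enable):
func enableSketch(server *http.Server) {
	go func() {
		log.Fatalln(server.ListenAndServeTLS("", ""))
	}()
	fmt.Println("Server listening for HTTPS requests...")
}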
Please add an admin email before attempting to make certificates.\")\n\t}\n\tfmt.Println(\"Using\", string(email), \"as contact email for certificate...\")\n\n\tm = autocert.Manager{\n\t\tPrompt: autocert.AcceptTOS,\n\t\tCache: cache,\n\t\tHostPolicy: autocert.HostWhitelist(string(host)),\n\t\tRenewBefore: time.Hour * 24 * 30,\n\t\tEmail: string(email),\n\t}\n\n}\n\n\/\/ Enable runs the setup for creating or locating certificates and starts the TLS server\nfunc Enable() {\n\tsetup()\n\n\tserver := &http.Server{\n\t\tAddr: \":443\",\n\t\tTLSConfig: &tls.Config{GetCertificate: m.GetCertificate},\n\t}\n\n\tgo log.Fatalln(server.ListenAndServeTLS(\"\", \"\"))\n\tfmt.Println(\"Server listening for HTTPS requests...\")\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cryptolib\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n\t\"golang.org\/x\/crypto\/openpgp\/armor\"\n)\n\ntype pgpVerifierImpl struct{}\n\n\/\/ verifyPgp verifies a PGP signature using a public key and outputs the\n\/\/ payload that was signed. `signature` is an ASCII-armored \"attached\"\n\/\/ signature, generated by `gpg --armor --sign --output signature payload`.\n\/\/ `publicKey` is an ASCII-armored PGP key.\nfunc (v pgpVerifierImpl) verifyPgp(signature, publicKey []byte) ([]byte, error) {\n\tkeyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(publicKey))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armored key ring\")\n\t}\n\n\tarmorBlock, err := armor.Decode(bytes.NewReader(signature))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error decoding armored signature\")\n\t}\n\n\tmessageDetails, err := openpgp.ReadMessage(armorBlock.Body, keyring, nil, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armor signature\")\n\t}\n\n\t\/\/ MessageDetails.UnverifiedBody signature is not verified until we read it.\n\t\/\/ This will call PublicKey.VerifySignature for the keys in the keyring.\n\tpayload, err := ioutil.ReadAll(messageDetails.UnverifiedBody)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading message contents\")\n\t}\n\n\t\/\/ Make sure after reading the UnverifiedBody above that the Signature\n\t\/\/ exists and there is no SignatureError.\n\tif messageDetails.SignatureError != nil {\n\t\treturn nil, errors.Wrap(messageDetails.SignatureError, \"failed to validate: signature error\")\n\t}\n\tif messageDetails.Signature == nil {\n\t\treturn nil, fmt.Errorf(\"failed to validate: signature missing\")\n\t}\n\treturn payload, nil\n}\n\ntype pgpSigner struct {\n\tprivateEntity *openpgp.Entity\n\tpublicKeyID string\n}\n\n\/\/ NewPgpSigner creates a Signer interface for PGP Attestations. 
`privateKey`\n\/\/ contains the ASCII-armored private key.\nfunc NewPgpSigner(privateKey []byte) (Signer, error) {\n\tkeyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(privateKey))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armored private key\")\n\t}\n\tif len(keyring) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected 1 key in keyring, got %d\", len(keyring))\n\t}\n\tprivateEntity := keyring[0]\n\treturn &pgpSigner{\n\t\tprivateEntity: privateEntity,\n\t\tpublicKeyID: fmt.Sprintf(\"%X\", privateEntity.PrimaryKey.Fingerprint),\n\t}, nil\n}\n\n\/\/ CreateAttestation creates a signed PGP Attestation. The Attestation's\n\/\/ publicKeyID will be derived from the private key. See Signer for more\n\/\/ details.\nfunc (s *pgpSigner) CreateAttestation(payload []byte) (*Attestation, error) {\n\t\/\/ Create a buffer to store the signature\n\tsignature := bytes.Buffer{}\n\n\t\/\/ Armor-encode the signature before writing to the buffer\n\tarmorBuffer, err := armor.Encode(&signature, openpgp.SignatureType, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error creating armor buffer\")\n\t}\n\n\tarmorWriter, err := openpgp.Sign(armorBuffer, s.privateEntity, nil, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error signing payload\")\n\t}\n\n\t_, err = armorWriter.Write(payload)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error writing payload to armor writer\")\n\t}\n\n\t\/\/ The payload is not signed until the armor writer is closed. This will\n\t\/\/ call Signature.Sign to sign the payload.\n\tarmorWriter.Close()\n\t\/\/ The CRC checksum is not written until the armor buffer is closed.\n\tarmorBuffer.Close()\n\treturn &Attestation{\n\t\tPublicKeyID: s.publicKeyID,\n\t\tSignature: signature.Bytes(),\n\t}, nil\n}\nAddress round 2 comments\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cryptolib\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n\t\"golang.org\/x\/crypto\/openpgp\/armor\"\n)\n\ntype pgpVerifierImpl struct{}\n\n\/\/ verifyPgp verifies a PGP signature using a public key and outputs the\n\/\/ payload that was signed. 
`signature` is an ASCII-armored \"attached\"\n\/\/ signature, generated by `gpg --armor --sign --output signature payload`.\n\/\/ `publicKey` is an ASCII-armored PGP key.\nfunc (v pgpVerifierImpl) verifyPgp(signature, publicKey []byte) ([]byte, error) {\n\tkeyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(publicKey))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armored key ring\")\n\t}\n\n\tarmorBlock, err := armor.Decode(bytes.NewReader(signature))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error decoding armored signature\")\n\t}\n\n\tmessageDetails, err := openpgp.ReadMessage(armorBlock.Body, keyring, nil, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armor signature\")\n\t}\n\n\t\/\/ MessageDetails.UnverifiedBody signature is not verified until we read it.\n\t\/\/ This will call PublicKey.VerifySignature for the keys in the keyring.\n\tpayload, err := ioutil.ReadAll(messageDetails.UnverifiedBody)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading message contents\")\n\t}\n\n\t\/\/ Make sure after reading the UnverifiedBody above that the Signature\n\t\/\/ exists and there is no SignatureError.\n\tif messageDetails.SignatureError != nil {\n\t\treturn nil, errors.Wrap(messageDetails.SignatureError, \"failed to validate: signature error\")\n\t}\n\tif messageDetails.Signature == nil {\n\t\treturn nil, fmt.Errorf(\"failed to validate: signature missing\")\n\t}\n\treturn payload, nil\n}\n\ntype pgpSigner struct {\n\tprivateKey *openpgp.Entity\n\tpublicKeyID string\n}\n\n\/\/ NewPgpSigner creates a Signer interface for PGP Attestations. `privateKey`\n\/\/ contains the ASCII-armored private key.\nfunc NewPgpSigner(privateKey []byte) (Signer, error) {\n\tkeyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(privateKey))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error reading armored private key\")\n\t}\n\tif len(keyring) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected 1 key in keyring, got %d\", len(keyring))\n\t}\n\tkey := keyring[0]\n\treturn &pgpSigner{\n\t\tprivateKey: key,\n\t\tpublicKeyID: fmt.Sprintf(\"%X\", key.PrimaryKey.Fingerprint),\n\t}, nil\n}\n\n\/\/ CreateAttestation creates a signed PGP Attestation. The Attestation's\n\/\/ publicKeyID will be derived from the private key. See Signer for more\n\/\/ details.\nfunc (s *pgpSigner) CreateAttestation(payload []byte) (*Attestation, error) {\n\t\/\/ Create a buffer to store the signature\n\tarmoredSignature := bytes.Buffer{}\n\n\t\/\/ Armor-encode the signature before writing to the buffer\n\tarmorWriter, err := armor.Encode(&armoredSignature, openpgp.SignatureType, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error creating armor buffer\")\n\t}\n\n\tsignatureWriter, err := openpgp.Sign(armorWriter, s.privateKey, nil, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error signing payload\")\n\t}\n\n\t_, err = signatureWriter.Write(payload)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error writing payload to armor writer\")\n\t}\n\n\t\/\/ The payload is not signed until the armor writer is closed. 
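// Round-trip sketch for the signer/verifier pair above. privKey and pubKey
// stand for ASCII-armored key material supplied by the caller; they are
// not defined in this file, and roundTripSketch is illustrative only. Note
// the close ordering inside CreateAttestation: the signing writer must be
// closed before the armor writer, or the signature and CRC are never
// flushed.
func roundTripSketch(privKey, pubKey []byte) ([]byte, error) {
	signer, err := NewPgpSigner(privKey)
	if err != nil {
		return nil, err
	}
	att, err := signer.CreateAttestation([]byte("payload"))
	if err != nil {
		return nil, err
	}
	// verifyPgp returns the signed payload only if the signature checks out.
	return pgpVerifierImpl{}.verifyPgp(att.Signature, pubKey)
}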
This will\n\t\/\/ call Signature.Sign to sign the payload.\n\tsignatureWriter.Close()\n\t\/\/ The CRC checksum is not written until the armor buffer is closed.\n\tarmorWriter.Close()\n\treturn &Attestation{\n\t\tPublicKeyID: s.publicKeyID,\n\t\tSignature: armoredSignature.Bytes(),\n\t}, nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/ptt\/pttweb\/article\"\n\t\"github.com\/ptt\/pttweb\/atomfeed\"\n\t\"github.com\/ptt\/pttweb\/cache\"\n\t\"github.com\/ptt\/pttweb\/pttbbs\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tEntryPerPage = 20\n\n\tCtxKeyBoardname = `ContextBoardname`\n)\n\ntype BbsIndexRequest struct {\n\tBrd pttbbs.Board\n\tPage int\n}\n\nfunc (r *BbsIndexRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:bbsindex\/%v\/%v\", r.Brd.BrdName, r.Page)\n}\n\nfunc generateBbsIndex(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BbsIndexRequest)\n\tpage := r.Page\n\n\tbbsindex := &BbsIndex{\n\t\tBoard: r.Brd,\n\t\tIsValid: true,\n\t}\n\n\t\/\/ Handle paging\n\tpaging := NewPaging(EntryPerPage, r.Brd.NumPosts)\n\tif page == 0 {\n\t\tpage = paging.LastPageNo()\n\t\tpaging.SetPageNo(page)\n\t} else if err := paging.SetPageNo(page); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch article list\n\tvar err error\n\tbbsindex.Articles, err = ptt.GetArticleList(r.Brd.Ref(), paging.Cursor(), EntryPerPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch bottoms when at last page\n\tif page == paging.LastPageNo() {\n\t\tbbsindex.Bottoms, err = ptt.GetBottomList(r.Brd.Ref())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Page links\n\tif u, err := router.Get(\"bbsindex\").URLPath(\"brdname\", r.Brd.BrdName); err == nil {\n\t\tbbsindex.LastPage = u.String()\n\t}\n\tpageLink := func(n int) string {\n\t\tu, err := router.Get(\"bbsindex_page\").URLPath(\"brdname\", r.Brd.BrdName, \"page\", strconv.Itoa(n))\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn u.String()\n\t}\n\tbbsindex.FirstPage = pageLink(1)\n\tif page > 1 {\n\t\tbbsindex.PrevPage = pageLink(page - 1)\n\t}\n\tif page < paging.LastPageNo() {\n\t\tbbsindex.NextPage = pageLink(page + 1)\n\t}\n\n\treturn bbsindex, nil\n}\n\ntype BbsSearchRequest struct {\n\tBrd pttbbs.Board\n\tPage int\n\tQuery string\n\tPreds []pttbbs.SearchPredicate\n}\n\nfunc (r *BbsSearchRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:bbssearch\/%v\/%v\/%v\", r.Brd.BrdName, r.Page, r.Query)\n}\n\nfunc generateBbsSearch(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BbsSearchRequest)\n\tpage := r.Page\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\toffset := -EntryPerPage * page\n\n\tbbsindex := &BbsIndex{\n\t\tBoard: r.Brd,\n\t\tQuery: r.Query,\n\t\tIsValid: true,\n\t}\n\n\t\/\/ Search articles\n\tarticles, totalPosts, err := pttSearch.Search(r.Brd.Ref(), r.Preds, offset, EntryPerPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Handle paging\n\tpaging := NewPaging(EntryPerPage, totalPosts)\n\tif lastPage := paging.LastPageNo(); page > lastPage {\n\t\tarticles = nil\n\t\tbbsindex.IsValid = false\n\t} else if page == lastPage {\n\t\t\/\/ We may get extra entries for last page.\n\t\tn := totalPosts % EntryPerPage\n\t\tif n < len(articles) {\n\t\t\tarticles = articles[:n]\n\t\t}\n\t}\n\n\t\/\/ Show the page in reverse order.\n\tfor i, j := 0, len(articles)-1; i < j; i, j = i+1, j-1 {\n\t\tarticles[i], articles[j] = articles[j], articles[i]\n\t}\n\tbbsindex.Articles = articles\n\n\t\/\/ Page 
links, in newest first order.\n\tpageLink := func(n int) string {\n\t\tu, err := router.Get(\"bbssearch\").URLPath(\"brdname\", r.Brd.BrdName)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tq := url.Values{}\n\t\tq.Set(\"q\", r.Query)\n\t\tq.Set(\"page\", strconv.Itoa(n))\n\t\tu.RawQuery = q.Encode()\n\t\treturn u.String()\n\t}\n\tbbsindex.FirstPage = pageLink(paging.LastPageNo())\n\tbbsindex.LastPage = pageLink(1)\n\tif page > 1 {\n\t\tbbsindex.NextPage = pageLink(page - 1)\n\t}\n\tif page < paging.LastPageNo() {\n\t\tbbsindex.PrevPage = pageLink(page + 1)\n\t}\n\n\treturn bbsindex, nil\n}\n\ntype BoardAtomFeedRequest struct {\n\tBrd pttbbs.Board\n}\n\nfunc (r *BoardAtomFeedRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:atomfeed\/%v\", r.Brd.BrdName)\n}\n\nfunc generateBoardAtomFeed(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BoardAtomFeedRequest)\n\n\tif atomConverter == nil {\n\t\treturn nil, errors.New(\"atom feed not configured\")\n\t}\n\n\t\/\/ Fetch article list\n\tarticles, err := ptt.GetArticleList(r.Brd.Ref(), -EntryPerPage, EntryPerPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Fetch snippets and contruct posts.\n\tvar posts []*atomfeed.PostEntry\n\tfor _, article := range articles {\n\t\t\/\/ Use an empty string when error.\n\t\tsnippet, _ := getArticleSnippet(r.Brd, article.FileName)\n\t\tposts = append(posts, &atomfeed.PostEntry{\n\t\t\tArticle: article,\n\t\t\tSnippet: snippet,\n\t\t})\n\t}\n\n\tfeed, err := atomConverter.Convert(r.Brd, posts)\n\tif err != nil {\n\t\tlog.Println(\"atomfeed: Convert:\", err)\n\t\t\/\/ Don't return error but cache that it's invalid.\n\t}\n\treturn &BoardAtomFeed{\n\t\tFeed: feed,\n\t\tIsValid: err == nil,\n\t}, nil\n}\n\nconst SnippetHeadSize = 16 * 1024 \/\/ Enough for 8 pages of 80x24.\n\nfunc getArticleSnippet(brd pttbbs.Board, filename string) (string, error) {\n\tp, err := ptt.GetArticleSelect(brd.Ref(), pttbbs.SelectHead, filename, \"\", 0, SnippetHeadSize)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(p.Content) == 0 {\n\t\treturn \"\", pttbbs.ErrNotFound\n\t}\n\n\tra, err := article.Render(article.WithContent(p.Content))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ra.PreviewContent(), nil\n}\n\nconst (\n\tTruncateSize = 1048576\n\tTruncateMaxScan = 1024\n\n\tHeadSize = 100 * 1024\n\tTailSize = 50 * 1024\n)\n\ntype ArticleRequest struct {\n\tNamespace string\n\tBrd pttbbs.Board\n\tFilename string\n\tSelect func(m pttbbs.SelectMethod, offset, maxlen int) (*pttbbs.ArticlePart, error)\n}\n\nfunc (r *ArticleRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:%v\/%v\/%v\", r.Namespace, r.Brd.BrdName, r.Filename)\n}\n\nfunc (r *ArticleRequest) Boardname() string {\n\treturn r.Brd.BrdName\n}\n\nfunc generateArticle(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*ArticleRequest)\n\tctx := context.WithValue(context.TODO(), CtxKeyBoardname, r)\n\n\tp, err := r.Select(pttbbs.SelectHead, 0, HeadSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ We don't want head and tail have duplicate content\n\tif p.FileSize > HeadSize && p.FileSize <= HeadSize+TailSize {\n\t\tp, err = r.Select(pttbbs.SelectPart, 0, p.FileSize)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(p.Content) == 0 {\n\t\treturn nil, pttbbs.ErrNotFound\n\t}\n\n\ta := new(Article)\n\n\ta.IsPartial = p.Length < p.FileSize\n\ta.IsTruncated = a.IsPartial\n\n\tif a.IsPartial {\n\t\t\/\/ Get and render tail\n\t\tptail, err := r.Select(pttbbs.SelectTail, -TailSize, TailSize)\n\t\tif err != 
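// The three-clause loop in generateBbsSearch above is the allocation-free
// slice reversal idiom: indices advance from both ends and swap until they
// meet. A self-contained example:
func reverseSketch() {
	s := []int{1, 2, 3, 4}
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
	// s is now [4 3 2 1]
}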
nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(ptail.Content) > 0 {\n\t\t\tra, err := article.Render(\n\t\t\t\tarticle.WithContent(ptail.Content),\n\t\t\t\tarticle.WithContext(ctx),\n\t\t\t\tarticle.WithDisableArticleHeader(),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta.ContentTailHtml = ra.HTML()\n\t\t}\n\t\ta.CacheKey = ptail.CacheKey\n\t\ta.NextOffset = ptail.FileSize - TailSize + ptail.Offset + ptail.Length\n\t} else {\n\t\ta.CacheKey = p.CacheKey\n\t\ta.NextOffset = p.Length\n\t}\n\n\tra, err := article.Render(\n\t\tarticle.WithContent(p.Content),\n\t\tarticle.WithContext(ctx),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.ParsedTitle = ra.ParsedTitle()\n\ta.PreviewContent = ra.PreviewContent()\n\ta.ContentHtml = ra.HTML()\n\ta.IsValid = true\n\treturn a, nil\n}\n\ntype ArticlePartRequest struct {\n\tBrd pttbbs.Board\n\tFilename string\n\tCacheKey string\n\tOffset int\n}\n\nfunc (r *ArticlePartRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:bbs\/%v\/%v#%v,%v\", r.Brd.BrdName, r.Filename, r.CacheKey, r.Offset)\n}\n\nfunc (r *ArticlePartRequest) Boardname() string {\n\treturn r.Brd.BrdName\n}\n\nfunc generateArticlePart(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*ArticlePartRequest)\n\tctx := context.WithValue(context.TODO(), CtxKeyBoardname, r)\n\n\tp, err := ptt.GetArticleSelect(r.Brd.Ref(), pttbbs.SelectHead, r.Filename, r.CacheKey, r.Offset, -1)\n\tif err == pttbbs.ErrNotFound {\n\t\t\/\/ Returns an invalid result\n\t\treturn new(ArticlePart), nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tap := new(ArticlePart)\n\tap.IsValid = true\n\tap.CacheKey = p.CacheKey\n\tap.NextOffset = r.Offset + p.Offset + p.Length\n\n\tif len(p.Content) > 0 {\n\t\tra, err := article.Render(\n\t\t\tarticle.WithContent(p.Content),\n\t\t\tarticle.WithContext(ctx),\n\t\t\tarticle.WithDisableArticleHeader(),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tap.ContentHtml = string(ra.HTML())\n\t}\n\n\treturn ap, nil\n}\n\nfunc truncateLargeContent(content []byte, size, maxScan int) []byte {\n\tif len(content) <= size {\n\t\treturn content\n\t}\n\tfor i := size - 1; i >= size-maxScan && i >= 0; i-- {\n\t\tif content[i] == '\\n' {\n\t\t\treturn content[:i+1]\n\t\t}\n\t}\n\treturn content[:size]\n}\nChange query part of memcache key to base64(sha256(query)).package main\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/ptt\/pttweb\/article\"\n\t\"github.com\/ptt\/pttweb\/atomfeed\"\n\t\"github.com\/ptt\/pttweb\/cache\"\n\t\"github.com\/ptt\/pttweb\/pttbbs\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tEntryPerPage = 20\n\n\tCtxKeyBoardname = `ContextBoardname`\n)\n\ntype BbsIndexRequest struct {\n\tBrd pttbbs.Board\n\tPage int\n}\n\nfunc (r *BbsIndexRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:bbsindex\/%v\/%v\", r.Brd.BrdName, r.Page)\n}\n\nfunc generateBbsIndex(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BbsIndexRequest)\n\tpage := r.Page\n\n\tbbsindex := &BbsIndex{\n\t\tBoard: r.Brd,\n\t\tIsValid: true,\n\t}\n\n\t\/\/ Handle paging\n\tpaging := NewPaging(EntryPerPage, r.Brd.NumPosts)\n\tif page == 0 {\n\t\tpage = paging.LastPageNo()\n\t\tpaging.SetPageNo(page)\n\t} else if err := paging.SetPageNo(page); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch article list\n\tvar err error\n\tbbsindex.Articles, err = ptt.GetArticleList(r.Brd.Ref(), paging.Cursor(), EntryPerPage)\n\tif err != nil 
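// Rationale for the sha256/base64 cache-key change introduced above:
// memcached keys are limited to 250 bytes and may not contain spaces or
// control characters, while a search query is unbounded user text. Hashing
// yields a fixed-length, key-safe token:
func hashedQueryKey(query string) string {
	queryHash := sha256.Sum256([]byte(query))
	// A SHA-256 digest is 32 bytes, so the base64 form is always 44 ASCII
	// characters regardless of the query.
	return base64.URLEncoding.EncodeToString(queryHash[:])
}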
{\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch bottoms when at last page\n\tif page == paging.LastPageNo() {\n\t\tbbsindex.Bottoms, err = ptt.GetBottomList(r.Brd.Ref())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Page links\n\tif u, err := router.Get(\"bbsindex\").URLPath(\"brdname\", r.Brd.BrdName); err == nil {\n\t\tbbsindex.LastPage = u.String()\n\t}\n\tpageLink := func(n int) string {\n\t\tu, err := router.Get(\"bbsindex_page\").URLPath(\"brdname\", r.Brd.BrdName, \"page\", strconv.Itoa(n))\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn u.String()\n\t}\n\tbbsindex.FirstPage = pageLink(1)\n\tif page > 1 {\n\t\tbbsindex.PrevPage = pageLink(page - 1)\n\t}\n\tif page < paging.LastPageNo() {\n\t\tbbsindex.NextPage = pageLink(page + 1)\n\t}\n\n\treturn bbsindex, nil\n}\n\ntype BbsSearchRequest struct {\n\tBrd pttbbs.Board\n\tPage int\n\tQuery string\n\tPreds []pttbbs.SearchPredicate\n}\n\nfunc (r *BbsSearchRequest) String() string {\n\tqueryHash := sha256.Sum256([]byte(r.Query))\n\tquery := base64.URLEncoding.EncodeToString(queryHash[:])\n\treturn fmt.Sprintf(\"pttweb:bbssearch\/%v\/%v\/%v\", r.Brd.BrdName, r.Page, query)\n}\n\nfunc generateBbsSearch(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BbsSearchRequest)\n\tpage := r.Page\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\toffset := -EntryPerPage * page\n\n\tbbsindex := &BbsIndex{\n\t\tBoard: r.Brd,\n\t\tQuery: r.Query,\n\t\tIsValid: true,\n\t}\n\n\t\/\/ Search articles\n\tarticles, totalPosts, err := pttSearch.Search(r.Brd.Ref(), r.Preds, offset, EntryPerPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Handle paging\n\tpaging := NewPaging(EntryPerPage, totalPosts)\n\tif lastPage := paging.LastPageNo(); page > lastPage {\n\t\tarticles = nil\n\t\tbbsindex.IsValid = false\n\t} else if page == lastPage {\n\t\t\/\/ We may get extra entries for last page.\n\t\tn := totalPosts % EntryPerPage\n\t\tif n < len(articles) {\n\t\t\tarticles = articles[:n]\n\t\t}\n\t}\n\n\t\/\/ Show the page in reverse order.\n\tfor i, j := 0, len(articles)-1; i < j; i, j = i+1, j-1 {\n\t\tarticles[i], articles[j] = articles[j], articles[i]\n\t}\n\tbbsindex.Articles = articles\n\n\t\/\/ Page links, in newest first order.\n\tpageLink := func(n int) string {\n\t\tu, err := router.Get(\"bbssearch\").URLPath(\"brdname\", r.Brd.BrdName)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tq := url.Values{}\n\t\tq.Set(\"q\", r.Query)\n\t\tq.Set(\"page\", strconv.Itoa(n))\n\t\tu.RawQuery = q.Encode()\n\t\treturn u.String()\n\t}\n\tbbsindex.FirstPage = pageLink(paging.LastPageNo())\n\tbbsindex.LastPage = pageLink(1)\n\tif page > 1 {\n\t\tbbsindex.NextPage = pageLink(page - 1)\n\t}\n\tif page < paging.LastPageNo() {\n\t\tbbsindex.PrevPage = pageLink(page + 1)\n\t}\n\n\treturn bbsindex, nil\n}\n\ntype BoardAtomFeedRequest struct {\n\tBrd pttbbs.Board\n}\n\nfunc (r *BoardAtomFeedRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:atomfeed\/%v\", r.Brd.BrdName)\n}\n\nfunc generateBoardAtomFeed(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*BoardAtomFeedRequest)\n\n\tif atomConverter == nil {\n\t\treturn nil, errors.New(\"atom feed not configured\")\n\t}\n\n\t\/\/ Fetch article list\n\tarticles, err := ptt.GetArticleList(r.Brd.Ref(), -EntryPerPage, EntryPerPage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Fetch snippets and contruct posts.\n\tvar posts []*atomfeed.PostEntry\n\tfor _, article := range articles {\n\t\t\/\/ Use an empty string when error.\n\t\tsnippet, _ := getArticleSnippet(r.Brd, 
article.FileName)\n\t\tposts = append(posts, &atomfeed.PostEntry{\n\t\t\tArticle: article,\n\t\t\tSnippet: snippet,\n\t\t})\n\t}\n\n\tfeed, err := atomConverter.Convert(r.Brd, posts)\n\tif err != nil {\n\t\tlog.Println(\"atomfeed: Convert:\", err)\n\t\t\/\/ Don't return error but cache that it's invalid.\n\t}\n\treturn &BoardAtomFeed{\n\t\tFeed: feed,\n\t\tIsValid: err == nil,\n\t}, nil\n}\n\nconst SnippetHeadSize = 16 * 1024 \/\/ Enough for 8 pages of 80x24.\n\nfunc getArticleSnippet(brd pttbbs.Board, filename string) (string, error) {\n\tp, err := ptt.GetArticleSelect(brd.Ref(), pttbbs.SelectHead, filename, \"\", 0, SnippetHeadSize)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(p.Content) == 0 {\n\t\treturn \"\", pttbbs.ErrNotFound\n\t}\n\n\tra, err := article.Render(article.WithContent(p.Content))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ra.PreviewContent(), nil\n}\n\nconst (\n\tTruncateSize = 1048576\n\tTruncateMaxScan = 1024\n\n\tHeadSize = 100 * 1024\n\tTailSize = 50 * 1024\n)\n\ntype ArticleRequest struct {\n\tNamespace string\n\tBrd pttbbs.Board\n\tFilename string\n\tSelect func(m pttbbs.SelectMethod, offset, maxlen int) (*pttbbs.ArticlePart, error)\n}\n\nfunc (r *ArticleRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:%v\/%v\/%v\", r.Namespace, r.Brd.BrdName, r.Filename)\n}\n\nfunc (r *ArticleRequest) Boardname() string {\n\treturn r.Brd.BrdName\n}\n\nfunc generateArticle(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*ArticleRequest)\n\tctx := context.WithValue(context.TODO(), CtxKeyBoardname, r)\n\n\tp, err := r.Select(pttbbs.SelectHead, 0, HeadSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ We don't want head and tail have duplicate content\n\tif p.FileSize > HeadSize && p.FileSize <= HeadSize+TailSize {\n\t\tp, err = r.Select(pttbbs.SelectPart, 0, p.FileSize)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(p.Content) == 0 {\n\t\treturn nil, pttbbs.ErrNotFound\n\t}\n\n\ta := new(Article)\n\n\ta.IsPartial = p.Length < p.FileSize\n\ta.IsTruncated = a.IsPartial\n\n\tif a.IsPartial {\n\t\t\/\/ Get and render tail\n\t\tptail, err := r.Select(pttbbs.SelectTail, -TailSize, TailSize)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(ptail.Content) > 0 {\n\t\t\tra, err := article.Render(\n\t\t\t\tarticle.WithContent(ptail.Content),\n\t\t\t\tarticle.WithContext(ctx),\n\t\t\t\tarticle.WithDisableArticleHeader(),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta.ContentTailHtml = ra.HTML()\n\t\t}\n\t\ta.CacheKey = ptail.CacheKey\n\t\ta.NextOffset = ptail.FileSize - TailSize + ptail.Offset + ptail.Length\n\t} else {\n\t\ta.CacheKey = p.CacheKey\n\t\ta.NextOffset = p.Length\n\t}\n\n\tra, err := article.Render(\n\t\tarticle.WithContent(p.Content),\n\t\tarticle.WithContext(ctx),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.ParsedTitle = ra.ParsedTitle()\n\ta.PreviewContent = ra.PreviewContent()\n\ta.ContentHtml = ra.HTML()\n\ta.IsValid = true\n\treturn a, nil\n}\n\ntype ArticlePartRequest struct {\n\tBrd pttbbs.Board\n\tFilename string\n\tCacheKey string\n\tOffset int\n}\n\nfunc (r *ArticlePartRequest) String() string {\n\treturn fmt.Sprintf(\"pttweb:bbs\/%v\/%v#%v,%v\", r.Brd.BrdName, r.Filename, r.CacheKey, r.Offset)\n}\n\nfunc (r *ArticlePartRequest) Boardname() string {\n\treturn r.Brd.BrdName\n}\n\nfunc generateArticlePart(key cache.Key) (cache.Cacheable, error) {\n\tr := key.(*ArticlePartRequest)\n\tctx := context.WithValue(context.TODO(), CtxKeyBoardname, r)\n\n\tp, 
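// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// generateArticle above fetches HeadSize bytes from the front of a large file
// and TailSize bytes from the back, then records where a follow-up fetch must
// resume. A sketch of that offset arithmetic, assuming the tail select is
// addressed relative to (FileSize - TailSize); the function name is hypothetical.
package main

import "fmt"

const tailSize = 50 * 1024

// absoluteNextOffset converts the tail part's window-relative position back
// into an absolute file offset: the start of the tail window, plus where the
// part began inside the window, plus the bytes actually returned.
func absoluteNextOffset(fileSize, partOffset, partLength int) int {
	return fileSize - tailSize + partOffset + partLength
}

func main() {
	// A 300 KiB file whose tail part returned 10 KiB from the window origin
	// resumes at 300K - 50K + 0 + 10K = 260K.
	fmt.Println(absoluteNextOffset(300*1024, 0, 10*1024))
}
// ---- End aside ----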
err := ptt.GetArticleSelect(r.Brd.Ref(), pttbbs.SelectHead, r.Filename, r.CacheKey, r.Offset, -1)\n\tif err == pttbbs.ErrNotFound {\n\t\t\/\/ Returns an invalid result\n\t\treturn new(ArticlePart), nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tap := new(ArticlePart)\n\tap.IsValid = true\n\tap.CacheKey = p.CacheKey\n\tap.NextOffset = r.Offset + p.Offset + p.Length\n\n\tif len(p.Content) > 0 {\n\t\tra, err := article.Render(\n\t\t\tarticle.WithContent(p.Content),\n\t\t\tarticle.WithContext(ctx),\n\t\t\tarticle.WithDisableArticleHeader(),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tap.ContentHtml = string(ra.HTML())\n\t}\n\n\treturn ap, nil\n}\n\nfunc truncateLargeContent(content []byte, size, maxScan int) []byte {\n\tif len(content) <= size {\n\t\treturn content\n\t}\n\tfor i := size - 1; i >= size-maxScan && i >= 0; i-- {\n\t\tif content[i] == '\\n' {\n\t\t\treturn content[:i+1]\n\t\t}\n\t}\n\treturn content[:size]\n}\n<|endoftext|>"} {"text":"package cli\n\nimport (\n\t\"flag\"\n\n\t\"github.com\/cloudflare\/cfssl\/config\"\n\t\"github.com\/cloudflare\/cfssl\/helpers\"\n\t\"github.com\/cloudflare\/cfssl\/signer\/pkcs11\"\n\t\"github.com\/cloudflare\/cfssl\/signer\/universal\"\n)\n\n\/\/ Config is a type to hold flag values used by cfssl commands.\ntype Config struct {\n\tHostname string\n\tCertFile string\n\tCSRFile string\n\tCAFile string\n\tCAKeyFile string\n\tKeyFile string\n\tIntermediatesFile string\n\tCABundleFile string\n\tIntBundleFile string\n\tAddress string\n\tPort int\n\tPassword string\n\tConfigFile string\n\tCFG *config.Config\n\tProfile string\n\tIsCA bool\n\tIntDir string\n\tFlavor string\n\tMetadata string\n\tDomain string\n\tIP string\n\tRemote string\n\tLabel string\n\tAuthKey string\n\tModule string\n\tToken string\n\tPIN string\n\tPKCS11Label string\n\tResponderFile string\n\tStatus string\n\tReason int\n\tRevokedAt string\n\tInterval int64\n\tList bool\n\tFamily string\n\tScanner string\n\tResponses string\n\tPath string\n\tUseLocal bool\n}\n\n\/\/ registerFlags defines all cfssl command flags and associates their values with variables.\nfunc registerFlags(c *Config, f *flag.FlagSet) {\n\tf.StringVar(&c.Hostname, \"hostname\", \"\", \"Hostname for the cert, could be a comma-separated hostname list\")\n\tf.StringVar(&c.CertFile, \"cert\", \"\", \"Client certificate that contains the public key\")\n\tf.StringVar(&c.CSRFile, \"csr\", \"\", \"Certificate signature request file for new public key\")\n\tf.StringVar(&c.CAFile, \"ca\", \"ca.pem\", \"CA used to sign the new certificate\")\n\tf.StringVar(&c.CAKeyFile, \"ca-key\", \"ca-key.pem\", \"CA private key\")\n\tf.StringVar(&c.KeyFile, \"key\", \"\", \"private key for the certificate\")\n\tf.StringVar(&c.IntermediatesFile, \"intermediates\", \"\", \"intermediate certs\")\n\tf.StringVar(&c.CABundleFile, \"ca-bundle\", \"\/etc\/cfssl\/ca-bundle.crt\", \"Bundle to be used for root certificates pool\")\n\tf.StringVar(&c.IntBundleFile, \"int-bundle\", \"\/etc\/cfssl\/int-bundle.crt\", \"Bundle to be used for intermediate certificates pool\")\n\tf.StringVar(&c.Address, \"address\", \"127.0.0.1\", \"Address to bind\")\n\tf.IntVar(&c.Port, \"port\", 8888, \"Port to bind\")\n\tf.StringVar(&c.ConfigFile, \"config\", \"\", \"path to configuration file\")\n\tf.StringVar(&c.Profile, \"profile\", \"\", \"signing profile to use\")\n\tf.BoolVar(&c.IsCA, \"initca\", false, \"initialise new CA\")\n\tf.StringVar(&c.IntDir, \"int-dir\", \"\/etc\/cfssl\/intermediates\", \"specify intermediates 
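// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// truncateLargeContent above cuts oversized content at `size` bytes, scanning
// back up to `maxScan` bytes for a newline so the cut lands on a line boundary
// whenever one is near. A self-contained demo of that behaviour:
package main

import "fmt"

func truncateLargeContent(content []byte, size, maxScan int) []byte {
	if len(content) <= size {
		return content
	}
	for i := size - 1; i >= size-maxScan && i >= 0; i-- {
		if content[i] == '\n' {
			return content[:i+1]
		}
	}
	return content[:size]
}

func main() {
	data := []byte("line one\nline two\nline three is rather long")
	// A 24-byte budget with a 16-byte scan window snaps back to the newline
	// after "line two" instead of cutting mid-word.
	fmt.Printf("%q\n", truncateLargeContent(data, 24, 16))
}
// ---- End aside ----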
directory\")\n\tf.StringVar(&c.Flavor, \"flavor\", \"ubiquitous\", \"Bundle Flavor: ubiquitous, optimal and force.\")\n\tf.StringVar(&c.Metadata, \"metadata\", \"\/etc\/cfssl\/ca-bundle.crt.metadata\", \"Metadata file for root certificate presence. The content of the file is a json dictionary (k,v): each key k is SHA-1 digest of a root certificate while value v is a list of key store filenames.\")\n\tf.StringVar(&c.Domain, \"domain\", \"\", \"remote server domain name\")\n\tf.StringVar(&c.IP, \"ip\", \"\", \"remote server ip\")\n\tf.StringVar(&c.Remote, \"remote\", \"\", \"remote CFSSL server\")\n\tf.StringVar(&c.Label, \"label\", \"\", \"key label to use in remote CFSSL server\")\n\tf.StringVar(&c.AuthKey, \"authkey\", \"\", \"key to authenticate requests to remote CFSSL server\")\n\tf.StringVar(&c.ResponderFile, \"responder\", \"\", \"Certificate for OCSP responder\")\n\tf.StringVar(&c.Status, \"status\", \"good\", \"Status of the certificate: good, revoked, unknown\")\n\tf.IntVar(&c.Reason, \"reason\", 0, \"Reason code for revocation\")\n\tf.StringVar(&c.RevokedAt, \"revoked-at\", \"now\", \"Date of revocation (YYYY-MM-DD)\")\n\tf.Int64Var(&c.Interval, \"interval\", int64(4*helpers.OneDay), \"Interval between OCSP updates, in seconds (default: 4 days)\")\n\tf.BoolVar(&c.List, \"list\", false, \"list possible scanners\")\n\tf.StringVar(&c.Family, \"family\", \"\", \"scanner family regular expression\")\n\tf.StringVar(&c.Scanner, \"scanner\", \"\", \"scanner regular expression\")\n\tf.StringVar(&c.Responses, \"responses\", \"\", \"file to load OCSP responses from\")\n\tf.StringVar(&c.Path, \"path\", \"\/\", \"Path on which the server will listen\")\n\tf.StringVar(&c.Password, \"password\", \"\", \"Password for accessing PKCS #12 data passed to bundler\")\n\tf.BoolVar(&c.UseLocal, \"uselocal\", false, \"serve local static files as opposed to compiled ones\")\n\n\tif pkcs11.Enabled {\n\t\tf.StringVar(&c.Module, \"pkcs11-module\", \"\", \"PKCS #11 module\")\n\t\tf.StringVar(&c.Token, \"pkcs11-token\", \"\", \"PKCS #11 token\")\n\t\tf.StringVar(&c.PIN, \"pkcs11-pin\", \"\", \"PKCS #11 user PIN\")\n\t\tf.StringVar(&c.PKCS11Label, \"pkcs11-label\", \"\", \"PKCS #11 label\")\n\t}\n}\n\n\/\/ RootFromConfig returns a universal signer Root structure that can\n\/\/ be used to produce a signer.\nfunc RootFromConfig(c *Config) universal.Root {\n\treturn universal.Root{\n\t\tConfig: map[string]string{\n\t\t\t\"pkcs11-module\": c.Module,\n\t\t\t\"pkcs11-token\": c.Token,\n\t\t\t\"pkcs11-label\": c.PKCS11Label,\n\t\t\t\"pkcs11-user-pin\": c.PIN,\n\t\t\t\"cert-file\": c.CAFile,\n\t\t\t\"key-file\": c.CAKeyFile,\n\t\t},\n\t\tForceRemote: c.Remote != \"\",\n\t}\n}\nSet pkcs11 user pin in environmentpackage cli\n\nimport (\n\t\"flag\"\n\t\"os\"\n\n\t\"github.com\/cloudflare\/cfssl\/config\"\n\t\"github.com\/cloudflare\/cfssl\/helpers\"\n\t\"github.com\/cloudflare\/cfssl\/signer\/pkcs11\"\n\t\"github.com\/cloudflare\/cfssl\/signer\/universal\"\n)\n\n\/\/ Config is a type to hold flag values used by cfssl commands.\ntype Config struct {\n\tHostname string\n\tCertFile string\n\tCSRFile string\n\tCAFile string\n\tCAKeyFile string\n\tKeyFile string\n\tIntermediatesFile string\n\tCABundleFile string\n\tIntBundleFile string\n\tAddress string\n\tPort int\n\tPassword string\n\tConfigFile string\n\tCFG *config.Config\n\tProfile string\n\tIsCA bool\n\tIntDir string\n\tFlavor string\n\tMetadata string\n\tDomain string\n\tIP string\n\tRemote string\n\tLabel string\n\tAuthKey string\n\tModule string\n\tToken string\n\tPIN 
string\n\tPKCS11Label string\n\tResponderFile string\n\tStatus string\n\tReason int\n\tRevokedAt string\n\tInterval int64\n\tList bool\n\tFamily string\n\tScanner string\n\tResponses string\n\tPath string\n\tUseLocal bool\n}\n\n\/\/ registerFlags defines all cfssl command flags and associates their values with variables.\nfunc registerFlags(c *Config, f *flag.FlagSet) {\n\tf.StringVar(&c.Hostname, \"hostname\", \"\", \"Hostname for the cert, could be a comma-separated hostname list\")\n\tf.StringVar(&c.CertFile, \"cert\", \"\", \"Client certificate that contains the public key\")\n\tf.StringVar(&c.CSRFile, \"csr\", \"\", \"Certificate signature request file for new public key\")\n\tf.StringVar(&c.CAFile, \"ca\", \"ca.pem\", \"CA used to sign the new certificate\")\n\tf.StringVar(&c.CAKeyFile, \"ca-key\", \"ca-key.pem\", \"CA private key\")\n\tf.StringVar(&c.KeyFile, \"key\", \"\", \"private key for the certificate\")\n\tf.StringVar(&c.IntermediatesFile, \"intermediates\", \"\", \"intermediate certs\")\n\tf.StringVar(&c.CABundleFile, \"ca-bundle\", \"\/etc\/cfssl\/ca-bundle.crt\", \"Bundle to be used for root certificates pool\")\n\tf.StringVar(&c.IntBundleFile, \"int-bundle\", \"\/etc\/cfssl\/int-bundle.crt\", \"Bundle to be used for intermediate certificates pool\")\n\tf.StringVar(&c.Address, \"address\", \"127.0.0.1\", \"Address to bind\")\n\tf.IntVar(&c.Port, \"port\", 8888, \"Port to bind\")\n\tf.StringVar(&c.ConfigFile, \"config\", \"\", \"path to configuration file\")\n\tf.StringVar(&c.Profile, \"profile\", \"\", \"signing profile to use\")\n\tf.BoolVar(&c.IsCA, \"initca\", false, \"initialise new CA\")\n\tf.StringVar(&c.IntDir, \"int-dir\", \"\/etc\/cfssl\/intermediates\", \"specify intermediates directory\")\n\tf.StringVar(&c.Flavor, \"flavor\", \"ubiquitous\", \"Bundle Flavor: ubiquitous, optimal and force.\")\n\tf.StringVar(&c.Metadata, \"metadata\", \"\/etc\/cfssl\/ca-bundle.crt.metadata\", \"Metadata file for root certificate presence. 
The content of the file is a json dictionary (k,v): each key k is SHA-1 digest of a root certificate while value v is a list of key store filenames.\")\n\tf.StringVar(&c.Domain, \"domain\", \"\", \"remote server domain name\")\n\tf.StringVar(&c.IP, \"ip\", \"\", \"remote server ip\")\n\tf.StringVar(&c.Remote, \"remote\", \"\", \"remote CFSSL server\")\n\tf.StringVar(&c.Label, \"label\", \"\", \"key label to use in remote CFSSL server\")\n\tf.StringVar(&c.AuthKey, \"authkey\", \"\", \"key to authenticate requests to remote CFSSL server\")\n\tf.StringVar(&c.ResponderFile, \"responder\", \"\", \"Certificate for OCSP responder\")\n\tf.StringVar(&c.Status, \"status\", \"good\", \"Status of the certificate: good, revoked, unknown\")\n\tf.IntVar(&c.Reason, \"reason\", 0, \"Reason code for revocation\")\n\tf.StringVar(&c.RevokedAt, \"revoked-at\", \"now\", \"Date of revocation (YYYY-MM-DD)\")\n\tf.Int64Var(&c.Interval, \"interval\", int64(4*helpers.OneDay), \"Interval between OCSP updates, in seconds (default: 4 days)\")\n\tf.BoolVar(&c.List, \"list\", false, \"list possible scanners\")\n\tf.StringVar(&c.Family, \"family\", \"\", \"scanner family regular expression\")\n\tf.StringVar(&c.Scanner, \"scanner\", \"\", \"scanner regular expression\")\n\tf.StringVar(&c.Responses, \"responses\", \"\", \"file to load OCSP responses from\")\n\tf.StringVar(&c.Path, \"path\", \"\/\", \"Path on which the server will listen\")\n\tf.StringVar(&c.Password, \"password\", \"\", \"Password for accessing PKCS #12 data passed to bundler\")\n\tf.BoolVar(&c.UseLocal, \"uselocal\", false, \"serve local static files as opposed to compiled ones\")\n\n\tif pkcs11.Enabled {\n\t\tf.StringVar(&c.Module, \"pkcs11-module\", \"\", \"PKCS #11 module\")\n\t\tf.StringVar(&c.Token, \"pkcs11-token\", \"\", \"PKCS #11 token\")\n\t\tf.StringVar(&c.PIN, \"pkcs11-pin\", os.Getenv(\"USER_PIN\"), \"PKCS #11 user PIN\")\n\t\tf.StringVar(&c.PKCS11Label, \"pkcs11-label\", \"\", \"PKCS #11 label\")\n\t}\n}\n\n\/\/ RootFromConfig returns a universal signer Root structure that can\n\/\/ be used to produce a signer.\nfunc RootFromConfig(c *Config) universal.Root {\n\treturn universal.Root{\n\t\tConfig: map[string]string{\n\t\t\t\"pkcs11-module\": c.Module,\n\t\t\t\"pkcs11-token\": c.Token,\n\t\t\t\"pkcs11-label\": c.PKCS11Label,\n\t\t\t\"pkcs11-user-pin\": c.PIN,\n\t\t\t\"cert-file\": c.CAFile,\n\t\t\t\"key-file\": c.CAKeyFile,\n\t\t},\n\t\tForceRemote: c.Remote != \"\",\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (C) 2015 Scaleway. 
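// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// The cfssl commit above seeds the pkcs11-pin flag's default from the USER_PIN
// environment variable, so the PIN need not appear in shell history or `ps`
// output. The same pattern in isolation; an explicit flag still overrides the
// environment because the env var only supplies the default at registration:
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	pin := flag.String("pkcs11-pin", os.Getenv("USER_PIN"), "PKCS #11 user PIN")
	flag.Parse()
	fmt.Println("PIN provided:", *pin != "")
}
// ---- End aside ----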
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.md file.\n\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/scaleway\/scaleway-cli\/pkg\/api\"\n\t\"github.com\/scaleway\/scaleway-cli\/pkg\/scwversion\"\n)\n\nvar cmdUserdata = &Command{\n\tExec: runUserdata,\n\tUsageLine: \"_userdata [OPTIONS] SERVER [FIELD[=VALUE]]\",\n\tDescription: \"\",\n\tHidden: true,\n\tHelp: \"List, read and write and delete server's userdata\",\n\tExamples: `\n $ scw _userdata myserver\n $ scw _userdata myserver key\n $ scw _userdata myserver key=value\n $ scw _userdata myserver key=\"\"\n`,\n}\n\nfunc init() {\n\tcmdUserdata.Flag.BoolVar(&userdataHelp, []string{\"h\", \"-help\"}, false, \"Print usage\")\n}\n\n\/\/ Flags\nvar userdataHelp bool \/\/ -h, --help flag\n\nfunc runUserdata(cmd *Command, args []string) error {\n\tif userdataHelp {\n\t\treturn cmd.PrintUsage()\n\t}\n\tif len(args) < 1 {\n\t\treturn cmd.PrintShortUsage()\n\t}\n\tmetadata := false\n\tctx := cmd.GetContext(args)\n\tvar API *api.ScalewayAPI\n\tvar err error\n\tvar serverID string\n\tif args[0] == \"local\" {\n\t\tAPI, err = api.NewScalewayAPI(\"\", \"\", scwversion.UserAgent())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmetadata = true\n\t} else {\n\t\tif ctx.API == nil {\n\t\t\treturn fmt.Errorf(\"You need to login first: 'scw login'\")\n\t\t}\n\t\tserverID, err = ctx.API.GetServerID(args[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tAPI = ctx.API\n\t}\n\n\tswitch len(args) {\n\tcase 1:\n\t\t\/\/ List userdata\n\t\tres, err := API.GetUserdatas(serverID, metadata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, key := range res.UserData {\n\t\t\tfmt.Fprintln(ctx.Stdout, key)\n\t\t}\n\tdefault:\n\t\tparts := strings.Split(args[1], \"=\")\n\t\tkey := parts[0]\n\t\tswitch len(parts) {\n\t\tcase 1:\n\t\t\t\/\/ Get userdatas\n\t\t\tres, err := API.GetUserdata(serverID, key, metadata)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintf(ctx.Stdout, \"%s\\n\", res.String())\n\t\tdefault:\n\t\t\tvalue := args[1][len(parts[0])+1:]\n\t\t\tif value != \"\" {\n\t\t\t\tvar data []byte\n\t\t\t\t\/\/ Set userdata\n\t\t\t\tif value[0] == '@' {\n\t\t\t\t\tdata, err = ioutil.ReadFile(value[1:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tdata = []byte(value)\n\t\t\t\t}\n\t\t\t\terr := API.PatchUserdata(serverID, key, data, metadata)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(ctx.Stdout, key)\n\t\t\t} else {\n\t\t\t\t\/\/ Delete userdata\n\t\t\t\terr := API.DeleteUserdata(serverID, key, metadata)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n_userdata: handles USERDATA=@~\/path\/to\/file\/\/ Copyright (C) 2015 Scaleway. 
All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.md file.\n\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/scaleway\/scaleway-cli\/pkg\/api\"\n\t\"github.com\/scaleway\/scaleway-cli\/pkg\/config\"\n\t\"github.com\/scaleway\/scaleway-cli\/pkg\/scwversion\"\n)\n\nvar cmdUserdata = &Command{\n\tExec: runUserdata,\n\tUsageLine: \"_userdata [OPTIONS] SERVER [FIELD[=VALUE]]\",\n\tDescription: \"\",\n\tHidden: true,\n\tHelp: \"List, read and write and delete server's userdata\",\n\tExamples: `\n $ scw _userdata myserver\n $ scw _userdata myserver key\n $ scw _userdata myserver key=value\n $ scw _userdata myserver key=\"\"\n`,\n}\n\nfunc init() {\n\tcmdUserdata.Flag.BoolVar(&userdataHelp, []string{\"h\", \"-help\"}, false, \"Print usage\")\n}\n\n\/\/ Flags\nvar userdataHelp bool \/\/ -h, --help flag\n\nfunc runUserdata(cmd *Command, args []string) error {\n\tif userdataHelp {\n\t\treturn cmd.PrintUsage()\n\t}\n\tif len(args) < 1 {\n\t\treturn cmd.PrintShortUsage()\n\t}\n\tmetadata := false\n\tctx := cmd.GetContext(args)\n\tvar API *api.ScalewayAPI\n\tvar err error\n\tvar serverID string\n\tif args[0] == \"local\" {\n\t\tAPI, err = api.NewScalewayAPI(\"\", \"\", scwversion.UserAgent())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmetadata = true\n\t} else {\n\t\tif ctx.API == nil {\n\t\t\treturn fmt.Errorf(\"You need to login first: 'scw login'\")\n\t\t}\n\t\tserverID, err = ctx.API.GetServerID(args[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tAPI = ctx.API\n\t}\n\n\tswitch len(args) {\n\tcase 1:\n\t\t\/\/ List userdata\n\t\tres, err := API.GetUserdatas(serverID, metadata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, key := range res.UserData {\n\t\t\tfmt.Fprintln(ctx.Stdout, key)\n\t\t}\n\tdefault:\n\t\tparts := strings.Split(args[1], \"=\")\n\t\tkey := parts[0]\n\t\tswitch len(parts) {\n\t\tcase 1:\n\t\t\t\/\/ Get userdatas\n\t\t\tres, err := API.GetUserdata(serverID, key, metadata)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintf(ctx.Stdout, \"%s\\n\", res.String())\n\t\tdefault:\n\t\t\tvalue := args[1][len(parts[0])+1:]\n\t\t\tif value != \"\" {\n\t\t\t\tvar data []byte\n\t\t\t\t\/\/ Set userdata\n\t\t\t\tif value[0] == '@' {\n\t\t\t\t\tif len(value) > 1 && value[1] == '~' {\n\t\t\t\t\t\thome, err := config.GetHomeDir()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalue = \"@\" + home + value[2:]\n\t\t\t\t\t}\n\t\t\t\t\tdata, err = ioutil.ReadFile(value[1:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tdata = []byte(value)\n\t\t\t\t}\n\t\t\t\terr := API.PatchUserdata(serverID, key, data, metadata)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(ctx.Stdout, key)\n\t\t\t} else {\n\t\t\t\t\/\/ Delete userdata\n\t\t\t\terr := API.DeleteUserdata(serverID, key, metadata)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package assets\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/yosssi\/gcss\"\n)\n\n\/\/ CompileJavascripts compiles a set of JS files into a single large file by\n\/\/ appending them all to each other. 
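// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// The scaleway-cli commit above rewrites "@~/path" values to "@" + home +
// "/path" before reading the referenced file. A standalone sketch of the same
// expansion using os/user instead of the CLI's internal config.GetHomeDir:
package main

import (
	"fmt"
	"os/user"
)

// expandTildeRef expands a leading "@~" in an @file reference to the current
// user's home directory; other values pass through unchanged.
func expandTildeRef(value string) (string, error) {
	if len(value) < 2 || value[0] != '@' || value[1] != '~' {
		return value, nil
	}
	u, err := user.Current()
	if err != nil {
		return "", err
	}
	return "@" + u.HomeDir + value[2:], nil
}

func main() {
	v, err := expandTildeRef("@~/keys/server.pub")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}
// ---- End aside ----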
Files are appended in alphabetical order\n\/\/ so we depend on the fact that there aren't too many interdependencies\n\/\/ between files. A common requirement can be given an underscore prefix to be\n\/\/ loaded first.\nfunc CompileJavascripts(inPath, outPath string) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Debugf(\"Compiled script assets in %v.\", time.Now().Sub(start))\n\t}()\n\n\tlog.Debugf(\"Building: %v\", outPath)\n\n\tjavascriptInfos, err := ioutil.ReadDir(inPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutFile, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\tfor _, javascriptInfo := range javascriptInfos {\n\t\tif isHidden(javascriptInfo.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Including: %v\", javascriptInfo.Name())\n\n\t\tinFile, err := os.Open(path.Join(inPath, javascriptInfo.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutFile.WriteString(\"\/* \" + javascriptInfo.Name() + \" *\/\\n\\n\")\n\t\toutFile.WriteString(\"(function() {\\n\\n\")\n\n\t\t_, err = io.Copy(outFile, inFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutFile.WriteString(\"\\n\\n\")\n\t\toutFile.WriteString(\"}).call(this);\\n\\n\")\n\t}\n\n\treturn nil\n}\n\n\/\/ CompileStylesheets compiles a set of stylesheet files into a single large\n\/\/ file by appending them all to each other. Files are appended in alphabetical\n\/\/ order so we depend on the fact that there aren't too many interdependencies\n\/\/ between files. CSS reset in particular is given an underscore prefix so that\n\/\/ it gets to load first.\n\/\/\n\/\/ If a file has a \".sass\" suffix, we attempt to render it as GCSS. This isn't\n\/\/ a perfect symmetry, but works well enough for these cases.\nfunc CompileStylesheets(inPath, outPath string) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Debugf(\"Compiled stylesheet assets in %v.\", time.Now().Sub(start))\n\t}()\n\n\tlog.Debugf(\"Building: %v\", outPath)\n\n\tstylesheetInfos, err := ioutil.ReadDir(inPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutFile, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\tfor _, stylesheetInfo := range stylesheetInfos {\n\t\tif isHidden(stylesheetInfo.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Including: %v\", stylesheetInfo.Name())\n\n\t\tinFile, err := os.Open(path.Join(inPath, stylesheetInfo.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutFile.WriteString(\"\/* \" + stylesheetInfo.Name() + \" *\/\\n\\n\")\n\n\t\tif strings.HasSuffix(stylesheetInfo.Name(), \".sass\") {\n\t\t\t_, err := gcss.Compile(outFile, inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error compiling %v: %v\",\n\t\t\t\t\tstylesheetInfo.Name(), err)\n\t\t\t}\n\t\t} else {\n\t\t\t_, err := io.Copy(outFile, inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\toutFile.WriteString(\"\\n\\n\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Detects a hidden file, i.e. one that starts with a dot.\nfunc isHidden(file string) bool {\n\treturn strings.HasPrefix(file, \".\")\n}\nIgnore non-JS files in content\/javascripts\/package assets\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/yosssi\/gcss\"\n)\n\n\/\/ CompileJavascripts compiles a set of JS files into a single large file by\n\/\/ appending them all to each other. 
Files are appended in alphabetical order\n\/\/ so we depend on the fact that there aren't too many interdependencies\n\/\/ between files. A common requirement can be given an underscore prefix to be\n\/\/ loaded first.\nfunc CompileJavascripts(inPath, outPath string) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Debugf(\"Compiled script assets in %v.\", time.Now().Sub(start))\n\t}()\n\n\tlog.Debugf(\"Building: %v\", outPath)\n\n\tjavascriptInfos, err := ioutil.ReadDir(inPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutFile, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\tfor _, javascriptInfo := range javascriptInfos {\n\t\tif isHidden(javascriptInfo.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Including: %v\", javascriptInfo.Name())\n\n\t\tinFile, err := os.Open(path.Join(inPath, javascriptInfo.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutFile.WriteString(\"\/* \" + javascriptInfo.Name() + \" *\/\\n\\n\")\n\t\toutFile.WriteString(\"(function() {\\n\\n\")\n\n\t\t\/\/ Ignore non-JS files in the directory (I have a README in there)\n\t\tif strings.HasSuffix(javascriptInfo.Name(), \".js\") {\n\t\t\t_, err = io.Copy(outFile, inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\toutFile.WriteString(\"\\n\\n\")\n\t\toutFile.WriteString(\"}).call(this);\\n\\n\")\n\t}\n\n\treturn nil\n}\n\n\/\/ CompileStylesheets compiles a set of stylesheet files into a single large\n\/\/ file by appending them all to each other. Files are appended in alphabetical\n\/\/ order so we depend on the fact that there aren't too many interdependencies\n\/\/ between files. CSS reset in particular is given an underscore prefix so that\n\/\/ it gets to load first.\n\/\/\n\/\/ If a file has a \".sass\" suffix, we attempt to render it as GCSS. This isn't\n\/\/ a perfect symmetry, but works well enough for these cases.\nfunc CompileStylesheets(inPath, outPath string) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tlog.Debugf(\"Compiled stylesheet assets in %v.\", time.Now().Sub(start))\n\t}()\n\n\tlog.Debugf(\"Building: %v\", outPath)\n\n\tstylesheetInfos, err := ioutil.ReadDir(inPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutFile, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outFile.Close()\n\n\tfor _, stylesheetInfo := range stylesheetInfos {\n\t\tif isHidden(stylesheetInfo.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Including: %v\", stylesheetInfo.Name())\n\n\t\tinFile, err := os.Open(path.Join(inPath, stylesheetInfo.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutFile.WriteString(\"\/* \" + stylesheetInfo.Name() + \" *\/\\n\\n\")\n\n\t\tif strings.HasSuffix(stylesheetInfo.Name(), \".sass\") {\n\t\t\t_, err := gcss.Compile(outFile, inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error compiling %v: %v\",\n\t\t\t\t\tstylesheetInfo.Name(), err)\n\t\t\t}\n\t\t} else {\n\t\t\t_, err := io.Copy(outFile, inFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\toutFile.WriteString(\"\\n\\n\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Detects a hidden file, i.e. 
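// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// The commit above skips copying non-.js file bodies, though it still emits
// the comment header and IIFE wrapper for them. A sketch of filtering entries
// up front instead, so non-JS files never reach the bundle at all; keepJS is
// a hypothetical helper:
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// keepJS reports whether a directory entry belongs in the bundle: visible
// files with a .js extension only.
func keepJS(name string) bool {
	return !strings.HasPrefix(name, ".") && filepath.Ext(name) == ".js"
}

func main() {
	for _, name := range []string{"_common.js", "README", "app.js", ".hidden.js"} {
		fmt.Println(name, keepJS(name))
	}
}
// ---- End aside ----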
one that starts with a dot.\nfunc isHidden(file string) bool {\n\treturn strings.HasPrefix(file, \".\")\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/ui\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar cmdAlias = &Command{\n\tRun: alias,\n\tUsage: \"alias [-s] [<shell>]\",\n\tLong: `Show shell instructions for wrapping git.\n\n## Options\n\t-s\n\t\tOutput shell script suitable for 'eval'.\n\n\t<shell>\n\t\tSpecify the type of shell (default: \"$SHELL\" environment variable).\n\n## See also:\n\nhub(1)\n`,\n}\n\nvar flagAliasScript bool\n\nfunc init() {\n\tcmdAlias.Flag.BoolVarP(&flagAliasScript, \"script\", \"s\", false, \"SCRIPT\")\n\tCmdRunner.Use(cmdAlias)\n}\n\nfunc alias(command *Command, args *Args) {\n\tvar shell string\n\tif args.ParamsSize() > 0 {\n\t\tshell = args.FirstParam()\n\t} else {\n\t\tshell = os.Getenv(\"SHELL\")\n\t}\n\n\tif shell == \"\" {\n\t\tcmd := \"hub alias <shell>\"\n\t\tif flagAliasScript {\n\t\t\tcmd = \"hub alias -s <shell>\"\n\t\t}\n\t\tutils.Check(fmt.Errorf(\"Error: couldn't detect shell type. Please specify your shell with `%s`\", cmd))\n\t}\n\n\tshells := []string{\"bash\", \"zsh\", \"sh\", \"ksh\", \"csh\", \"tcsh\", \"fish\"}\n\tshell = filepath.Base(shell)\n\tvar validShell bool\n\tfor _, s := range shells {\n\t\tif s == shell {\n\t\t\tvalidShell = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !validShell {\n\t\terr := fmt.Errorf(\"hub alias: unsupported shell\\nsupported shells: %s\", strings.Join(shells, \" \"))\n\t\tutils.Check(err)\n\t}\n\n\tif flagAliasScript {\n\t\tvar alias string\n\t\tswitch shell {\n\t\tcase \"csh\", \"tcsh\":\n\t\t\talias = \"alias git hub\"\n\t\tdefault:\n\t\t\talias = \"alias git=hub\"\n\t\t}\n\n\t\tui.Println(alias)\n\t} else {\n\t\tvar profile string\n\t\tswitch shell {\n\t\tcase \"bash\":\n\t\t\tprofile = \"~\/.bash_profile\"\n\t\tcase \"zsh\":\n\t\t\tprofile = \"~\/.zshrc\"\n\t\tcase \"ksh\":\n\t\t\tprofile = \"~\/.profile\"\n\t\tcase \"fish\":\n\t\t\tprofile = \"~\/.config\/fish\/functions\/git.fish\"\n\t\tcase \"csh\":\n\t\t\tprofile = \"~\/.cshrc\"\n\t\tcase \"tcsh\":\n\t\t\tprofile = \"~\/.tcshrc\"\n\t\tdefault:\n\t\t\tprofile = \"your profile\"\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"# Wrap git automatically by adding the following to %s:\\n\", profile)\n\t\tui.Println(msg)\n\n\t\tvar eval string\n\t\tswitch shell {\n\t\tcase \"fish\":\n\t\t\teval = `function git --description 'Alias for hub, which wraps git to provide extra functionality with GitHub.'\n\thub $argv\nend`\n\t\tcase \"csh\", \"tcsh\":\n\t\t\teval = \"eval \\\"`hub alias -s`\\\"\"\n\t\tdefault:\n\t\t\teval = `eval \"$(hub alias -s)\"`\n\t\t}\n\n\t\tindent := regexp.MustCompile(`(?m)^\\t+`)\n\t\teval = indent.ReplaceAllStringFunc(eval, func(match string) string {\n\t\t\treturn strings.Repeat(\" \", len(match) * 4)\n\t\t})\n\n\t\tui.Println(eval)\n\t}\n\n\tos.Exit(0)\n}\ngo fmtpackage commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/ui\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar cmdAlias = &Command{\n\tRun: alias,\n\tUsage: \"alias [-s] [<shell>]\",\n\tLong: `Show shell instructions for wrapping git.\n\n## Options\n\t-s\n\t\tOutput shell script suitable for 'eval'.\n\n\t<shell>\n\t\tSpecify the type of shell (default: \"$SHELL\" environment variable).\n\n## See also:\n\nhub(1)\n`,\n}\n\nvar flagAliasScript bool\n\nfunc init() {\n\tcmdAlias.Flag.BoolVarP(&flagAliasScript, \"script\", \"s\", false, 
\"SCRIPT\")\n\tCmdRunner.Use(cmdAlias)\n}\n\nfunc alias(command *Command, args *Args) {\n\tvar shell string\n\tif args.ParamsSize() > 0 {\n\t\tshell = args.FirstParam()\n\t} else {\n\t\tshell = os.Getenv(\"SHELL\")\n\t}\n\n\tif shell == \"\" {\n\t\tcmd := \"hub alias \"\n\t\tif flagAliasScript {\n\t\t\tcmd = \"hub alias -s \"\n\t\t}\n\t\tutils.Check(fmt.Errorf(\"Error: couldn't detect shell type. Please specify your shell with `%s`\", cmd))\n\t}\n\n\tshells := []string{\"bash\", \"zsh\", \"sh\", \"ksh\", \"csh\", \"tcsh\", \"fish\"}\n\tshell = filepath.Base(shell)\n\tvar validShell bool\n\tfor _, s := range shells {\n\t\tif s == shell {\n\t\t\tvalidShell = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !validShell {\n\t\terr := fmt.Errorf(\"hub alias: unsupported shell\\nsupported shells: %s\", strings.Join(shells, \" \"))\n\t\tutils.Check(err)\n\t}\n\n\tif flagAliasScript {\n\t\tvar alias string\n\t\tswitch shell {\n\t\tcase \"csh\", \"tcsh\":\n\t\t\talias = \"alias git hub\"\n\t\tdefault:\n\t\t\talias = \"alias git=hub\"\n\t\t}\n\n\t\tui.Println(alias)\n\t} else {\n\t\tvar profile string\n\t\tswitch shell {\n\t\tcase \"bash\":\n\t\t\tprofile = \"~\/.bash_profile\"\n\t\tcase \"zsh\":\n\t\t\tprofile = \"~\/.zshrc\"\n\t\tcase \"ksh\":\n\t\t\tprofile = \"~\/.profile\"\n\t\tcase \"fish\":\n\t\t\tprofile = \"~\/.config\/fish\/functions\/git.fish\"\n\t\tcase \"csh\":\n\t\t\tprofile = \"~\/.cshrc\"\n\t\tcase \"tcsh\":\n\t\t\tprofile = \"~\/.tcshrc\"\n\t\tdefault:\n\t\t\tprofile = \"your profile\"\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"# Wrap git automatically by adding the following to %s:\\n\", profile)\n\t\tui.Println(msg)\n\n\t\tvar eval string\n\t\tswitch shell {\n\t\tcase \"fish\":\n\t\t\teval = `function git --description 'Alias for hub, which wraps git to provide extra functionality with GitHub.'\n\thub $argv\nend`\n\t\tcase \"csh\", \"tcsh\":\n\t\t\teval = \"eval \\\"`hub alias -s`\\\"\"\n\t\tdefault:\n\t\t\teval = `eval \"$(hub alias -s)\"`\n\t\t}\n\n\t\tindent := regexp.MustCompile(`(?m)^\\t+`)\n\t\teval = indent.ReplaceAllStringFunc(eval, func(match string) string {\n\t\t\treturn strings.Repeat(\" \", len(match)*4)\n\t\t})\n\n\t\tui.Println(eval)\n\t}\n\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/jamesnetherton\/homehub-cli\/service\"\n)\n\n\/\/ NewRebootCommand creates a new command to invoke the Hub Reboot function\nfunc NewRebootCommand(authenticatingCommand *GenericCommand) *AuthenticationRequiringCommand {\n\treturn &AuthenticationRequiringCommand{\n\t\tGenericCommand: GenericCommand{\n\t\t\tName: \"Reboot\",\n\t\t\tDescription: \"Reboots the Home Hub\",\n\t\t\tExec: func(context *CommandContext) {\n\t\t\t\tcontext.SetResult(nil, service.GetHub().Reboot())\n\t\t\t},\n\t\t\tPostExec: func(context *CommandContext) {\n\t\t\t\tfmt.Print(\"\\nWaiting for Home Hub to reboot...\")\n\t\t\t\tattempts := 0\n\t\t\t\tfor {\n\t\t\t\t\tattempts++\n\t\t\t\t\tresponse, err := http.Get(service.GetHub().URL)\n\t\t\t\t\tif err != nil || response.StatusCode != 200 {\n\t\t\t\t\t\tif attempts == 24 {\n\t\t\t\t\t\t\tfmt.Println(\"\\nGave up waiting for Home Hub to become available\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfmt.Print(\".\")\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println()\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5000 * time.Millisecond)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\tAuthenticatingCommand: authenticatingCommand,\n\t}\n}\nAdd sleep after sending reboot commandpackage 
cmd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/jamesnetherton\/homehub-cli\/service\"\n)\n\n\/\/ NewRebootCommand creates a new command to invoke the Hub Reboot function\nfunc NewRebootCommand(authenticatingCommand *GenericCommand) *AuthenticationRequiringCommand {\n\treturn &AuthenticationRequiringCommand{\n\t\tGenericCommand: GenericCommand{\n\t\t\tName: \"Reboot\",\n\t\t\tDescription: \"Reboots the Home Hub\",\n\t\t\tExec: func(context *CommandContext) {\n\t\t\t\tcontext.SetResult(nil, service.GetHub().Reboot())\n\t\t\t},\n\t\t\tPostExec: func(context *CommandContext) {\n\t\t\t\tfmt.Print(\"\\nWaiting for Home Hub to reboot...\")\n\n\t\t\t\t\/\/ Give the hub a chance to initialise its reboot sequence\n\t\t\t\ttime.Sleep(10000 * time.Millisecond)\n\n\t\t\t\tattempts := 0\n\t\t\t\tfor {\n\t\t\t\t\tattempts++\n\t\t\t\t\tresponse, err := http.Get(service.GetHub().URL)\n\t\t\t\t\tif err != nil || response.StatusCode != 200 {\n\t\t\t\t\t\tif attempts == 25 {\n\t\t\t\t\t\t\tfmt.Println(\"\\nGave up waiting for Home Hub to become available\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfmt.Print(\".\")\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println()\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(5000 * time.Millisecond)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\tAuthenticatingCommand: authenticatingCommand,\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mapper\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/converter\"\n\t\"github.com\/andreaskoch\/allmark\/parser\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/view\"\n\t\"regexp\"\n\t\"time\"\n)\n\n\/\/ Pattern which matches all HTML\/XML tags\nvar HtmlTagPattern = regexp.MustCompile(`\\<[^\\>]*\\>`)\n\nfunc createMessageMapperFunc(pathProvider *path.Provider, targetFormat string) Mapper {\n\treturn func(item *repository.Item) view.Model {\n\n\t\tparsed, err := converter.Convert(item, targetFormat)\n\t\tif err != nil {\n\t\t\treturn view.Error(fmt.Sprintf(\"%s\", err))\n\t\t}\n\n\t\treturn view.Model{\n\t\t\tPath: pathProvider.GetWebRoute(item),\n\t\t\tTitle: getTitle(parsed),\n\t\t\tDescription: getDescription(parsed),\n\t\t\tContent: parsed.ConvertedContent,\n\t\t\tLanguageTag: getTwoLetterLanguageCode(parsed.MetaData.Language),\n\t\t}\n\t}\n}\n\nfunc getDescription(parsedResult *parser.Result) string {\n\treturn parsedResult.MetaData.Date.Format(time.RFC850)\n}\n\nfunc getTitle(parsedResult *parser.Result) string {\n\ttext := HtmlTagPattern.ReplaceAllString(parsedResult.ConvertedContent, \"\")\n\texcerpt := getTextExcerpt(text, 30)\n\ttime := parsedResult.MetaData.Date.Format(time.RFC850)\n\n\treturn fmt.Sprintf(\"%s ◦ %s\", excerpt, time)\n}\n\nfunc getTextExcerpt(text string, length int) string {\n\n\tif len(text) <= length {\n\t\treturn text\n\t}\n\n\treturn text[0:length] + \" ...\"\n}\nChanged the title format for messages\/\/ Copyright 2013 Andreas Koch. 
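// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// The reboot commit above sleeps before polling so the hub actually goes down
// first; otherwise the first GET can hit the still-running old instance and
// report success prematurely. The pattern as a generic helper (names and the
// example URL are hypothetical):
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHTTP polls url every interval until it answers 200 or attempts run
// out. grace delays the first probe so a restarting service is not mistaken
// for a recovered one.
func waitForHTTP(url string, grace, interval time.Duration, attempts int) bool {
	time.Sleep(grace)
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return true
			}
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	fmt.Println(waitForHTTP("http://192.168.1.254", 10*time.Second, 5*time.Second, 25))
}
// ---- End aside ----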
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage mapper\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/converter\"\n\t\"github.com\/andreaskoch\/allmark\/parser\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/view\"\n\t\"regexp\"\n\t\"time\"\n)\n\n\/\/ Pattern which matches all HTML\/XML tags\nvar HtmlTagPattern = regexp.MustCompile(`\\<[^\\>]*\\>`)\n\nfunc createMessageMapperFunc(pathProvider *path.Provider, targetFormat string) Mapper {\n\treturn func(item *repository.Item) view.Model {\n\n\t\tparsed, err := converter.Convert(item, targetFormat)\n\t\tif err != nil {\n\t\t\treturn view.Error(fmt.Sprintf(\"%s\", err))\n\t\t}\n\n\t\treturn view.Model{\n\t\t\tPath: pathProvider.GetWebRoute(item),\n\t\t\tTitle: getTitle(parsed),\n\t\t\tDescription: getDescription(parsed),\n\t\t\tContent: parsed.ConvertedContent,\n\t\t\tLanguageTag: getTwoLetterLanguageCode(parsed.MetaData.Language),\n\t\t}\n\t}\n}\n\nfunc getDescription(parsedResult *parser.Result) string {\n\treturn parsedResult.MetaData.Date.Format(time.RFC850)\n}\n\nfunc getTitle(parsedResult *parser.Result) string {\n\ttext := HtmlTagPattern.ReplaceAllString(parsedResult.ConvertedContent, \"\")\n\texcerpt := getTextExcerpt(text, 30)\n\ttime := parsedResult.MetaData.Date.Format(time.RFC850)\n\n\treturn fmt.Sprintf(\"%s: %s\", time, excerpt)\n}\n\nfunc getTextExcerpt(text string, length int) string {\n\n\tif len(text) <= length {\n\t\treturn text\n\t}\n\n\treturn text[0:length] + \" ...\"\n}\n<|endoftext|>"} {"text":"package caddy\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n)\n\n\/*\n\/\/ TODO\nfunc TestCaddyStartStop(t *testing.T) {\n\tcaddyfile := \"localhost:1984\"\n\n\tfor i := 0; i < 2; i++ {\n\t\t_, err := Start(CaddyfileInput{Contents: []byte(caddyfile)})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error starting, iteration %d: %v\", i, err)\n\t\t}\n\n\t\tclient := http.Client{\n\t\t\tTimeout: time.Duration(2 * time.Second),\n\t\t}\n\t\tresp, err := client.Get(\"http:\/\/localhost:1984\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected GET request to succeed (iteration %d), but it failed: %v\", i, err)\n\t\t}\n\t\tresp.Body.Close()\n\n\t\terr = Stop()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error stopping, iteration %d: %v\", i, err)\n\t\t}\n\t}\n}\n*\/\n\nfunc TestIsLoopback(t *testing.T) {\n\tfor i, test := range []struct {\n\t\tinput string\n\t\texpect bool\n\t}{\n\t\t{\"example.com\", false},\n\t\t{\"localhost\", true},\n\t\t{\"localhost:1234\", true},\n\t\t{\"localhost:\", true},\n\t\t{\"127.0.0.1\", true},\n\t\t{\"127.0.0.1:443\", true},\n\t\t{\"127.0.1.5\", true},\n\t\t{\"10.0.0.5\", false},\n\t\t{\"12.7.0.1\", false},\n\t\t{\"[::1]\", true},\n\t\t{\"[::1]:1234\", true},\n\t\t{\"::1\", true},\n\t\t{\"::\", false},\n\t\t{\"[::]\", false},\n\t\t{\"local\", false},\n\t} {\n\t\tif got, want := IsLoopback(test.input), test.expect; got != want {\n\t\t\tt.Errorf(\"Test %d (%s): expected %v but was %v\", i, test.input, want, got)\n\t\t}\n\t}\n}\n\nfunc TestListenerAddrEqual(t *testing.T) {\n\tln1, err := net.Listen(\"tcp\", \"[::]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln1.Close()\n\n\tln1port := strconv.Itoa(ln1.Addr().(*net.TCPAddr).Port)\n\n\tln2, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln2.Close()\n\n\tln2port := 
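// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// getTextExcerpt above slices the string by bytes, which can split a
// multi-byte UTF-8 sequence when the rendered content contains non-ASCII
// text. A rune-aware variant, if that matters for the excerpt:
package main

import "fmt"

func runeExcerpt(text string, length int) string {
	runes := []rune(text)
	if len(runes) <= length {
		return text
	}
	return string(runes[:length]) + " ..."
}

func main() {
	fmt.Println(runeExcerpt("Ünïcödé text that runs on and on and on", 10))
}
// ---- End aside ----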
strconv.Itoa(ln2.Addr().(*net.TCPAddr).Port)\n\n\tfor i, test := range []struct {\n\t\tln net.Listener\n\t\taddr string\n\t\texpect bool\n\t}{\n\t\t{ln1, \":1234\", false},\n\t\t{ln1, \"0.0.0.0:1234\", false},\n\t\t{ln1, \":\" + ln1port + \"\", true},\n\t\t{ln1, \"0.0.0.0:\" + ln1port + \"\", true},\n\t\t{ln2, \"127.0.0.1:1234\", false},\n\t\t{ln2, \":\" + ln2port + \"\", false},\n\t\t{ln2, \"127.0.0.1:\" + ln2port + \"\", true},\n\t} {\n\t\tif got, want := listenerAddrEqual(test.ln, test.addr), test.expect; got != want {\n\t\t\tt.Errorf(\"Test %d (%s == %s): expected %v but was %v\", i, test.addr, test.ln.Addr().String(), want, got)\n\t\t}\n\t}\n}\nIncrease code coveragepackage caddy\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n)\n\n\/*\n\/\/ TODO\nfunc TestCaddyStartStop(t *testing.T) {\n\tcaddyfile := \"localhost:1984\"\n\n\tfor i := 0; i < 2; i++ {\n\t\t_, err := Start(CaddyfileInput{Contents: []byte(caddyfile)})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error starting, iteration %d: %v\", i, err)\n\t\t}\n\n\t\tclient := http.Client{\n\t\t\tTimeout: time.Duration(2 * time.Second),\n\t\t}\n\t\tresp, err := client.Get(\"http:\/\/localhost:1984\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected GET request to succeed (iteration %d), but it failed: %v\", i, err)\n\t\t}\n\t\tresp.Body.Close()\n\n\t\terr = Stop()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error stopping, iteration %d: %v\", i, err)\n\t\t}\n\t}\n}\n*\/\n\nfunc TestIsLoopback(t *testing.T) {\n\tfor i, test := range []struct {\n\t\tinput string\n\t\texpect bool\n\t}{\n\t\t{\"example.com\", false},\n\t\t{\"localhost\", true},\n\t\t{\"localhost:1234\", true},\n\t\t{\"localhost:\", true},\n\t\t{\"127.0.0.1\", true},\n\t\t{\"127.0.0.1:443\", true},\n\t\t{\"127.0.1.5\", true},\n\t\t{\"10.0.0.5\", false},\n\t\t{\"12.7.0.1\", false},\n\t\t{\"[::1]\", true},\n\t\t{\"[::1]:1234\", true},\n\t\t{\"::1\", true},\n\t\t{\"::\", false},\n\t\t{\"[::]\", false},\n\t\t{\"local\", false},\n\t} {\n\t\tif got, want := IsLoopback(test.input), test.expect; got != want {\n\t\t\tt.Errorf(\"Test %d (%s): expected %v but was %v\", i, test.input, want, got)\n\t\t}\n\t}\n}\n\nfunc TestListenerAddrEqual(t *testing.T) {\n\tln1, err := net.Listen(\"tcp\", \"[::]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln1.Close()\n\n\tln1port := strconv.Itoa(ln1.Addr().(*net.TCPAddr).Port)\n\n\tln2, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln2.Close()\n\n\tln2port := strconv.Itoa(ln2.Addr().(*net.TCPAddr).Port)\n\n\tfor i, test := range []struct {\n\t\tln net.Listener\n\t\taddr string\n\t\texpect bool\n\t}{\n\t\t{ln1, \":1234\", false},\n\t\t{ln1, \"0.0.0.0:1234\", false},\n\t\t{ln1, \"0.0.0.0\", false},\n\t\t{ln1, \":\" + ln1port + \"\", true},\n\t\t{ln1, \"0.0.0.0:\" + ln1port + \"\", true},\n\t\t{ln2, \":\" + ln2port + \"\", false},\n\t\t{ln2, \"127.0.0.1:1234\", false},\n\t\t{ln2, \"127.0.0.1\", false},\n\t\t{ln2, \"127.0.0.1:\" + ln2port + \"\", true},\n\t} {\n\t\tif got, want := listenerAddrEqual(test.ln, test.addr), test.expect; got != want {\n\t\t\tt.Errorf(\"Test %d (%s == %s): expected %v but was %v\", i, test.addr, test.ln.Addr().String(), want, got)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\tcfg \"github.com\/flynn\/flynn\/cli\/config\"\n\t\"github.com\/flynn\/flynn\/controller\/client\"\n\tct 
\"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/backup\"\n\t\"github.com\/flynn\/go-docopt\"\n)\n\nfunc init() {\n\tregister(\"docker\", runDocker, `\nusage: flynn docker set-push-url []\n flynn docker login\n flynn docker logout\n flynn docker push \n\nDeploy Docker images to a Flynn cluster.\n\nCommands:\n\tset-push-url set the Docker push URL (defaults to https:\/\/docker.$CLUSTER_DOMAIN)\n\n\tlogin run \"docker login\" against the cluster's docker-receive app\n\n\tlogout run \"docker logout\" against the cluster's docker-receive app\n\n\tpush push and release a Docker image to the cluster\n\nExample:\n\n\tAssuming you have a Docker image tagged \"my-custom-image:v2\":\n\n\t$ flynn docker push my-custom-image:v2\n\tflynn: getting image config with \"docker inspect -f {{ json .Config }} my-custom-image:v2\"\n\tflynn: tagging Docker image with \"docker tag --force my-custom-image:v2 docker.1.localflynn.com\/my-app:latest\"\n\tflynn: pushing Docker image with \"docker push docker.1.localflynn.com\/my-app:latest\"\n\tThe push refers to a repository [docker.1.localflynn.com\/my-app] (len: 1)\n\ta8eb754d1a89: Pushed\n\t...\n\t3059b4820522: Pushed\n\tlatest: digest: sha256:1752ca12bbedb99734ca1ba3ec35720768a95ad83b7b6c371fc37a28b98ea351 size: 61216\n\tflynn: image pushed, waiting for artifact creation\n\tflynn: deploying release using artifact URI http:\/\/docker-receive.discoverd?name=my-app&id=sha256:1752ca12bbedb99734ca1ba3ec35720768a95ad83b7b6c371fc37a28b98ea351\n\tflynn: image deployed, scale it with 'flynn scale app=N'\n`)\n}\n\nfunc runDocker(args *docopt.Args, client controller.Client) error {\n\tif args.Bool[\"set-push-url\"] {\n\t\treturn runDockerSetPushURL(args)\n\t} else if args.Bool[\"login\"] {\n\t\treturn runDockerLogin()\n\t} else if args.Bool[\"logout\"] {\n\t\treturn runDockerLogout()\n\t} else if args.Bool[\"push\"] {\n\t\treturn runDockerPush(args, client)\n\t}\n\treturn errors.New(\"unknown docker subcommand\")\n}\n\nfunc runDockerSetPushURL(args *docopt.Args) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\turl := args.String[\"\"]\n\tif url == \"\" {\n\t\tif cluster.DockerPushURL != \"\" {\n\t\t\treturn fmt.Errorf(\"ERROR: refusing to overwrite current Docker push URL %q with a default one. 
To overwrite the existing URL, set one explicitly with 'flynn docker set-push-url URL'\", cluster.DockerPushURL)\n\t\t}\n\t\tif !strings.Contains(cluster.ControllerURL, \"controller\") {\n\t\t\treturn errors.New(\"ERROR: unable to determine default Docker push URL, set one explicitly with 'flynn docker set-push-url URL'\")\n\t\t}\n\t\turl = strings.Replace(cluster.ControllerURL, \"controller\", \"docker\", 1)\n\t}\n\tif !strings.HasPrefix(url, \"https:\/\/\") {\n\t\turl = \"https:\/\/\" + url\n\t}\n\tcluster.DockerPushURL = url\n\treturn config.SaveTo(configPath())\n}\n\nfunc runDockerLogin() error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = dockerLogin(host, cluster.Key)\n\tif e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {\n\t\terr = errors.New(\"Executable 'docker' was not found.\")\n\t} else if err == ErrDockerTLSError {\n\t\tprintDockerTLSWarning(host, cfg.CACertPath(cluster.Name))\n\t\terr = errors.New(\"Error configuring docker, follow the above instructions and try again.\")\n\t}\n\treturn err\n}\n\nfunc runDockerLogout() error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := dockerLogoutCmd(host)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nvar ErrDockerTLSError = errors.New(\"docker TLS error\")\n\nfunc dockerLogin(host, key string) error {\n\tvar out bytes.Buffer\n\tcmd := exec.Command(\"docker\", \"login\", \"--email=user@\"+host, \"--username=user\", \"--password=\"+key, host)\n\tcmd.Stdout = &out\n\tcmd.Stderr = &out\n\terr := cmd.Run()\n\tif strings.Contains(out.String(), \"certificate signed by unknown authority\") {\n\t\terr = ErrDockerTLSError\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"error running `docker login`: %s - output: %q\", err, out)\n\t}\n\treturn nil\n}\n\nfunc dockerLogout(host string) error {\n\treturn dockerLogoutCmd(host).Run()\n}\n\nfunc dockerLogoutCmd(host string) *exec.Cmd {\n\treturn exec.Command(\"docker\", \"logout\", host)\n}\n\nfunc printDockerTLSWarning(host, caPath string) {\n\tfmt.Printf(`\nWARN: docker configuration failed with a TLS error.\nWARN:\nWARN: Copy the TLS CA certificate %s\nWARN: to \/etc\/docker\/certs.d\/%s\/ca.crt\nWARN: on the docker daemon's host and restart docker.\nWARN:\nWARN: If using Docker for Mac, go to Docker -> Preferences\nWARN: -> Advanced, add %q as an\nWARN: Insecure Registry and hit \"Apply & Restart\".\n\n`[1:], caPath, host, host)\n}\n\nfunc runDockerPush(args *docopt.Args, client controller.Client) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdockerHost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage := args.String[\"<image>\"]\n\n\tprevRelease, err := client.GetAppRelease(mustApp())\n\tif err == controller.ErrNotFound {\n\t\tprevRelease = &ct.Release{}\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"error getting current app release: %s\", err)\n\t}\n\n\t\/\/ get the image config to determine Cmd, Entrypoint and Env\n\tcmd := exec.Command(\"docker\", \"inspect\", \"-f\", \"{{ json .Config }}\", image)\n\tlog.Printf(\"flynn: getting image config with %q\", strings.Join(cmd.Args, \" \"))\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tvar 
config struct {\n\t\tCmd []string `json:\"Cmd\"`\n\t\tEntrypoint []string `json:\"Entrypoint\"`\n\t\tEnv []string `json:\"Env\"`\n\t}\n\tif err := json.NewDecoder(stdout).Decode(&config); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ tag the docker image ready to be pushed\n\ttag := fmt.Sprintf(\"%s\/%s:latest\", dockerHost, mustApp())\n\tcmd = exec.Command(\"docker\", \"tag\", image, tag)\n\tlog.Printf(\"flynn: tagging Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tartifact, err := dockerPush(client, mustApp(), tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create and deploy a release with the image config and created artifact\n\tlog.Printf(\"flynn: deploying release using artifact URI %s\", artifact.URI)\n\trelease := &ct.Release{\n\t\tArtifactIDs: []string{artifact.ID},\n\t\tProcesses: prevRelease.Processes,\n\t\tEnv: prevRelease.Env,\n\t\tMeta: prevRelease.Meta,\n\t}\n\n\tproc, ok := release.Processes[\"app\"]\n\tif !ok {\n\t\tproc = ct.ProcessType{}\n\t}\n\tproc.Args = append(config.Entrypoint, config.Cmd...)\n\tif len(proc.Ports) == 0 {\n\t\tproc.Ports = []ct.Port{{\n\t\t\tPort: 8080,\n\t\t\tProto: \"tcp\",\n\t\t\tService: &host.Service{\n\t\t\t\tName: mustApp() + \"-web\",\n\t\t\t\tCreate: true,\n\t\t\t},\n\t\t}}\n\t}\n\tif release.Processes == nil {\n\t\trelease.Processes = make(map[string]ct.ProcessType, 1)\n\t}\n\trelease.Processes[\"app\"] = proc\n\n\tif len(config.Env) > 0 && release.Env == nil {\n\t\trelease.Env = make(map[string]string, len(config.Env))\n\t}\n\tfor _, v := range config.Env {\n\t\tkeyVal := strings.SplitN(v, \"=\", 2)\n\t\tif len(keyVal) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ only set the key if it doesn't exist so variables set with\n\t\t\/\/ `flynn env set` are not overwritten\n\t\tif _, ok := release.Env[keyVal[0]]; !ok {\n\t\t\trelease.Env[keyVal[0]] = keyVal[1]\n\t\t}\n\t}\n\n\tif release.Meta == nil {\n\t\trelease.Meta = make(map[string]string, 1)\n\t}\n\trelease.Meta[\"docker-receive\"] = \"true\"\n\n\tif err := client.CreateRelease(release); err != nil {\n\t\treturn err\n\t}\n\tif err := client.DeployAppRelease(mustApp(), release.ID, nil); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"flynn: image deployed, scale it with 'flynn scale app=N'\")\n\treturn nil\n}\n\nfunc dockerPush(client controller.Client, repo, tag string) (*ct.Artifact, error) {\n\t\/\/ subscribe to artifact events\n\tevents := make(chan *ct.Event)\n\tstream, err := client.StreamEvents(ct.StreamEventsOptions{\n\t\tObjectTypes: []ct.EventType{ct.EventTypeArtifact},\n\t}, events)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stream.Close()\n\n\t\/\/ push the Docker image to docker-receive\n\tcmd := exec.Command(\"docker\", \"push\", tag)\n\tlog.Printf(\"flynn: pushing Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ wait for an artifact to be created\n\tlog.Printf(\"flynn: image pushed, waiting for artifact creation\")\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-events:\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"event stream closed unexpectedly: %s\", stream.Err())\n\t\t\t}\n\t\t\tvar artifact ct.Artifact\n\t\t\tif err := json.Unmarshal(event.Data, &artifact); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif 
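// ---- Editorial aside (illustrative sketch, not part of the original source) ----
// The env loop above copies image variables into the release only when the
// key is absent, so values set via `flynn env set` keep precedence over the
// image's baked-in defaults. The same merge as a standalone helper:
package main

import (
	"fmt"
	"strings"
)

// mergeImageEnv folds "KEY=VALUE" pairs into env without overwriting keys
// that are already present.
func mergeImageEnv(env map[string]string, imageEnv []string) {
	for _, kv := range imageEnv {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			continue
		}
		if _, ok := env[parts[0]]; !ok {
			env[parts[0]] = parts[1]
		}
	}
}

func main() {
	env := map[string]string{"PORT": "9090"}
	mergeImageEnv(env, []string{"PORT=8080", "PATH=/usr/bin"})
	fmt.Println(env) // PORT stays 9090, PATH is added
}
// ---- End aside ----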
artifact.Meta[\"docker-receive.repository\"] == repo {\n\t\t\t\treturn &artifact, nil\n\t\t\t}\n\t\tcase <-time.After(30 * time.Second):\n\t\t\treturn nil, fmt.Errorf(\"timed out waiting for artifact creation\")\n\t\t}\n\t}\n\n}\n\nfunc dockerPull(repo, digest string) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Command(\"docker\", \"pull\", fmt.Sprintf(\"%s\/%s@%s\", host, repo, digest))\n\tlog.Printf(\"flynn: pulling Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc dockerSave(tag string, tw *backup.TarWriter, progress backup.ProgressBar) error {\n\ttmp, err := ioutil.TempFile(\"\", \"flynn-docker-save\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating temp file: %s\", err)\n\t}\n\tdefer tmp.Close()\n\tdefer os.Remove(tmp.Name())\n\n\tcmd := exec.Command(\"docker\", \"save\", tag)\n\tcmd.Stdout = tmp\n\tif progress != nil {\n\t\tcmd.Stdout = io.MultiWriter(tmp, progress)\n\t}\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tlength, err := tmp.Seek(0, os.SEEK_CUR)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := tw.WriteHeader(\"docker-image.tar\", int(length)); err != nil {\n\t\treturn err\n\t}\n\tif _, err := tmp.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(tw, tmp)\n\treturn err\n}\ncli: Remove --force flag from docker push examplepackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\tcfg \"github.com\/flynn\/flynn\/cli\/config\"\n\t\"github.com\/flynn\/flynn\/controller\/client\"\n\tct \"github.com\/flynn\/flynn\/controller\/types\"\n\t\"github.com\/flynn\/flynn\/host\/types\"\n\t\"github.com\/flynn\/flynn\/pkg\/backup\"\n\t\"github.com\/flynn\/go-docopt\"\n)\n\nfunc init() {\n\tregister(\"docker\", runDocker, `\nusage: flynn docker set-push-url []\n flynn docker login\n flynn docker logout\n flynn docker push \n\nDeploy Docker images to a Flynn cluster.\n\nCommands:\n\tset-push-url set the Docker push URL (defaults to https:\/\/docker.$CLUSTER_DOMAIN)\n\n\tlogin run \"docker login\" against the cluster's docker-receive app\n\n\tlogout run \"docker logout\" against the cluster's docker-receive app\n\n\tpush push and release a Docker image to the cluster\n\nExample:\n\n\tAssuming you have a Docker image tagged \"my-custom-image:v2\":\n\n\t$ flynn docker push my-custom-image:v2\n\tflynn: getting image config with \"docker inspect -f {{ json .Config }} my-custom-image:v2\"\n\tflynn: tagging Docker image with \"docker tag my-custom-image:v2 docker.1.localflynn.com\/my-app:latest\"\n\tflynn: pushing Docker image with \"docker push docker.1.localflynn.com\/my-app:latest\"\n\tThe push refers to a repository [docker.1.localflynn.com\/my-app] (len: 1)\n\ta8eb754d1a89: Pushed\n\t...\n\t3059b4820522: Pushed\n\tlatest: digest: sha256:1752ca12bbedb99734ca1ba3ec35720768a95ad83b7b6c371fc37a28b98ea351 size: 61216\n\tflynn: image pushed, waiting for artifact creation\n\tflynn: deploying release using artifact URI http:\/\/docker-receive.discoverd?name=my-app&id=sha256:1752ca12bbedb99734ca1ba3ec35720768a95ad83b7b6c371fc37a28b98ea351\n\tflynn: image deployed, scale it with 'flynn scale app=N'\n`)\n}\n\nfunc runDocker(args *docopt.Args, client controller.Client) error {\n\tif 
args.Bool[\"set-push-url\"] {\n\t\treturn runDockerSetPushURL(args)\n\t} else if args.Bool[\"login\"] {\n\t\treturn runDockerLogin()\n\t} else if args.Bool[\"logout\"] {\n\t\treturn runDockerLogout()\n\t} else if args.Bool[\"push\"] {\n\t\treturn runDockerPush(args, client)\n\t}\n\treturn errors.New(\"unknown docker subcommand\")\n}\n\nfunc runDockerSetPushURL(args *docopt.Args) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\turl := args.String[\"\"]\n\tif url == \"\" {\n\t\tif cluster.DockerPushURL != \"\" {\n\t\t\treturn fmt.Errorf(\"ERROR: refusing to overwrite current Docker push URL %q with a default one. To overwrite the existing URL, set one explicitly with 'flynn docker set-push-url URL'\", cluster.DockerPushURL)\n\t\t}\n\t\tif !strings.Contains(cluster.ControllerURL, \"controller\") {\n\t\t\treturn errors.New(\"ERROR: unable to determine default Docker push URL, set one explicitly with 'flynn docker set-push-url URL'\")\n\t\t}\n\t\turl = strings.Replace(cluster.ControllerURL, \"controller\", \"docker\", 1)\n\t}\n\tif !strings.HasPrefix(url, \"https:\/\/\") {\n\t\turl = \"https:\/\/\" + url\n\t}\n\tcluster.DockerPushURL = url\n\treturn config.SaveTo(configPath())\n}\n\nfunc runDockerLogin() error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = dockerLogin(host, cluster.Key)\n\tif e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {\n\t\terr = errors.New(\"Executable 'docker' was not found.\")\n\t} else if err == ErrDockerTLSError {\n\t\tprintDockerTLSWarning(host, cfg.CACertPath(cluster.Name))\n\t\terr = errors.New(\"Error configuring docker, follow the above instructions and try again.\")\n\t}\n\treturn err\n}\n\nfunc runDockerLogout() error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := dockerLogoutCmd(host)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nvar ErrDockerTLSError = errors.New(\"docker TLS error\")\n\nfunc dockerLogin(host, key string) error {\n\tvar out bytes.Buffer\n\tcmd := exec.Command(\"docker\", \"login\", \"--email=user@\"+host, \"--username=user\", \"--password=\"+key, host)\n\tcmd.Stdout = &out\n\tcmd.Stderr = &out\n\terr := cmd.Run()\n\tif strings.Contains(out.String(), \"certificate signed by unknown authority\") {\n\t\terr = ErrDockerTLSError\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"error running `docker login`: %s - output: %q\", err, out)\n\t}\n\treturn nil\n}\n\nfunc dockerLogout(host string) error {\n\treturn dockerLogoutCmd(host).Run()\n}\n\nfunc dockerLogoutCmd(host string) *exec.Cmd {\n\treturn exec.Command(\"docker\", \"logout\", host)\n}\n\nfunc printDockerTLSWarning(host, caPath string) {\n\tfmt.Printf(`\nWARN: docker configuration failed with a TLS error.\nWARN:\nWARN: Copy the TLS CA certificate %s\nWARN: to \/etc\/docker\/certs.d\/%s\/ca.crt\nWARN: on the docker daemon's host and restart docker.\nWARN:\nWARN: If using Docker for Mac, go to Docker -> Preferences\nWARN: -> Advanced, add %q as an\nWARN: Insecure Registry and hit \"Apply & Restart\".\n\n`[1:], caPath, host, host)\n}\n\nfunc runDockerPush(args *docopt.Args, client controller.Client) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdockerHost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timage := 
args.String[\"\"]\n\n\tprevRelease, err := client.GetAppRelease(mustApp())\n\tif err == controller.ErrNotFound {\n\t\tprevRelease = &ct.Release{}\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"error getting current app release: %s\", err)\n\t}\n\n\t\/\/ get the image config to determine Cmd, Entrypoint and Env\n\tcmd := exec.Command(\"docker\", \"inspect\", \"-f\", \"{{ json .Config }}\", image)\n\tlog.Printf(\"flynn: getting image config with %q\", strings.Join(cmd.Args, \" \"))\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tvar config struct {\n\t\tCmd []string `json:\"Cmd\"`\n\t\tEntrypoint []string `json:\"Entrypoint\"`\n\t\tEnv []string `json:\"Env\"`\n\t}\n\tif err := json.NewDecoder(stdout).Decode(&config); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ tag the docker image ready to be pushed\n\ttag := fmt.Sprintf(\"%s\/%s:latest\", dockerHost, mustApp())\n\tcmd = exec.Command(\"docker\", \"tag\", image, tag)\n\tlog.Printf(\"flynn: tagging Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tartifact, err := dockerPush(client, mustApp(), tag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create and deploy a release with the image config and created artifact\n\tlog.Printf(\"flynn: deploying release using artifact URI %s\", artifact.URI)\n\trelease := &ct.Release{\n\t\tArtifactIDs: []string{artifact.ID},\n\t\tProcesses: prevRelease.Processes,\n\t\tEnv: prevRelease.Env,\n\t\tMeta: prevRelease.Meta,\n\t}\n\n\tproc, ok := release.Processes[\"app\"]\n\tif !ok {\n\t\tproc = ct.ProcessType{}\n\t}\n\tproc.Args = append(config.Entrypoint, config.Cmd...)\n\tif len(proc.Ports) == 0 {\n\t\tproc.Ports = []ct.Port{{\n\t\t\tPort: 8080,\n\t\t\tProto: \"tcp\",\n\t\t\tService: &host.Service{\n\t\t\t\tName: mustApp() + \"-web\",\n\t\t\t\tCreate: true,\n\t\t\t},\n\t\t}}\n\t}\n\tif release.Processes == nil {\n\t\trelease.Processes = make(map[string]ct.ProcessType, 1)\n\t}\n\trelease.Processes[\"app\"] = proc\n\n\tif len(config.Env) > 0 && release.Env == nil {\n\t\trelease.Env = make(map[string]string, len(config.Env))\n\t}\n\tfor _, v := range config.Env {\n\t\tkeyVal := strings.SplitN(v, \"=\", 2)\n\t\tif len(keyVal) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ only set the key if it doesn't exist so variables set with\n\t\t\/\/ `flynn env set` are not overwritten\n\t\tif _, ok := release.Env[keyVal[0]]; !ok {\n\t\t\trelease.Env[keyVal[0]] = keyVal[1]\n\t\t}\n\t}\n\n\tif release.Meta == nil {\n\t\trelease.Meta = make(map[string]string, 1)\n\t}\n\trelease.Meta[\"docker-receive\"] = \"true\"\n\n\tif err := client.CreateRelease(release); err != nil {\n\t\treturn err\n\t}\n\tif err := client.DeployAppRelease(mustApp(), release.ID, nil); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"flynn: image deployed, scale it with 'flynn scale app=N'\")\n\treturn nil\n}\n\nfunc dockerPush(client controller.Client, repo, tag string) (*ct.Artifact, error) {\n\t\/\/ subscribe to artifact events\n\tevents := make(chan *ct.Event)\n\tstream, err := client.StreamEvents(ct.StreamEventsOptions{\n\t\tObjectTypes: []ct.EventType{ct.EventTypeArtifact},\n\t}, events)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stream.Close()\n\n\t\/\/ push the Docker image to docker-receive\n\tcmd := exec.Command(\"docker\", \"push\", 
tag)\n\tlog.Printf(\"flynn: pushing Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ wait for an artifact to be created\n\tlog.Printf(\"flynn: image pushed, waiting for artifact creation\")\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-events:\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"event stream closed unexpectedly: %s\", stream.Err())\n\t\t\t}\n\t\t\tvar artifact ct.Artifact\n\t\t\tif err := json.Unmarshal(event.Data, &artifact); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif artifact.Meta[\"docker-receive.repository\"] == repo {\n\t\t\t\treturn &artifact, nil\n\t\t\t}\n\t\tcase <-time.After(30 * time.Second):\n\t\t\treturn nil, fmt.Errorf(\"timed out waiting for artifact creation\")\n\t\t}\n\t}\n\n}\n\nfunc dockerPull(repo, digest string) error {\n\tcluster, err := getCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, err := cluster.DockerPushHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Command(\"docker\", \"pull\", fmt.Sprintf(\"%s\/%s@%s\", host, repo, digest))\n\tlog.Printf(\"flynn: pulling Docker image with %q\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\nfunc dockerSave(tag string, tw *backup.TarWriter, progress backup.ProgressBar) error {\n\ttmp, err := ioutil.TempFile(\"\", \"flynn-docker-save\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating temp file: %s\", err)\n\t}\n\tdefer tmp.Close()\n\tdefer os.Remove(tmp.Name())\n\n\tcmd := exec.Command(\"docker\", \"save\", tag)\n\tcmd.Stdout = tmp\n\tif progress != nil {\n\t\tcmd.Stdout = io.MultiWriter(tmp, progress)\n\t}\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tlength, err := tmp.Seek(0, os.SEEK_CUR)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := tw.WriteHeader(\"docker-image.tar\", int(length)); err != nil {\n\t\treturn err\n\t}\n\tif _, err := tmp.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(tw, tmp)\n\treturn err\n}\n<|endoftext|>"} {"text":"package dialer\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/encryptedstore\"\n\t\"github.com\/rancher\/rancher\/pkg\/nodeconfig\"\n\t\"github.com\/rancher\/rancher\/pkg\/remotedialer\"\n\t\"github.com\/rancher\/rancher\/pkg\/tunnelserver\"\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/rancher\/types\/config\/dialer\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nfunc NewFactory(apiContext *config.ScaledContext) (dialer.Factory, error) {\n\tauthorizer := tunnelserver.NewAuthorizer(apiContext)\n\ttunneler := tunnelserver.NewTunnelServer(apiContext, authorizer)\n\n\tsecretStore, err := nodeconfig.NewStore(apiContext.Core.Namespaces(\"\"), apiContext.K8sClient.CoreV1())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiContext.Management.Nodes(\"local\").Controller().Informer().AddIndexers(cache.Indexers{\n\t\tnodeAccessIndexer: nodeIndexer,\n\t})\n\n\treturn &Factory{\n\t\tclusterLister: apiContext.Management.Clusters(\"\").Controller().Lister(),\n\t\tlocalNodeController: apiContext.Management.Nodes(\"local\").Controller(),\n\t\tnodeLister: apiContext.Management.Nodes(\"\").Controller().Lister(),\n\t\tTunnelServer: tunneler,\n\t\tTunnelAuthorizer: authorizer,\n\t\tstore: secretStore,\n\t}, 
nil\n}\n\ntype Factory struct {\n\tlocalNodeController v3.NodeController\n\tnodeLister v3.NodeLister\n\tclusterLister v3.ClusterLister\n\tTunnelServer *remotedialer.Server\n\tTunnelAuthorizer *tunnelserver.Authorizer\n\tstore *encryptedstore.GenericEncryptedStore\n}\n\nfunc (f *Factory) ClusterDialer(clusterName string) (dialer.Dialer, error) {\n\treturn func(network, address string) (net.Conn, error) {\n\t\td, err := f.clusterDialer(clusterName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d(network, address)\n\t}, nil\n}\n\nfunc (f *Factory) clusterDialer(clusterName string) (dialer.Dialer, error) {\n\tcluster, err := f.clusterLister.Get(\"\", clusterName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(cluster.Name) {\n\t\treturn f.TunnelServer.Dialer(cluster.Name, 15*time.Second), nil\n\t}\n\n\tnodes, err := f.nodeLister.List(cluster.Name, labels.Everything())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, node := range nodes {\n\t\tif node.DeletionTimestamp == nil && v3.NodeConditionProvisioned.IsTrue(node) {\n\t\t\tif nodeDialer, err := f.nodeDialer(clusterName, node.Name); err == nil {\n\t\t\t\treturn nodeDialer, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn net.Dial, nil\n}\n\nfunc (f *Factory) DockerDialer(clusterName, machineName string) (dialer.Dialer, error) {\n\tmachine, err := f.nodeLister.Get(clusterName, machineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(machine.Name) {\n\t\td := f.TunnelServer.Dialer(machine.Name, 15*time.Second)\n\t\treturn func(string, string) (net.Conn, error) {\n\t\t\treturn d(\"unix\", \"\/var\/run\/docker.sock\")\n\t\t}, nil\n\t}\n\n\tif machine.Spec.CustomConfig != nil && machine.Spec.CustomConfig.Address != \"\" && machine.Spec.CustomConfig.SSHKey != \"\" {\n\t\treturn f.sshDialer(machine)\n\t}\n\n\tif machine.Spec.NodeTemplateName != \"\" {\n\t\treturn f.tlsDialer(machine)\n\t}\n\n\treturn nil, fmt.Errorf(\"can not build dialer to %s:%s\", clusterName, machineName)\n}\n\nfunc (f *Factory) NodeDialer(clusterName, machineName string) (dialer.Dialer, error) {\n\treturn func(network, address string) (net.Conn, error) {\n\t\td, err := f.nodeDialer(clusterName, machineName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d(network, address)\n\t}, nil\n}\n\nfunc (f *Factory) nodeDialer(clusterName, machineName string) (dialer.Dialer, error) {\n\tmachine, err := f.nodeLister.Get(clusterName, machineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(machine.Name) {\n\t\td := f.TunnelServer.Dialer(machine.Name, 15*time.Second)\n\t\treturn dialer.Dialer(d), nil\n\t}\n\n\tif machine.Spec.CustomConfig != nil && machine.Spec.CustomConfig.Address != \"\" && machine.Spec.CustomConfig.SSHKey != \"\" {\n\t\treturn f.sshLocalDialer(machine)\n\t}\n\n\tif machine.Spec.NodeTemplateName != \"\" {\n\t\treturn f.sshLocalDialer(machine)\n\t}\n\n\treturn nil, fmt.Errorf(\"can not build dialer to %s:%s\", clusterName, machineName)\n}\nTimeout in Dialpackage dialer\n\nimport 
(\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/encryptedstore\"\n\t\"github.com\/rancher\/rancher\/pkg\/nodeconfig\"\n\t\"github.com\/rancher\/rancher\/pkg\/remotedialer\"\n\t\"github.com\/rancher\/rancher\/pkg\/tunnelserver\"\n\t\"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/rancher\/types\/config\/dialer\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\nfunc NewFactory(apiContext *config.ScaledContext) (dialer.Factory, error) {\n\tauthorizer := tunnelserver.NewAuthorizer(apiContext)\n\ttunneler := tunnelserver.NewTunnelServer(apiContext, authorizer)\n\n\tsecretStore, err := nodeconfig.NewStore(apiContext.Core.Namespaces(\"\"), apiContext.K8sClient.CoreV1())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiContext.Management.Nodes(\"local\").Controller().Informer().AddIndexers(cache.Indexers{\n\t\tnodeAccessIndexer: nodeIndexer,\n\t})\n\n\treturn &Factory{\n\t\tclusterLister: apiContext.Management.Clusters(\"\").Controller().Lister(),\n\t\tlocalNodeController: apiContext.Management.Nodes(\"local\").Controller(),\n\t\tnodeLister: apiContext.Management.Nodes(\"\").Controller().Lister(),\n\t\tTunnelServer: tunneler,\n\t\tTunnelAuthorizer: authorizer,\n\t\tstore: secretStore,\n\t}, nil\n}\n\ntype Factory struct {\n\tlocalNodeController v3.NodeController\n\tnodeLister v3.NodeLister\n\tclusterLister v3.ClusterLister\n\tTunnelServer *remotedialer.Server\n\tTunnelAuthorizer *tunnelserver.Authorizer\n\tstore *encryptedstore.GenericEncryptedStore\n}\n\nfunc (f *Factory) ClusterDialer(clusterName string) (dialer.Dialer, error) {\n\treturn func(network, address string) (net.Conn, error) {\n\t\td, err := f.clusterDialer(clusterName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d(network, address)\n\t}, nil\n}\n\nfunc (f *Factory) clusterDialer(clusterName string) (dialer.Dialer, error) {\n\tcluster, err := f.clusterLister.Get(\"\", clusterName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(cluster.Name) {\n\t\treturn f.TunnelServer.Dialer(cluster.Name, 15*time.Second), nil\n\t}\n\n\tnodes, err := f.nodeLister.List(cluster.Name, labels.Everything())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, node := range nodes {\n\t\tif node.DeletionTimestamp == nil && v3.NodeConditionProvisioned.IsTrue(node) {\n\t\t\tif nodeDialer, err := f.nodeDialer(clusterName, node.Name); err == nil {\n\t\t\t\treturn nodeDialer, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn func(network, address string) (net.Conn, error) {\n\t\treturn net.DialTimeout(network, address, 30*time.Second)\n\t}, nil\n}\n\nfunc (f *Factory) DockerDialer(clusterName, machineName string) (dialer.Dialer, error) {\n\tmachine, err := f.nodeLister.Get(clusterName, machineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(machine.Name) {\n\t\td := f.TunnelServer.Dialer(machine.Name, 15*time.Second)\n\t\treturn func(string, string) (net.Conn, error) {\n\t\t\treturn d(\"unix\", \"\/var\/run\/docker.sock\")\n\t\t}, nil\n\t}\n\n\tif machine.Spec.CustomConfig != nil && machine.Spec.CustomConfig.Address != \"\" && machine.Spec.CustomConfig.SSHKey != \"\" {\n\t\treturn f.sshDialer(machine)\n\t}\n\n\tif machine.Spec.NodeTemplateName != \"\" {\n\t\treturn f.tlsDialer(machine)\n\t}\n\n\treturn nil, fmt.Errorf(\"can not build dialer to %s:%s\", clusterName, machineName)\n}\n\nfunc (f *Factory) NodeDialer(clusterName, machineName string) 
(dialer.Dialer, error) {\n\treturn func(network, address string) (net.Conn, error) {\n\t\td, err := f.nodeDialer(clusterName, machineName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn d(network, address)\n\t}, nil\n}\n\nfunc (f *Factory) nodeDialer(clusterName, machineName string) (dialer.Dialer, error) {\n\tmachine, err := f.nodeLister.Get(clusterName, machineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.TunnelServer.HasSession(machine.Name) {\n\t\td := f.TunnelServer.Dialer(machine.Name, 15*time.Second)\n\t\treturn dialer.Dialer(d), nil\n\t}\n\n\tif machine.Spec.CustomConfig != nil && machine.Spec.CustomConfig.Address != \"\" && machine.Spec.CustomConfig.SSHKey != \"\" {\n\t\treturn f.sshLocalDialer(machine)\n\t}\n\n\tif machine.Spec.NodeTemplateName != \"\" {\n\t\treturn f.sshLocalDialer(machine)\n\t}\n\n\treturn nil, fmt.Errorf(\"can not build dialer to %s:%s\", clusterName, machineName)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2018 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !windows\n\npackage logutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/coreos\/go-systemd\/journal\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ NewJournaldWriter wraps \"io.Writer\" to redirect log output\n\/\/ to the local systemd journal. 
If journald send fails, it falls\n\/\/ back to writing to the original writer.\n\/\/ The decode overhead is only <30µs per write.\n\/\/ Reference: https:\/\/github.com\/coreos\/pkg\/blob\/master\/capnslog\/journald_formatter.go\nfunc NewJournaldWriter(wr io.Writer) io.Writer {\n\treturn &journaldWriter{Writer: wr}\n}\n\ntype journaldWriter struct {\n\tio.Writer\n}\n\n\/\/ WARN: assume that etcd uses default field names in zap encoder config\n\/\/ make sure to keep this up-to-date!\ntype logLine struct {\n\tLevel string `json:\"level\"`\n\tCaller string `json:\"caller\"`\n}\n\nfunc (w *journaldWriter) Write(p []byte) (int, error) {\n\tline := &logLine{}\n\tif err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar pri journal.Priority\n\tswitch line.Level {\n\tcase zapcore.DebugLevel.String():\n\t\tpri = journal.PriDebug\n\tcase zapcore.InfoLevel.String():\n\t\tpri = journal.PriInfo\n\n\tcase zapcore.WarnLevel.String():\n\t\tpri = journal.PriWarning\n\tcase zapcore.ErrorLevel.String():\n\t\tpri = journal.PriErr\n\n\tcase zapcore.DPanicLevel.String():\n\t\tpri = journal.PriCrit\n\tcase zapcore.PanicLevel.String():\n\t\tpri = journal.PriCrit\n\tcase zapcore.FatalLevel.String():\n\t\tpri = journal.PriCrit\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown log level: %q\", line.Level))\n\t}\n\n\terr := journal.Send(string(p), pri, map[string]string{\n\t\t\"PACKAGE\": filepath.Dir(line.Caller),\n\t\t\"SYSLOG_IDENTIFIER\": filepath.Base(os.Args[0]),\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"FAILED TO WRITE TO JOURNALD\", err, string(p))\n\t\treturn w.Writer.Write(p)\n\t}\n\treturn 0, nil\n}\npkg\/logutil: do not print error message on journaldWriter\/\/ Copyright 2018 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !windows\n\npackage logutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/coreos\/go-systemd\/journal\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ NewJournaldWriter wraps \"io.Writer\" to redirect log output\n\/\/ to the local systemd journal. 
If journald send fails, it falls\n\/\/ back to writing to the original writer.\n\/\/ The decode overhead is only <30µs per write.\n\/\/ Reference: https:\/\/github.com\/coreos\/pkg\/blob\/master\/capnslog\/journald_formatter.go\nfunc NewJournaldWriter(wr io.Writer) io.Writer {\n\treturn &journaldWriter{Writer: wr}\n}\n\ntype journaldWriter struct {\n\tio.Writer\n}\n\n\/\/ WARN: assume that etcd uses default field names in zap encoder config\n\/\/ make sure to keep this up-to-date!\ntype logLine struct {\n\tLevel string `json:\"level\"`\n\tCaller string `json:\"caller\"`\n}\n\nfunc (w *journaldWriter) Write(p []byte) (int, error) {\n\tline := &logLine{}\n\tif err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar pri journal.Priority\n\tswitch line.Level {\n\tcase zapcore.DebugLevel.String():\n\t\tpri = journal.PriDebug\n\tcase zapcore.InfoLevel.String():\n\t\tpri = journal.PriInfo\n\n\tcase zapcore.WarnLevel.String():\n\t\tpri = journal.PriWarning\n\tcase zapcore.ErrorLevel.String():\n\t\tpri = journal.PriErr\n\n\tcase zapcore.DPanicLevel.String():\n\t\tpri = journal.PriCrit\n\tcase zapcore.PanicLevel.String():\n\t\tpri = journal.PriCrit\n\tcase zapcore.FatalLevel.String():\n\t\tpri = journal.PriCrit\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown log level: %q\", line.Level))\n\t}\n\n\terr := journal.Send(string(p), pri, map[string]string{\n\t\t\"PACKAGE\": filepath.Dir(line.Caller),\n\t\t\"SYSLOG_IDENTIFIER\": filepath.Base(os.Args[0]),\n\t})\n\tif err != nil {\n\t\treturn w.Writer.Write(p)\n\t}\n\treturn 0, nil\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/evandroflores\/claimr\/messages\"\n\t\"github.com\/evandroflores\/claimr\/model\"\n\t\"github.com\/shomali11\/slacker\"\n)\n\nfunc init() {\n\tRegister(\"remove <container-name>\", \"Removes a container from your channel.\", remove)\n}\n\nfunc remove(request *slacker.Request, response slacker.ResponseWriter) {\n\tresponse.Typing()\n\n\tevent := getEvent(request)\n\tif direct, err := isDirect(event.Channel); direct {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\tcontainerName := request.Param(\"container-name\")\n\n\tcontainer, err := model.GetContainer(event.Team, event.Channel, containerName)\n\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\tchecks := []Check{\n\t\t{container == (model.Container{}), fmt.Sprintf(messages.Get(\"container-not-found-on-channel\"), containerName, event.Channel)},\n\t\t{container.InUseBy != \"\", fmt.Sprintf(messages.Get(\"container-in-use-by-this\"), containerName, container.InUseBy, container.UpdatedAt.Format(time.RFC1123))},\n\t\t{container.CreatedByUser != event.User, fmt.Sprintf(messages.Get(\"only-owner-can-remove\"), containerName, container.CreatedByUser)},\n\t}\n\n\terr = RunChecks(checks)\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\terr = container.Delete()\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\tresponse.Reply(fmt.Sprintf(messages.Get(\"container-removed\"), containerName))\n}\nmigrate cmd\/removepackage cmd\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/evandroflores\/claimr\/messages\"\n\t\"github.com\/evandroflores\/claimr\/model\"\n\t\"github.com\/shomali11\/slacker\"\n)\n\nfunc init() {\n\tRegister(\"remove <container-name>\", \"Removes a container from your channel.\", remove)\n}\n\nfunc remove(request *slacker.Request, response slacker.ResponseWriter) {\n\tresponse.Typing()\n\n\tevent := getEvent(request)\n\tcontainerName := 
request.Param(\"container-name\")\n\n\terr := validateInput(event.Channel, containerName)\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\tcontainer, err := model.GetContainer(event.Team, event.Channel, containerName)\n\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\tchecks := []Check{\n\t\t{container == (model.Container{}), fmt.Sprintf(messages.Get(\"container-not-found-on-channel\"), containerName, event.Channel)},\n\t\t{container.InUseBy != \"\", fmt.Sprintf(messages.Get(\"container-in-use-by-this\"), containerName, container.InUseBy, container.UpdatedAt.Format(time.RFC1123))},\n\t\t{container.CreatedByUser != event.User, fmt.Sprintf(messages.Get(\"only-owner-can-remove\"), containerName, container.CreatedByUser)},\n\t}\n\n\terr = RunChecks(checks)\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\terr = container.Delete()\n\tif err != nil {\n\t\tresponse.Reply(err.Error())\n\t\treturn\n\t}\n\n\tresponse.Reply(fmt.Sprintf(messages.Get(\"container-removed\"), containerName))\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar testTaPriKey crypto.PrivateKey\nvar testTaPubKey crypto.PublicKey\n\nvar testTa *ta\n\nfunc init() {\n\tpriKey, err := rsa.GenerateKey(rand.Reader, 1024)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttestTaPriKey = priKey\n\ttestTaPubKey = &priKey.PublicKey\n\n\ttestTa = newTa(\n\t\t\"testta\",\n\t\t\"testtaname\",\n\t\tmap[string]bool{\n\t\t\t\"https:\/\/testta.example.org\/\": true,\n\t\t\t\"https:\/\/testta.example.org\/redirect\/uri\": true,\n\t\t},\n\t\tmap[string]crypto.PublicKey{\n\t\t\t\"\": testTaPubKey,\n\t\t})\n\ttestTa.Upd = testTa.Upd.Add(-(time.Duration(testTa.Upd.Nanosecond()) % time.Millisecond)) \/\/ mongodb の粒度がミリ秒のため。\n}\n\nfunc testTaContainer(t *testing.T, taCont taContainer) {\n\tif ta_, err := taCont.get(testTa.id()); err != nil {\n\t\tt.Fatal(err)\n\t} else if !reflect.DeepEqual(ta_, testTa) {\n\t\tt.Error(ta_, testTa)\n\t}\n\n\tif ta_, err := taCont.get(testTa.id() + \"a\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if ta_ != nil {\n\t\tt.Error(ta_)\n\t}\n}\nテスト用の鍵 ID を付けたpackage main\n\nimport (\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar testTaPriKey crypto.PrivateKey\nvar testTaPubKey crypto.PublicKey\nvar testTaKid = \"testkey\"\n\nvar testTa *ta\n\nfunc init() {\n\tpriKey, err := rsa.GenerateKey(rand.Reader, 1024)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttestTaPriKey = priKey\n\ttestTaPubKey = &priKey.PublicKey\n\n\ttestTa = newTa(\n\t\t\"testta\",\n\t\t\"testtaname\",\n\t\tmap[string]bool{\n\t\t\t\"https:\/\/testta.example.org\/\": true,\n\t\t\t\"https:\/\/testta.example.org\/redirect\/uri\": true,\n\t\t},\n\t\tmap[string]crypto.PublicKey{\n\t\t\ttestTaKid: testTaPubKey,\n\t\t})\n\ttestTa.Upd = testTa.Upd.Add(-(time.Duration(testTa.Upd.Nanosecond()) % time.Millisecond)) \/\/ mongodb の粒度がミリ秒のため。\n}\n\nfunc testTaContainer(t *testing.T, taCont taContainer) {\n\tif ta_, err := taCont.get(testTa.id()); err != nil {\n\t\tt.Fatal(err)\n\t} else if !reflect.DeepEqual(ta_, testTa) {\n\t\tt.Error(ta_, testTa)\n\t}\n\n\tif ta_, err := taCont.get(testTa.id() + \"a\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if ta_ != nil {\n\t\tt.Error(ta_)\n\t}\n}\n<|endoftext|>"} {"text":"package mapping\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/omniscale\/imposm3\/element\"\n)\n\ntype 
Field struct {\n\tName string `json:\"name\"`\n\tKey Key `json:\"key\"`\n\tType string `json:\"type\"`\n\tArgs map[string]interface{} `json:\"args\"`\n}\n\ntype Table struct {\n\tName string\n\tType TableType `json:\"type\"`\n\tMapping map[Key][]Value `json:\"mapping\"`\n\tMappings map[string]SubMapping `json:\"mappings\"`\n\tTypeMappings TypeMappings `json:\"type_mappings\"`\n\tFields []*Field `json:\"columns\"`\n\tOldFields []*Field `json:\"fields\"`\n\tFilters *Filters `json:\"filters\"`\n}\n\ntype GeneralizedTable struct {\n\tName string\n\tSourceTableName string `json:\"source\"`\n\tTolerance float64 `json:\"tolerance\"`\n\tSqlFilter string `json:\"sql_filter\"`\n}\n\ntype Filters struct {\n\tExcludeTags *[][2]string `json:\"exclude_tags\"`\n}\n\ntype Tables map[string]*Table\n\ntype GeneralizedTables map[string]*GeneralizedTable\n\ntype Mapping struct {\n\tTables Tables `json:\"tables\"`\n\tGeneralizedTables GeneralizedTables `json:\"generalized_tables\"`\n\tTags Tags `json:\"tags\"`\n\t\/\/ SingleIdSpace mangles the overlapping node\/way\/relation IDs\n\t\/\/ to be unique (nodes positive, ways negative, relations negative -1e17)\n\tSingleIdSpace bool `json:\"use_single_id_space\"`\n}\n\ntype Tags struct {\n\tLoadAll bool `json:\"load_all\"`\n\tExclude []Key `json:\"exclude\"`\n}\n\ntype SubMapping struct {\n\tMapping map[Key][]Value\n}\n\ntype TypeMappings struct {\n\tPoints map[Key][]Value `json:\"points\"`\n\tLineStrings map[Key][]Value `json:\"linestrings\"`\n\tPolygons map[Key][]Value `json:\"polygons\"`\n}\n\ntype ElementFilter func(tags *element.Tags) bool\n\ntype TagTables map[Key]map[Value][]DestTable\n\ntype DestTable struct {\n\tName string\n\tSubMapping string\n}\n\ntype TableType string\n\nfunc (tt *TableType) UnmarshalJSON(data []byte) error {\n\tswitch string(data) {\n\tcase \"\":\n\t\treturn errors.New(\"missing table type\")\n\tcase `\"point\"`:\n\t\t*tt = PointTable\n\tcase `\"linestring\"`:\n\t\t*tt = LineStringTable\n\tcase `\"polygon\"`:\n\t\t*tt = PolygonTable\n\tcase `\"geometry\"`:\n\t\t*tt = GeometryTable\n\tdefault:\n\t\treturn errors.New(\"unknown type \" + string(data))\n\t}\n\treturn nil\n}\n\nconst (\n\tPolygonTable TableType = \"polygon\"\n\tLineStringTable TableType = \"linestring\"\n\tPointTable TableType = \"point\"\n\tGeometryTable TableType = \"geometry\"\n)\n\nfunc NewMapping(filename string) (*Mapping, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdecoder := json.NewDecoder(f)\n\n\tmapping := Mapping{}\n\terr = decoder.Decode(&mapping)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = mapping.prepare()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mapping, nil\n}\n\nfunc (t *Table) ExtraTags() map[Key]bool {\n\ttags := make(map[Key]bool)\n\tfor _, field := range t.Fields {\n\t\tif field.Key != \"\" {\n\t\t\ttags[field.Key] = true\n\t\t}\n\t}\n\treturn tags\n}\n\nfunc (m *Mapping) prepare() error {\n\tfor name, t := range m.Tables {\n\t\tt.Name = name\n\t\tif t.OldFields != nil {\n\t\t\t\/\/ todo deprecate 'fields'\n\t\t\tt.Fields = t.OldFields\n\t\t}\n\t}\n\n\tfor name, t := range m.GeneralizedTables {\n\t\tt.Name = name\n\t}\n\treturn nil\n}\n\nfunc (tt TagTables) addFromMapping(mapping map[Key][]Value, table DestTable) {\n\tfor key, vals := range mapping {\n\t\tfor _, v := range vals {\n\t\t\tvals, ok := tt[key]\n\t\t\tif ok {\n\t\t\t\tvals[v] = append(vals[v], table)\n\t\t\t} else {\n\t\t\t\ttt[key] = make(map[Value][]DestTable)\n\t\t\t\ttt[key][v] = 
append(tt[key][v], table)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) mappings(tableType TableType, mappings TagTables) {\n\tfor name, t := range m.Tables {\n\t\tif t.Type != GeometryTable && t.Type != tableType {\n\t\t\tcontinue\n\t\t}\n\t\tmappings.addFromMapping(t.Mapping, DestTable{name, \"\"})\n\n\t\tfor subMappingName, subMapping := range t.Mappings {\n\t\t\tmappings.addFromMapping(subMapping.Mapping, DestTable{name, subMappingName})\n\t\t}\n\n\t\tswitch tableType {\n\t\tcase PointTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.Points, DestTable{name, \"\"})\n\t\tcase LineStringTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.LineStrings, DestTable{name, \"\"})\n\t\tcase PolygonTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.Polygons, DestTable{name, \"\"})\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) tables(tableType TableType) map[string]*TableFields {\n\tresult := make(map[string]*TableFields)\n\tfor name, t := range m.Tables {\n\t\tif t.Type == tableType || t.Type == \"geometry\" {\n\t\t\tresult[name] = t.TableFields()\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (m *Mapping) extraTags(tableType TableType, tags map[Key]bool) {\n\tfor _, t := range m.Tables {\n\t\tif t.Type != tableType {\n\t\t\tcontinue\n\t\t}\n\t\tfor key, _ := range t.ExtraTags() {\n\t\t\ttags[key] = true\n\t\t}\n\t\tif t.Filters != nil && t.Filters.ExcludeTags != nil {\n\t\t\tfor _, keyVal := range *t.Filters.ExcludeTags {\n\t\t\t\ttags[Key(keyVal[0])] = true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) ElementFilters() map[string][]ElementFilter {\n\tresult := make(map[string][]ElementFilter)\n\tfor name, t := range m.Tables {\n\t\tif t.Filters == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif t.Filters.ExcludeTags != nil {\n\t\t\tfor _, filterKeyVal := range *t.Filters.ExcludeTags {\n\t\t\t\tf := func(tags *element.Tags) bool {\n\t\t\t\t\tif v, ok := (*tags)[filterKeyVal[0]]; ok {\n\t\t\t\t\t\tif filterKeyVal[1] == \"__any__\" || v == filterKeyVal[1] {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tresult[name] = append(result[name], f)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\nadd TODO notepackage mapping\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/omniscale\/imposm3\/element\"\n)\n\ntype Field struct {\n\tName string `json:\"name\"`\n\tKey Key `json:\"key\"`\n\tType string `json:\"type\"`\n\tArgs map[string]interface{} `json:\"args\"`\n}\n\ntype Table struct {\n\tName string\n\tType TableType `json:\"type\"`\n\tMapping map[Key][]Value `json:\"mapping\"`\n\tMappings map[string]SubMapping `json:\"mappings\"`\n\tTypeMappings TypeMappings `json:\"type_mappings\"`\n\tFields []*Field `json:\"columns\"` \/\/ TODO rename Fields internaly to Columns\n\tOldFields []*Field `json:\"fields\"`\n\tFilters *Filters `json:\"filters\"`\n}\n\ntype GeneralizedTable struct {\n\tName string\n\tSourceTableName string `json:\"source\"`\n\tTolerance float64 `json:\"tolerance\"`\n\tSqlFilter string `json:\"sql_filter\"`\n}\n\ntype Filters struct {\n\tExcludeTags *[][2]string `json:\"exclude_tags\"`\n}\n\ntype Tables map[string]*Table\n\ntype GeneralizedTables map[string]*GeneralizedTable\n\ntype Mapping struct {\n\tTables Tables `json:\"tables\"`\n\tGeneralizedTables GeneralizedTables `json:\"generalized_tables\"`\n\tTags Tags `json:\"tags\"`\n\t\/\/ SingleIdSpace mangles the overlapping node\/way\/relation IDs\n\t\/\/ to be unique (nodes positive, ways negative, relations negative -1e17)\n\tSingleIdSpace bool `json:\"use_single_id_space\"`\n}\n\ntype Tags 
struct {\n\tLoadAll bool `json:\"load_all\"`\n\tExclude []Key `json:\"exclude\"`\n}\n\ntype SubMapping struct {\n\tMapping map[Key][]Value\n}\n\ntype TypeMappings struct {\n\tPoints map[Key][]Value `json:\"points\"`\n\tLineStrings map[Key][]Value `json:\"linestrings\"`\n\tPolygons map[Key][]Value `json:\"polygons\"`\n}\n\ntype ElementFilter func(tags *element.Tags) bool\n\ntype TagTables map[Key]map[Value][]DestTable\n\ntype DestTable struct {\n\tName string\n\tSubMapping string\n}\n\ntype TableType string\n\nfunc (tt *TableType) UnmarshalJSON(data []byte) error {\n\tswitch string(data) {\n\tcase \"\":\n\t\treturn errors.New(\"missing table type\")\n\tcase `\"point\"`:\n\t\t*tt = PointTable\n\tcase `\"linestring\"`:\n\t\t*tt = LineStringTable\n\tcase `\"polygon\"`:\n\t\t*tt = PolygonTable\n\tcase `\"geometry\"`:\n\t\t*tt = GeometryTable\n\tdefault:\n\t\treturn errors.New(\"unknown type \" + string(data))\n\t}\n\treturn nil\n}\n\nconst (\n\tPolygonTable TableType = \"polygon\"\n\tLineStringTable TableType = \"linestring\"\n\tPointTable TableType = \"point\"\n\tGeometryTable TableType = \"geometry\"\n)\n\nfunc NewMapping(filename string) (*Mapping, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdecoder := json.NewDecoder(f)\n\n\tmapping := Mapping{}\n\terr = decoder.Decode(&mapping)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = mapping.prepare()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mapping, nil\n}\n\nfunc (t *Table) ExtraTags() map[Key]bool {\n\ttags := make(map[Key]bool)\n\tfor _, field := range t.Fields {\n\t\tif field.Key != \"\" {\n\t\t\ttags[field.Key] = true\n\t\t}\n\t}\n\treturn tags\n}\n\nfunc (m *Mapping) prepare() error {\n\tfor name, t := range m.Tables {\n\t\tt.Name = name\n\t\tif t.OldFields != nil {\n\t\t\t\/\/ todo deprecate 'fields'\n\t\t\tt.Fields = t.OldFields\n\t\t}\n\t}\n\n\tfor name, t := range m.GeneralizedTables {\n\t\tt.Name = name\n\t}\n\treturn nil\n}\n\nfunc (tt TagTables) addFromMapping(mapping map[Key][]Value, table DestTable) {\n\tfor key, vals := range mapping {\n\t\tfor _, v := range vals {\n\t\t\tvals, ok := tt[key]\n\t\t\tif ok {\n\t\t\t\tvals[v] = append(vals[v], table)\n\t\t\t} else {\n\t\t\t\ttt[key] = make(map[Value][]DestTable)\n\t\t\t\ttt[key][v] = append(tt[key][v], table)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) mappings(tableType TableType, mappings TagTables) {\n\tfor name, t := range m.Tables {\n\t\tif t.Type != GeometryTable && t.Type != tableType {\n\t\t\tcontinue\n\t\t}\n\t\tmappings.addFromMapping(t.Mapping, DestTable{name, \"\"})\n\n\t\tfor subMappingName, subMapping := range t.Mappings {\n\t\t\tmappings.addFromMapping(subMapping.Mapping, DestTable{name, subMappingName})\n\t\t}\n\n\t\tswitch tableType {\n\t\tcase PointTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.Points, DestTable{name, \"\"})\n\t\tcase LineStringTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.LineStrings, DestTable{name, \"\"})\n\t\tcase PolygonTable:\n\t\t\tmappings.addFromMapping(t.TypeMappings.Polygons, DestTable{name, \"\"})\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) tables(tableType TableType) map[string]*TableFields {\n\tresult := make(map[string]*TableFields)\n\tfor name, t := range m.Tables {\n\t\tif t.Type == tableType || t.Type == \"geometry\" {\n\t\t\tresult[name] = t.TableFields()\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (m *Mapping) extraTags(tableType TableType, tags map[Key]bool) {\n\tfor _, t := range m.Tables {\n\t\tif t.Type != tableType 
{\n\t\t\tcontinue\n\t\t}\n\t\tfor key, _ := range t.ExtraTags() {\n\t\t\ttags[key] = true\n\t\t}\n\t\tif t.Filters != nil && t.Filters.ExcludeTags != nil {\n\t\t\tfor _, keyVal := range *t.Filters.ExcludeTags {\n\t\t\t\ttags[Key(keyVal[0])] = true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *Mapping) ElementFilters() map[string][]ElementFilter {\n\tresult := make(map[string][]ElementFilter)\n\tfor name, t := range m.Tables {\n\t\tif t.Filters == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif t.Filters.ExcludeTags != nil {\n\t\t\tfor _, filterKeyVal := range *t.Filters.ExcludeTags {\n\t\t\t\tf := func(tags *element.Tags) bool {\n\t\t\t\t\tif v, ok := (*tags)[filterKeyVal[0]]; ok {\n\t\t\t\t\t\tif filterKeyVal[1] == \"__any__\" || v == filterKeyVal[1] {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tresult[name] = append(result[name], f)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"package commands\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/github\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar cmdClone = &Command{\n\tRun: clone,\n\tGitExtension: true,\n\tUsage: \"clone [-p] OPTIONS [USER\/]REPOSITORY DIRECTORY\",\n\tShort: \"Clone a remote repository into a new directory\",\n\tLong: `Clone repository \"git:\/\/github.com\/USER\/REPOSITORY.git\" into\nDIRECTORY as with git-clone(1). When USER\/ is omitted, assumes\nyour GitHub login. With -p, clone private repositories over SSH.\nFor repositories under your GitHub login, -p is implicit.\n`,\n}\n\nfunc init() {\n\tCmdRunner.Use(cmdClone)\n}\n\n\/**\n $ gh clone jingweno\/gh\n > git clone git:\/\/github.com\/jingweno\/gh.git\n\n $ gh clone -p jingweno\/gh\n > git clone git@github.com:jingweno\/gh.git\n\n $ gh clone jekyll_and_hyde\n > git clone git:\/\/github.com\/YOUR_LOGIN\/jekyll_and_hyde.git\n\n $ gh clone -p jekyll_and_hyde\n > git clone git@github.com:YOUR_LOGIN\/jekyll_and_hyde.git\n*\/\nfunc clone(command *Command, args *Args) {\n\tif !args.IsParamsEmpty() {\n\t\ttransformCloneArgs(args)\n\t}\n}\n\nfunc transformCloneArgs(args *Args) {\n\tisSSH := parseClonePrivateFlag(args)\n\thasValueRegxp := regexp.MustCompile(\"^(--(upload-pack|template|depth|origin|branch|reference|name)|-[ubo])$\")\n\tnameWithOwnerRegexp := regexp.MustCompile(NameWithOwnerRe)\n\tfor i := 0; i < args.ParamsSize(); i++ {\n\t\ta := args.Params[i]\n\n\t\tif strings.HasPrefix(a, \"-\") {\n\t\t\tif hasValueRegxp.MatchString(a) {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\tif nameWithOwnerRegexp.MatchString(a) && !isDir(a) {\n\t\t\t\tname, owner := parseCloneNameAndOwner(a)\n\t\t\t\tvar host *github.Host\n\t\t\t\tif owner == \"\" {\n\t\t\t\t\tconfigs := github.CurrentConfigs()\n\t\t\t\t\th, err := configs.DefaultHost()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutils.Check(github.FormatError(\"cloning repository\", err))\n\t\t\t\t\t}\n\n\t\t\t\t\thost = h\n\t\t\t\t\towner = host.User\n\t\t\t\t}\n\n\t\t\t\tvar hostStr string\n\t\t\t\tif host != nil {\n\t\t\t\t\thostStr = host.Host\n\t\t\t\t}\n\n\t\t\t\tproject := github.NewProject(owner, name, hostStr)\n\t\t\t\tif !isSSH &&\n\t\t\t\t\targs.Command != \"submodule\" &&\n\t\t\t\t\t!args.Noop &&\n\t\t\t\t\t!github.IsHttpsProtocol() {\n\t\t\t\t\tclient := github.NewClient(project.Host)\n\t\t\t\t\trepo, err := client.Repository(project)\n\t\t\t\t\tisSSH = (err == nil) && (repo.Private || repo.Permissions.Push)\n\t\t\t\t}\n\n\t\t\t\turl := project.GitURL(name, owner, isSSH)\n\t\t\t\targs.ReplaceParam(i, 
url)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc parseClonePrivateFlag(args *Args) bool {\n\tif i := args.IndexOfParam(\"-p\"); i != -1 {\n\t\targs.RemoveParam(i)\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc parseCloneNameAndOwner(arg string) (name, owner string) {\n\tname, owner = arg, \"\"\n\tif strings.Contains(arg, \"\/\") {\n\t\tsplit := strings.SplitN(arg, \"\/\", 2)\n\t\tname = split[1]\n\t\towner = split[0]\n\t}\n\n\treturn\n}\nClone with noop still workspackage commands\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/github\/hub\/github\"\n\t\"github.com\/github\/hub\/utils\"\n)\n\nvar cmdClone = &Command{\n\tRun: clone,\n\tGitExtension: true,\n\tUsage: \"clone [-p] OPTIONS [USER\/]REPOSITORY DIRECTORY\",\n\tShort: \"Clone a remote repository into a new directory\",\n\tLong: `Clone repository \"git:\/\/github.com\/USER\/REPOSITORY.git\" into\nDIRECTORY as with git-clone(1). When USER\/ is omitted, assumes\nyour GitHub login. With -p, clone private repositories over SSH.\nFor repositories under your GitHub login, -p is implicit.\n`,\n}\n\nfunc init() {\n\tCmdRunner.Use(cmdClone)\n}\n\n\/**\n $ gh clone jingweno\/gh\n > git clone git:\/\/github.com\/jingweno\/gh.git\n\n $ gh clone -p jingweno\/gh\n > git clone git@github.com:jingweno\/gh.git\n\n $ gh clone jekyll_and_hyde\n > git clone git:\/\/github.com\/YOUR_LOGIN\/jekyll_and_hyde.git\n\n $ gh clone -p jekyll_and_hyde\n > git clone git@github.com:YOUR_LOGIN\/jekyll_and_hyde.git\n*\/\nfunc clone(command *Command, args *Args) {\n\tif !args.IsParamsEmpty() {\n\t\ttransformCloneArgs(args)\n\t}\n}\n\nfunc transformCloneArgs(args *Args) {\n\tisSSH := parseClonePrivateFlag(args)\n\thasValueRegxp := regexp.MustCompile(\"^(--(upload-pack|template|depth|origin|branch|reference|name)|-[ubo])$\")\n\tnameWithOwnerRegexp := regexp.MustCompile(NameWithOwnerRe)\n\tfor i := 0; i < args.ParamsSize(); i++ {\n\t\ta := args.Params[i]\n\n\t\tif strings.HasPrefix(a, \"-\") {\n\t\t\tif hasValueRegxp.MatchString(a) {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\tif nameWithOwnerRegexp.MatchString(a) && !isDir(a) {\n\t\t\t\tname, owner := parseCloneNameAndOwner(a)\n\t\t\t\tvar host *github.Host\n\t\t\t\tif owner == \"\" {\n\t\t\t\t\tconfigs := github.CurrentConfigs()\n\t\t\t\t\th, err := configs.DefaultHost()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutils.Check(github.FormatError(\"cloning repository\", err))\n\t\t\t\t\t}\n\n\t\t\t\t\thost = h\n\t\t\t\t\towner = host.User\n\t\t\t\t}\n\n\t\t\t\tvar hostStr string\n\t\t\t\tif host != nil {\n\t\t\t\t\thostStr = host.Host\n\t\t\t\t}\n\n\t\t\t\tproject := github.NewProject(owner, name, hostStr)\n\t\t\t\tif !isSSH &&\n\t\t\t\t\targs.Command != \"submodule\" &&\n\t\t\t\t\t!github.IsHttpsProtocol() {\n\t\t\t\t\tclient := github.NewClient(project.Host)\n\t\t\t\t\trepo, err := client.Repository(project)\n\t\t\t\t\tisSSH = (err == nil) && (repo.Private || repo.Permissions.Push)\n\t\t\t\t}\n\n\t\t\t\turl := project.GitURL(name, owner, isSSH)\n\t\t\t\targs.ReplaceParam(i, url)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc parseClonePrivateFlag(args *Args) bool {\n\tif i := args.IndexOfParam(\"-p\"); i != -1 {\n\t\targs.RemoveParam(i)\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc parseCloneNameAndOwner(arg string) (name, owner string) {\n\tname, owner = arg, \"\"\n\tif strings.Contains(arg, \"\/\") {\n\t\tsplit := strings.SplitN(arg, \"\/\", 2)\n\t\tname = split[1]\n\t\towner = split[0]\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"package clc\n\nimport 
(\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/mikebeyer\/env\"\n)\n\ntype Config struct {\n\tUser User\n\tAlias string\n\tBaseURL string\n}\n\nfunc EnvConfig() Config {\n\treturn Config{\n\t\tUser: User{\n\t\t\tUsername: env.MustString(\"CLC_USERNAME\"),\n\t\t\tPassword: env.MustString(\"CLC_PASSWORD\"),\n\t\t},\n\t\tAlias: env.MustString(\"CLC_ALIAS\"),\n\t\tBaseURL: env.String(\"CLC_BASE_URL\", \"https:\/\/api.ctl.io\/v2\"),\n\t}\n}\n\ntype Client struct {\n\tconfig Config\n\tclient *http.Client\n\tbaseURL string\n}\n\nfunc New(config Config) *Client {\n\turl := config.BaseURL\n\tif url == \"\" {\n\t\turl = \"https:\/\/api.ctl.io\/v2\"\n\t}\n\treturn &Client{\n\t\tconfig: config,\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: url,\n\t}\n}\n\nfunc (c *Client) Auth() (string, error) {\n\turl := fmt.Sprintf(\"%s\/authentication\/login\", c.baseURL)\n\tb, err := json.Marshal(&c.config.User)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresp, err := http.Post(url, \"application\/json\", ioutil.NopCloser(bytes.NewReader(b)))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tauth := &Auth{}\n\tif err := json.NewDecoder(resp.Body).Decode(auth); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn auth.Token, nil\n}\n\nfunc (c *Client) get(url string, resp interface{}) error {\n\treturn c.do(\"GET\", url, nil, resp)\n}\n\nfunc (c *Client) do(method, url string, body io.Reader, resp interface{}) error {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewDecoder(res.Body).Decode(resp)\n}\n\ntype User struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype Auth struct {\n\tUsername string `json:\"userName\"`\n\tAlias string `json:\"accountAlias\"`\n\tLocation string `json:\"locationAlias\"`\n\tRoles []string `json:\"roles\"`\n\tToken string `json:\"bearerToken\"`\n}\nevaluate server response for non-success error codespackage clc\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/mikebeyer\/env\"\n)\n\ntype Config struct {\n\tUser User\n\tAlias string\n\tBaseURL string\n}\n\nfunc EnvConfig() Config {\n\treturn Config{\n\t\tUser: User{\n\t\t\tUsername: env.MustString(\"CLC_USERNAME\"),\n\t\t\tPassword: env.MustString(\"CLC_PASSWORD\"),\n\t\t},\n\t\tAlias: env.MustString(\"CLC_ALIAS\"),\n\t\tBaseURL: env.String(\"CLC_BASE_URL\", \"https:\/\/api.ctl.io\/v2\"),\n\t}\n}\n\ntype Client struct {\n\tconfig Config\n\tclient *http.Client\n\tbaseURL string\n}\n\nfunc New(config Config) *Client {\n\turl := config.BaseURL\n\tif url == \"\" {\n\t\turl = \"https:\/\/api.ctl.io\/v2\"\n\t}\n\treturn &Client{\n\t\tconfig: config,\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: url,\n\t}\n}\n\nfunc (c *Client) Auth() (string, error) {\n\turl := fmt.Sprintf(\"%s\/authentication\/login\", c.baseURL)\n\tb, err := json.Marshal(&c.config.User)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresp, err := http.Post(url, \"application\/json\", ioutil.NopCloser(bytes.NewReader(b)))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tauth := &Auth{}\n\tif err := json.NewDecoder(resp.Body).Decode(auth); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn auth.Token, nil\n}\n\nfunc (c *Client) get(url string, resp interface{}) error {\n\treturn 
c.do(\"GET\", url, nil, resp)\n}\n\nfunc (c *Client) do(method, url string, body io.Reader, resp interface{}) error {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode >= 300 {\n\t\treturn errors.New(fmt.Sprintf(\"http error: %s\", res.Status))\n\t}\n\n\treturn json.NewDecoder(res.Body).Decode(resp)\n}\n\ntype User struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype Auth struct {\n\tUsername string `json:\"userName\"`\n\tAlias string `json:\"accountAlias\"`\n\tLocation string `json:\"locationAlias\"`\n\tRoles []string `json:\"roles\"`\n\tToken string `json:\"bearerToken\"`\n}\n<|endoftext|>"} {"text":"package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ WriteDemoConfig writes a toml file with the given values.\n\/\/ It returns the RootDir the config.toml file is stored in,\n\/\/ or an error if writing was impossible\nfunc WriteDemoConfig(vals map[string]string) (string, error) {\n\tcdir, err := ioutil.TempDir(\"\", \"test-cli\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdata := \"\"\n\tfor k, v := range vals {\n\t\tdata = data + fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", k, v)\n\t}\n\tcfile := filepath.Join(cdir, \"config.toml\")\n\terr = ioutil.WriteFile(cfile, []byte(data), 0666)\n\treturn cdir, err\n}\n\n\/\/ RunWithArgs executes the given command with the specified command line args\n\/\/ and environmental variables set. It returns any error returned from cmd.Execute()\nfunc RunWithArgs(cmd Executable, args []string, env map[string]string) error {\n\toargs := os.Args\n\toenv := map[string]string{}\n\t\/\/ defer returns the environment back to normal\n\tdefer func() {\n\t\tos.Args = oargs\n\t\tfor k, v := range oenv {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}()\n\n\t\/\/ set the args and env how we want them\n\tos.Args = args\n\tfor k, v := range env {\n\t\t\/\/ backup old value if there, to restore at end\n\t\toenv[k] = os.Getenv(k)\n\t\terr := os.Setenv(k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ and finally run the command\n\treturn cmd.Execute()\n}\n\n\/\/ RunCaptureWithArgs executes the given command with the specified command\n\/\/ line args and environmental variables set. 
It returns string fields\n\/\/ representing output written to stdout and stderr; additionally, any error\n\/\/ from cmd.Execute() is also returned\nfunc RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) {\n\toldout, olderr := os.Stdout, os.Stderr \/\/ keep backup of the real stdout\n\trOut, wOut, _ := os.Pipe()\n\trErr, wErr, _ := os.Pipe()\n\tos.Stdout, os.Stderr = wOut, wErr\n\tdefer func() {\n\t\tos.Stdout, os.Stderr = oldout, olderr \/\/ restoring the real stdout\n\t}()\n\n\t\/\/ copy the output in a separate goroutine so printing can't block indefinitely\n\tcopyStd := func(reader *os.File) *(chan string) {\n\t\tstdC := make(chan string)\n\t\tgo func() {\n\t\t\tvar buf bytes.Buffer\n\t\t\t\/\/ io.Copy will end when we call reader.Close() below\n\t\t\tio.Copy(&buf, *reader)\n\t\t\tstdC <- buf.String()\n\t\t}()\n\t\treturn stdC\n\t}\n\toutC := copyStd(&rOut)\n\terrC := copyStd(&rErr)\n\n\t\/\/ now run the command\n\terr = RunWithArgs(cmd, args, env)\n\n\t\/\/ and grab the stdout to return\n\twOut.Close()\n\twErr.Close()\n\tstdout = <-outC\n\tstderr = <-errC\n\treturn stdout, stderr, err\n}\nquickfixpackage cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\n\/\/ WriteDemoConfig writes a toml file with the given values.\n\/\/ It returns the RootDir the config.toml file is stored in,\n\/\/ or an error if writing was impossible\nfunc WriteDemoConfig(vals map[string]string) (string, error) {\n\tcdir, err := ioutil.TempDir(\"\", \"test-cli\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdata := \"\"\n\tfor k, v := range vals {\n\t\tdata = data + fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", k, v)\n\t}\n\tcfile := filepath.Join(cdir, \"config.toml\")\n\terr = ioutil.WriteFile(cfile, []byte(data), 0666)\n\treturn cdir, err\n}\n\n\/\/ RunWithArgs executes the given command with the specified command line args\n\/\/ and environmental variables set. It returns any error returned from cmd.Execute()\nfunc RunWithArgs(cmd Executable, args []string, env map[string]string) error {\n\toargs := os.Args\n\toenv := map[string]string{}\n\t\/\/ defer returns the environment back to normal\n\tdefer func() {\n\t\tos.Args = oargs\n\t\tfor k, v := range oenv {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}()\n\n\t\/\/ set the args and env how we want them\n\tos.Args = args\n\tfor k, v := range env {\n\t\t\/\/ backup old value if there, to restore at end\n\t\toenv[k] = os.Getenv(k)\n\t\terr := os.Setenv(k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ and finally run the command\n\treturn cmd.Execute()\n}\n\n\/\/ RunCaptureWithArgs executes the given command with the specified command\n\/\/ line args and environmental variables set. 
It returns string fields\n\/\/ representing output written to stdout and stderr; additionally, any error\n\/\/ from cmd.Execute() is also returned\nfunc RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) {\n\toldout, olderr := os.Stdout, os.Stderr \/\/ keep backup of the real stdout\n\trOut, wOut, _ := os.Pipe()\n\trErr, wErr, _ := os.Pipe()\n\tos.Stdout, os.Stderr = wOut, wErr\n\tdefer func() {\n\t\tos.Stdout, os.Stderr = oldout, olderr \/\/ restoring the real stdout\n\t}()\n\n\t\/\/ copy the output in a separate goroutine so printing can't block indefinitely\n\tcopyStd := func(reader *os.File) *(chan string) {\n\t\tstdC := make(chan string)\n\t\tgo func() {\n\t\t\tvar buf bytes.Buffer\n\t\t\t\/\/ io.Copy will end when we call reader.Close() below\n\t\t\tio.Copy(&buf, reader)\n\t\t\tstdC <- buf.String()\n\t\t}()\n\t\treturn &stdC\n\t}\n\toutC := copyStd(rOut)\n\terrC := copyStd(rErr)\n\n\t\/\/ now run the command\n\terr = RunWithArgs(cmd, args, env)\n\n\t\/\/ and grab the stdout to return\n\twOut.Close()\n\twErr.Close()\n\tstdout = <-*outC\n\tstderr = <-*errC\n\treturn stdout, stderr, err\n}\n<|endoftext|>"} {"text":"package kafka\n\nimport (\n\t\"github.com\/Shopify\/sarama\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ Producer is a kafka producer that is transparent for sync\/async mode.\ntype Producer struct {\n\tcf *Config\n\tname string\n\tbrokers []string\n\tstopper chan struct{}\n\n\tp sarama.SyncProducer\n\tap sarama.AsyncProducer\n\n\tsendMessage func(*sarama.ProducerMessage) error\n\n\tonError func(*sarama.ProducerError)\n\tonSuccess func(*sarama.ProducerMessage)\n}\n\nfunc NewProducer(name string, brokers []string, cf *Config) *Producer {\n\tp := &Producer{\n\t\tname: name,\n\t\tbrokers: brokers,\n\t\tcf: cf,\n\t\tstopper: make(chan struct{}),\n\t}\n\n\treturn p\n}\n\nfunc (p *Producer) Start() error {\n\tvar err error\n\tif p.cf.async {\n\t\tp.ap, err = sarama.NewAsyncProducer(p.brokers, p.cf.Sarama)\n\t\tp.sendMessage = p.asyncSend\n\t} else {\n\t\tp.p, err = sarama.NewSyncProducer(p.brokers, p.cf.Sarama)\n\t\tp.sendMessage = p.syncSend\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !p.cf.async {\n\t\treturn nil\n\t}\n\n\tif p.onError == nil || p.onSuccess == nil {\n\t\treturn ErrNotReady\n\t}\n\n\tgo func() {\n\t\t\/\/ loop till Producer success channel closed\n\t\terrChan := p.ap.Errors()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, ok := <-p.ap.Successes():\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Trace(\"[%s] success chan closed\", p.name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tp.onSuccess(msg)\n\n\t\t\tcase err, ok := <-errChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Trace(\"[%s] err chan closed\", p.name)\n\t\t\t\t\terrChan = nil\n\t\t\t\t}\n\n\t\t\t\tp.onError(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Close will drain and close the Producer.\nfunc (p *Producer) Close() error {\n\tclose(p.stopper)\n\n\tif p.cf.async {\n\t\tp.ap.AsyncClose()\n\n\t\t\/\/ drain successes\n\t\tif p.onSuccess != nil {\n\t\t\tfor msg := range p.ap.Successes() {\n\t\t\t\tp.onSuccess(msg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ drain errors\n\t\tif p.onError != nil {\n\t\t\tfor err := range p.ap.Errors() {\n\t\t\t\tp.onError(err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn p.p.Close()\n}\n\nfunc (p *Producer) ClientID() string {\n\treturn p.cf.Sarama.ClientID\n}\n\nfunc (p *Producer) SetErrorHandler(f func(err *sarama.ProducerError)) error {\n\tif !p.cf.async {\n\t\treturn ErrNotAllowed\n\t}\n\n\tif f == nil 
{\n\t\tp.cf.Sarama.Producer.Return.Errors = false\n\t}\n\tp.onError = f\n\treturn nil\n}\n\nfunc (p *Producer) SetSuccessHandler(f func(err *sarama.ProducerMessage)) error {\n\tif !p.cf.async {\n\t\treturn ErrNotAllowed\n\t}\n\n\tif f == nil {\n\t\tp.cf.Sarama.Producer.Return.Successes = false\n\t}\n\tp.onSuccess = f\n\treturn nil\n}\n\n\/\/ Send will send a kafka message.\nfunc (p *Producer) Send(m *sarama.ProducerMessage) error {\n\treturn p.sendMessage(m)\n}\n\nfunc (p *Producer) asyncSend(m *sarama.ProducerMessage) error {\n\tlog.Debug(\"[%s] async sending: %+v\", p.name, m)\n\n\tselect {\n\tcase <-p.stopper:\n\t\treturn ErrStopping\n\n\tcase p.ap.Input() <- m:\n\t}\n\treturn nil\n}\n\nfunc (p *Producer) syncSend(m *sarama.ProducerMessage) error {\n\tlog.Debug(\"[%s] sync sending: %+v\", p.name, m)\n\n\t_, _, err := p.p.SendMessage(m)\n\treturn err\n}\nBUG FIX: when shutting down, the err handler receives a nil errpackage kafka\n\nimport (\n\t\"github.com\/Shopify\/sarama\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ Producer is a kafka producer that is transparent for sync\/async mode.\ntype Producer struct {\n\tcf *Config\n\tname string\n\tbrokers []string\n\tstopper chan struct{}\n\n\tp sarama.SyncProducer\n\tap sarama.AsyncProducer\n\n\tsendMessage func(*sarama.ProducerMessage) error\n\n\tonError func(*sarama.ProducerError)\n\tonSuccess func(*sarama.ProducerMessage)\n}\n\nfunc NewProducer(name string, brokers []string, cf *Config) *Producer {\n\tp := &Producer{\n\t\tname: name,\n\t\tbrokers: brokers,\n\t\tcf: cf,\n\t\tstopper: make(chan struct{}),\n\t}\n\n\treturn p\n}\n\nfunc (p *Producer) Start() error {\n\tvar err error\n\tif p.cf.async {\n\t\tp.ap, err = sarama.NewAsyncProducer(p.brokers, p.cf.Sarama)\n\t\tp.sendMessage = p.asyncSend\n\t} else {\n\t\tp.p, err = sarama.NewSyncProducer(p.brokers, p.cf.Sarama)\n\t\tp.sendMessage = p.syncSend\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !p.cf.async {\n\t\treturn nil\n\t}\n\n\tif p.onError == nil || p.onSuccess == nil {\n\t\treturn ErrNotReady\n\t}\n\n\tgo func() {\n\t\t\/\/ loop till Producer success channel closed\n\t\terrChan := p.ap.Errors()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, ok := <-p.ap.Successes():\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Trace(\"[%s] success chan closed\", p.name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tp.onSuccess(msg)\n\n\t\t\tcase err, ok := <-errChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Trace(\"[%s] err chan closed\", p.name)\n\t\t\t\t\terrChan = nil\n\t\t\t\t} else {\n\t\t\t\t\tp.onError(err)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\n\/\/ Close will drain and close the Producer.\nfunc (p *Producer) Close() error {\n\tclose(p.stopper)\n\n\tif p.cf.async {\n\t\tp.ap.AsyncClose()\n\n\t\t\/\/ drain successes\n\t\tif p.onSuccess != nil {\n\t\t\tfor msg := range p.ap.Successes() {\n\t\t\t\tp.onSuccess(msg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ drain errors\n\t\tif p.onError != nil {\n\t\t\tfor err := range p.ap.Errors() {\n\t\t\t\tp.onError(err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn p.p.Close()\n}\n\nfunc (p *Producer) ClientID() string {\n\treturn p.cf.Sarama.ClientID\n}\n\n\/\/ SetErrorHandler sets up the handler for the async producer's unretriable errors, e.g.:\n\/\/ ErrInvalidPartition, ErrMessageSizeTooLarge, ErrIncompleteResponse,\n\/\/ ErrBreakerOpen (e.g. 
update leader fails)\nfunc (p *Producer) SetErrorHandler(f func(err *sarama.ProducerError)) error {\n\tif !p.cf.async {\n\t\treturn ErrNotAllowed\n\t}\n\n\tif f == nil {\n\t\tp.cf.Sarama.Producer.Return.Errors = false\n\t}\n\tp.onError = f\n\treturn nil\n}\n\nfunc (p *Producer) SetSuccessHandler(f func(err *sarama.ProducerMessage)) error {\n\tif !p.cf.async {\n\t\treturn ErrNotAllowed\n\t}\n\n\tif f == nil {\n\t\tp.cf.Sarama.Producer.Return.Successes = false\n\t}\n\tp.onSuccess = f\n\treturn nil\n}\n\n\/\/ Send will send a kafka message.\nfunc (p *Producer) Send(m *sarama.ProducerMessage) error {\n\treturn p.sendMessage(m)\n}\n\nfunc (p *Producer) asyncSend(m *sarama.ProducerMessage) error {\n\tlog.Debug(\"[%s] async sending: %+v\", p.name, m)\n\n\tselect {\n\tcase <-p.stopper:\n\t\treturn ErrStopping\n\n\tcase p.ap.Input() <- m:\n\t}\n\treturn nil\n}\n\nfunc (p *Producer) syncSend(m *sarama.ProducerMessage) error {\n\tlog.Debug(\"[%s] sync sending: %+v\", p.name, m)\n\n\t_, _, err := p.p.SendMessage(m)\n\treturn err\n}\n<|endoftext|>"} {"text":"package model\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n)\n\nfunc makeRowsEvent() *RowsEvent {\n\treturn &RowsEvent{\n\t\tLog: \"mysql-bin.0001\",\n\t\tPosition: 498876,\n\t\tSchema: \"mydabase\",\n\t\tTable: \"user_account\",\n\t\tAction: \"I\",\n\t\tTimestamp: 1486554654,\n\t\tRows: [][]interface{}{{\"user\", 15, \"hello world\"}},\n\t}\n}\n\nfunc TestRowsEventEncode(t *testing.T) {\n\tr := makeRowsEvent()\n\tb, err := r.Encode()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(b) != `{\"log\":\"mysql-bin.0001\",\"pos\":498876,\"db\":\"mydabase\",\"tbl\":\"user_account\",\"dml\":\"I\",\"ts\":1486554654,\"rows\":[[\"user\",15,\"hello world\"]]}` {\n\t\tt.Fatal(\"encoded wrong:\" + string(b))\n\t}\n}\n\nfunc BenchmarkRowsEventEncode(b *testing.B) {\n\tr := makeRowsEvent()\n\tfor i := 0; i < b.N; i++ {\n\t\tr.Encode()\n\t}\n}\n\nfunc BenchmarkRowsEventLength(b *testing.B) {\n\tr := makeRowsEvent()\n\tfor i := 0; i < b.N; i++ {\n\t\tr.Length()\n\t}\n}\n\nfunc BenchmarkJsonEncodeRowsEvent(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := makeRowsEvent()\n\t\tjson.Marshal(r)\n\t}\n}\n\nfunc BenchmarkRowsEventJsonMarshalFF(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := makeRowsEvent()\n\t\tr.MarshalJSON()\n\t}\n}\nmsgpack have little gains compared with ffjsonpackage model\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n)\n\nfunc makeRowsEvent() *RowsEvent {\n\treturn &RowsEvent{\n\t\tLog: \"mysql-bin.0001\",\n\t\tPosition: 498876,\n\t\tSchema: \"mydabase\",\n\t\tTable: \"user_account\",\n\t\tAction: \"I\",\n\t\tTimestamp: 1486554654,\n\t\tRows: [][]interface{}{{\"user\", 15, \"hello world\"}},\n\t}\n}\n\nfunc TestRowsEventEncode(t *testing.T) {\n\tr := makeRowsEvent()\n\tb, err := r.Encode()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(b) != `{\"log\":\"mysql-bin.0001\",\"pos\":498876,\"db\":\"mydabase\",\"tbl\":\"user_account\",\"dml\":\"I\",\"ts\":1486554654,\"rows\":[[\"user\",15,\"hello world\"]]}` {\n\t\tt.Fatal(\"encoded wrong:\" + string(b))\n\t}\n}\n\nfunc BenchmarkRowsEventEncode(b *testing.B) {\n\tr := makeRowsEvent()\n\tfor i := 0; i < b.N; i++ {\n\t\tr.Encode()\n\t}\n}\n\nfunc BenchmarkRowsEventLength(b *testing.B) {\n\tr := makeRowsEvent()\n\tfor i := 0; i < b.N; i++ {\n\t\tr.Length()\n\t}\n}\n\nfunc BenchmarkJsonEncodeRowsEvent(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := makeRowsEvent()\n\t\tjson.Marshal(r)\n\t}\n}\n\nfunc 
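BenchmarkMsgpackDecodeRowsEvent(b *testing.B) {\n\t\/\/ Illustrative companion benchmark, not part of the original suite: it\n\t\/\/ exercises the msgpack decode path, assuming msgpack.Unmarshal\n\t\/\/ round-trips what msgpack.Marshal produced.\n\tr := makeRowsEvent()\n\tdata, err := msgpack.Marshal(r)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvar out RowsEvent\n\t\tif err := msgpack.Unmarshal(data, &out); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc 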
BenchmarkMsgpackEncodeRowsEvent(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := makeRowsEvent()\n\t\tmsgpack.Marshal(r)\n\t}\n}\n\nfunc BenchmarkRowsEventJsonMarshalFF(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := makeRowsEvent()\n\t\tr.MarshalJSON()\n\t}\n}\n<|endoftext|>"} {"text":"package ticker\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/convert\"\n)\n\ntype Ticker struct {\n\tSymbol string\n\tFrr float64\n\tBid float64\n\tBidPeriod int64\n\tBidSize float64\n\tAsk float64\n\tAskPeriod int64\n\tAskSize float64\n\tDailyChange float64\n\tDailyChangePerc float64\n\tLastPrice float64\n\tVolume float64\n\tHigh float64\n\tLow float64\n}\n\ntype Update Ticker\ntype Snapshot struct {\n\tSnapshot []*Ticker\n}\n\nfunc SnapshotFromRaw(symbol string, raw [][]interface{}) (*Snapshot, error) {\n\tif len(raw) == 0 {\n\t\treturn nil, fmt.Errorf(\"data slice too short for ticker snapshot: %#v\", raw)\n\t}\n\n\tsnap := make([]*Ticker, 0)\n\tfor _, f := range raw {\n\t\tc, err := FromRaw(symbol, f)\n\t\tif err == nil {\n\t\t\tsnap = append(snap, c)\n\t\t}\n\t}\n\n\treturn &Snapshot{Snapshot: snap}, nil\n}\n\nfunc FromRaw(symbol string, raw []interface{}) (t *Ticker, err error) {\n\tif len(raw) < 10 {\n\t\treturn t, fmt.Errorf(\"data slice too short for ticker, expected %d got %d: %#v\", 10, len(raw), raw)\n\t}\n\n\t\/\/ funding currency ticker\n\t\/\/ ignore bid\/ask period for now\n\tif len(raw) == 13 {\n\t\tt = &Ticker{\n\t\t\tSymbol: symbol,\n\t\t\tBid: convert.F64ValOrZero(raw[1]),\n\t\t\tBidSize: convert.F64ValOrZero(raw[2]),\n\t\t\tAsk: convert.F64ValOrZero(raw[4]),\n\t\t\tAskSize: convert.F64ValOrZero(raw[5]),\n\t\t\tDailyChange: convert.F64ValOrZero(raw[7]),\n\t\t\tDailyChangePerc: convert.F64ValOrZero(raw[8]),\n\t\t\tLastPrice: convert.F64ValOrZero(raw[9]),\n\t\t\tVolume: convert.F64ValOrZero(raw[10]),\n\t\t\tHigh: convert.F64ValOrZero(raw[11]),\n\t\t\tLow: convert.F64ValOrZero(raw[12]),\n\t\t}\n\t\treturn\n\t}\n\n\tif len(raw) == 16 {\n\t\tt = &Ticker{\n\t\t\tSymbol: symbol,\n\t\t\tFrr: convert.F64ValOrZero(raw[0]),\n\t\t\tBid: convert.F64ValOrZero(raw[1]),\n\t\t\tBidPeriod: convert.I64ValOrZero(raw[2]),\n\t\t\tBidSize: convert.F64ValOrZero(raw[3]),\n\t\t\tAsk: convert.F64ValOrZero(raw[4]),\n\t\t\tAskPeriod: convert.I64ValOrZero(raw[5]),\n\t\t\tAskSize: convert.F64ValOrZero(raw[6]),\n\t\t\tDailyChange: convert.F64ValOrZero(raw[7]),\n\t\t\tDailyChangePerc: convert.F64ValOrZero(raw[8]),\n\t\t\tLastPrice: convert.F64ValOrZero(raw[9]),\n\t\t\tVolume: convert.F64ValOrZero(raw[10]),\n\t\t\tHigh: convert.F64ValOrZero(raw[11]),\n\t\t\tLow: convert.F64ValOrZero(raw[12]),\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/ all other tickers\n\t\/\/ on trading pairs (ex. 
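tBTCUSD). The raw slice is expected to be ordered\n\t\/\/ BID, BID_SIZE, ASK, ASK_SIZE, DAILY_CHANGE, DAILY_CHANGE_PERC,\n\t\/\/ LAST_PRICE, VOLUME, HIGH, LOW, which is the index mapping applied\n\t\/\/ below (ex. 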
tBTCUSD)\n\tt = &Ticker{\n\t\tSymbol: symbol,\n\t\tBid: convert.F64ValOrZero(raw[0]),\n\t\tBidSize: convert.F64ValOrZero(raw[1]),\n\t\tAsk: convert.F64ValOrZero(raw[2]),\n\t\tAskSize: convert.F64ValOrZero(raw[3]),\n\t\tDailyChange: convert.F64ValOrZero(raw[4]),\n\t\tDailyChangePerc: convert.F64ValOrZero(raw[5]),\n\t\tLastPrice: convert.F64ValOrZero(raw[6]),\n\t\tVolume: convert.F64ValOrZero(raw[7]),\n\t\tHigh: convert.F64ValOrZero(raw[8]),\n\t\tLow: convert.F64ValOrZero(raw[9]),\n\t}\n\treturn\n}\n\nfunc FromRestRaw(raw []interface{}) (t *Ticker, err error) {\n\tif len(raw) == 0 {\n\t\treturn t, fmt.Errorf(\"data slice too short for ticker\")\n\t}\n\n\treturn FromRaw(raw[0].(string), raw[1:])\n}\nupdating ticker model key\/value pairs and mapping logic as per docspackage ticker\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/bitfinexcom\/bitfinex-api-go\/pkg\/convert\"\n)\n\ntype Ticker struct {\n\tSymbol string\n\tFrr float64\n\tBid float64\n\tBidPeriod int64\n\tBidSize float64\n\tAsk float64\n\tAskPeriod int64\n\tAskSize float64\n\tDailyChange float64\n\tDailyChangeRelative float64\n\tLastPrice float64\n\tVolume float64\n\tHigh float64\n\tLow float64\n\t\/\/ PLACEHOLDER,\n\t\/\/ PLACEHOLDER,\n\tFrrAmountAvailable float64\n}\n\ntype Update Ticker\ntype Snapshot struct {\n\tSnapshot []*Ticker\n}\n\nfunc SnapshotFromRaw(symbol string, raw [][]interface{}) (*Snapshot, error) {\n\tif len(raw) == 0 {\n\t\treturn nil, fmt.Errorf(\"data slice too short for ticker snapshot: %#v\", raw)\n\t}\n\n\tsnap := make([]*Ticker, 0)\n\tfor _, f := range raw {\n\t\tc, err := FromRaw(symbol, f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsnap = append(snap, c)\n\t}\n\n\treturn &Snapshot{Snapshot: snap}, nil\n}\n\nfunc FromRaw(symbol string, raw []interface{}) (t *Ticker, err error) {\n\tif strings.HasPrefix(symbol, \"t\") && len(raw) >= 10 {\n\t\tt = &Ticker{\n\t\t\tSymbol: symbol,\n\t\t\tBid: convert.F64ValOrZero(raw[0]),\n\t\t\tBidSize: convert.F64ValOrZero(raw[1]),\n\t\t\tAsk: convert.F64ValOrZero(raw[2]),\n\t\t\tAskSize: convert.F64ValOrZero(raw[3]),\n\t\t\tDailyChange: convert.F64ValOrZero(raw[4]),\n\t\t\tDailyChangeRelative: convert.F64ValOrZero(raw[5]),\n\t\t\tLastPrice: convert.F64ValOrZero(raw[6]),\n\t\t\tVolume: convert.F64ValOrZero(raw[7]),\n\t\t\tHigh: convert.F64ValOrZero(raw[8]),\n\t\t\tLow: convert.F64ValOrZero(raw[9]),\n\t\t}\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(symbol, \"f\") && len(raw) >= 16 {\n\t\tt = &Ticker{\n\t\t\tSymbol: symbol,\n\t\t\tFrr: convert.F64ValOrZero(raw[0]),\n\t\t\tBid: convert.F64ValOrZero(raw[1]),\n\t\t\tBidPeriod: convert.I64ValOrZero(raw[2]),\n\t\t\tBidSize: convert.F64ValOrZero(raw[3]),\n\t\t\tAsk: convert.F64ValOrZero(raw[4]),\n\t\t\tAskPeriod: convert.I64ValOrZero(raw[5]),\n\t\t\tAskSize: convert.F64ValOrZero(raw[6]),\n\t\t\tDailyChange: convert.F64ValOrZero(raw[7]),\n\t\t\tDailyChangeRelative: convert.F64ValOrZero(raw[8]),\n\t\t\tLastPrice: convert.F64ValOrZero(raw[9]),\n\t\t\tVolume: convert.F64ValOrZero(raw[10]),\n\t\t\tHigh: convert.F64ValOrZero(raw[11]),\n\t\t\tLow: convert.F64ValOrZero(raw[12]),\n\t\t\tFrrAmountAvailable: convert.F64ValOrZero(raw[15]),\n\t\t}\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"unrecognized data slice format for pair:%s, date:%#v\", symbol, raw)\n\treturn\n}\n\nfunc FromRestRaw(raw []interface{}) (t *Ticker, err error) {\n\tif len(raw) == 0 {\n\t\treturn t, fmt.Errorf(\"data slice too short for ticker\")\n\t}\n\n\treturn FromRaw(raw[0].(string), raw[1:])\n}\n\n\/\/ FromWSRaw - based on condition will return 
a snapshot of tickers or a single ticker\nfunc FromWSRaw(symbol string, data []interface{}) (interface{}, error) {\n\t_, isSnapshot := data[0].([]interface{})\n\tif isSnapshot {\n\t\treturn SnapshotFromRaw(symbol, convert.ToInterfaceArray(data))\n\t}\n\treturn FromRaw(symbol, data)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage nfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/container-storage-interface\/spec\/lib\/go\/csi\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\"\n\t\"k8s.io\/utils\/mount\"\n)\n\n\/\/ NodeServer driver\ntype NodeServer struct {\n\tDriver *Driver\n\tmounter mount.Interface\n}\n\n\/\/ NodePublishVolume mount the volume\nfunc (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {\n\tif req.GetVolumeCapability() == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume capability missing in request\")\n\t}\n\tvolumeID := req.GetVolumeId()\n\tif len(volumeID) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\ttargetPath := req.GetTargetPath()\n\tif len(targetPath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path not provided\")\n\t}\n\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif err := os.MkdirAll(targetPath, 0750); err != nil {\n\t\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t\t}\n\t\t\tnotMnt = true\n\t\t} else {\n\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\tif !notMnt {\n\t\treturn &csi.NodePublishVolumeResponse{}, nil\n\t}\n\n\tmountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()\n\tif req.GetReadonly() {\n\t\tmountOptions = append(mountOptions, \"ro\")\n\t}\n\n\ts := req.GetVolumeContext()[paramServer]\n\tep := req.GetVolumeContext()[paramShare]\n\tsource := fmt.Sprintf(\"%s:%s\", s, ep)\n\n\tklog.V(2).Infof(\"NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)\", volumeID, source, targetPath, mountOptions)\n\terr = ns.mounter.Mount(source, targetPath, \"nfs\", mountOptions)\n\tif err != nil {\n\t\tif os.IsPermission(err) {\n\t\t\treturn nil, status.Error(codes.PermissionDenied, err.Error())\n\t\t}\n\t\tif strings.Contains(err.Error(), \"invalid argument\") {\n\t\t\treturn nil, status.Error(codes.InvalidArgument, err.Error())\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tif ns.Driver.perm != nil {\n\t\tif err := os.Chmod(targetPath, os.FileMode(*ns.Driver.perm)); err != nil {\n\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn &csi.NodePublishVolumeResponse{}, nil\n}\n\n\/\/ NodeUnpublishVolume unmount the volume\nfunc (ns *NodeServer) NodeUnpublishVolume(ctx 
context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {\n\tvolumeID := req.GetVolumeId()\n\tif len(volumeID) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\ttargetPath := req.GetTargetPath()\n\tif len(targetPath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path missing in request\")\n\t}\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Error(codes.NotFound, \"Targetpath not found\")\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\tif notMnt {\n\t\treturn nil, status.Error(codes.NotFound, \"Volume not mounted\")\n\t}\n\n\tklog.V(2).Infof(\"NodeUnpublishVolume: CleanupMountPoint %s on volumeID(%s)\", targetPath, volumeID)\n\terr = mount.CleanupMountPoint(targetPath, ns.mounter, false)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &csi.NodeUnpublishVolumeResponse{}, nil\n}\n\n\/\/ NodeGetInfo return info of the node on which this plugin is running\nfunc (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: ns.Driver.nodeID,\n\t}, nil\n}\n\n\/\/ NodeGetCapabilities return the capabilities of the Node plugin\nfunc (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {\n\treturn &csi.NodeGetCapabilitiesResponse{\n\t\tCapabilities: ns.Driver.nscap,\n\t}, nil\n}\n\n\/\/ NodeGetVolumeStats get volume stats\nfunc (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get metrics: %v\", err)\n\t}\n\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := 
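\/\/ resource.Quantity.AsInt64 reports ok=false when the value cannot be\n\t\/\/ represented as a plain int64, hence the repeated ok checks here.\n\t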
volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\n\/\/ NodeUnstageVolume unstage volume\nfunc (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {\n\treturn &csi.NodeUnstageVolumeResponse{}, nil\n}\n\n\/\/ NodeStageVolume stage volume\nfunc (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {\n\treturn &csi.NodeStageVolumeResponse{}, nil\n}\n\n\/\/ NodeExpandVolume node expand volume\nfunc (ns *NodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}\n\nfunc makeDir(pathname string) error {\n\terr := os.MkdirAll(pathname, os.FileMode(0755))\n\tif err != nil {\n\t\tif !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\ncleanup: disable NodeStageVolume\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage nfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/container-storage-interface\/spec\/lib\/go\/csi\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/status\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\"\n\t\"k8s.io\/utils\/mount\"\n)\n\n\/\/ NodeServer driver\ntype NodeServer struct {\n\tDriver *Driver\n\tmounter mount.Interface\n}\n\n\/\/ NodePublishVolume mount the volume\nfunc (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {\n\tif req.GetVolumeCapability() == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume capability missing in request\")\n\t}\n\tvolumeID := req.GetVolumeId()\n\tif len(volumeID) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\ttargetPath := req.GetTargetPath()\n\tif len(targetPath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path not provided\")\n\t}\n\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif err := os.MkdirAll(targetPath, 0750); err != nil {\n\t\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t\t}\n\t\t\tnotMnt = true\n\t\t} else {\n\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\tif !notMnt {\n\t\treturn &csi.NodePublishVolumeResponse{}, nil\n\t}\n\n\tmountOptions := 
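\/\/ mount flags requested through the CSI volume capability (typically\n\t\/\/ the mountOptions of the PV or StorageClass)\n\t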
req.GetVolumeCapability().GetMount().GetMountFlags()\n\tif req.GetReadonly() {\n\t\tmountOptions = append(mountOptions, \"ro\")\n\t}\n\n\ts := req.GetVolumeContext()[paramServer]\n\tep := req.GetVolumeContext()[paramShare]\n\tsource := fmt.Sprintf(\"%s:%s\", s, ep)\n\n\tklog.V(2).Infof(\"NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)\", volumeID, source, targetPath, mountOptions)\n\terr = ns.mounter.Mount(source, targetPath, \"nfs\", mountOptions)\n\tif err != nil {\n\t\tif os.IsPermission(err) {\n\t\t\treturn nil, status.Error(codes.PermissionDenied, err.Error())\n\t\t}\n\t\tif strings.Contains(err.Error(), \"invalid argument\") {\n\t\t\treturn nil, status.Error(codes.InvalidArgument, err.Error())\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tif ns.Driver.perm != nil {\n\t\tif err := os.Chmod(targetPath, os.FileMode(*ns.Driver.perm)); err != nil {\n\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\treturn &csi.NodePublishVolumeResponse{}, nil\n}\n\n\/\/ NodeUnpublishVolume unmount the volume\nfunc (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {\n\tvolumeID := req.GetVolumeId()\n\tif len(volumeID) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\ttargetPath := req.GetTargetPath()\n\tif len(targetPath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path missing in request\")\n\t}\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Error(codes.NotFound, \"Targetpath not found\")\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\tif notMnt {\n\t\treturn nil, status.Error(codes.NotFound, \"Volume not mounted\")\n\t}\n\n\tklog.V(2).Infof(\"NodeUnpublishVolume: CleanupMountPoint %s on volumeID(%s)\", targetPath, volumeID)\n\terr = mount.CleanupMountPoint(targetPath, ns.mounter, false)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &csi.NodeUnpublishVolumeResponse{}, nil\n}\n\n\/\/ NodeGetInfo return info of the node on which this plugin is running\nfunc (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: ns.Driver.nodeID,\n\t}, nil\n}\n\n\/\/ NodeGetCapabilities return the capabilities of the Node plugin\nfunc (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {\n\treturn &csi.NodeGetCapabilitiesResponse{\n\t\tCapabilities: ns.Driver.nscap,\n\t}, nil\n}\n\n\/\/ NodeGetVolumeStats get volume stats\nfunc (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, 
err)\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get metrics: %v\", err)\n\t}\n\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\n\/\/ NodeUnstageVolume unstage volume\nfunc (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}\n\n\/\/ NodeStageVolume stage volume\nfunc (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}\n\n\/\/ NodeExpandVolume node expand volume\nfunc (ns *NodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}\n\nfunc makeDir(pathname string) error {\n\terr := os.MkdirAll(pathname, os.FileMode(0755))\n\tif err != nil {\n\t\tif !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright Authors of Cilium\n\npackage modules\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t. 
\"gopkg.in\/check.v1\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/checker\"\n)\n\nconst (\n\tmodulesContent = `ebtable_nat 16384 1 - Live 0x0000000000000000\nebtable_broute 16384 1 - Live 0x0000000000000000\nbridge 172032 1 ebtable_broute, Live 0x0000000000000000\nip6table_nat 16384 1 - Live 0x0000000000000000\nnf_nat_ipv6 16384 1 ip6table_nat, Live 0x0000000000000000\nip6table_mangle 16384 1 - Live 0x0000000000000000\nip6table_raw 16384 1 - Live 0x0000000000000000\nip6table_security 16384 1 - Live 0x0000000000000000\niptable_nat 16384 1 - Live 0x0000000000000000\nnf_nat_ipv4 16384 1 iptable_nat, Live 0x0000000000000000\niptable_mangle 16384 1 - Live 0x0000000000000000\niptable_raw 16384 1 - Live 0x0000000000000000\niptable_security 16384 1 - Live 0x0000000000000000\nebtable_filter 16384 1 - Live 0x0000000000000000\nebtables 36864 3 ebtable_nat,ebtable_broute,ebtable_filter, Live 0x0000000000000000\nip6table_filter 16384 1 - Live 0x0000000000000000\nip6_tables 28672 5 ip6table_nat,ip6table_mangle,ip6table_raw,ip6table_security,ip6table_filter, Live 0x0000000000000000\niptable_filter 16384 1 - Live 0x0000000000000000\nip_tables 28672 5 iptable_nat,iptable_mangle,iptable_raw,iptable_security,iptable_filter, Live 0x0000000000000000\nx_tables 40960 23 xt_multiport,xt_nat,xt_addrtype,xt_mark,xt_comment,xt_CHECKSUM,ipt_MASQUERADE,xt_tcpudp,ip6t_rpfilter,ip6t_REJECT,ipt_REJECT,xt_conntrack,ip6table_mangle,ip6table_raw,ip6table_security,iptable_mangle,iptable_raw,iptable_security,ebtables,ip6table_filter,ip6_tables,iptable_filter,ip_tables, Live 0x0000000000000000`\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype ModulesTestSuite struct{}\n\nvar _ = Suite(&ModulesTestSuite{})\n\nfunc (s *ModulesTestSuite) TestInit(c *C) {\n\tmanager := &ModulesManager{}\n\tc.Assert(manager.modulesList, IsNil)\n\terr := manager.Init()\n\tc.Assert(err, IsNil)\n\tc.Assert(manager.modulesList, NotNil)\n}\n\nfunc (s *ModulesTestSuite) TestFindModules(c *C) {\n\tmanager := &ModulesManager{\n\t\tmodulesList: []string{\n\t\t\t\"ip6_tables\",\n\t\t\t\"ip6table_mangle\",\n\t\t\t\"ip6table_filter\",\n\t\t\t\"ip6table_security\",\n\t\t\t\"ip6table_raw\",\n\t\t\t\"ip6table_nat\",\n\t\t},\n\t}\n\ttestCases := []struct {\n\t\tmodulesToFind []string\n\t\tisSubset bool\n\t\texpectedDiff []string\n\t}{\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_filter\",\n\t\t\t\t\"ip6table_security\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t\t\"ip6table_nat\",\n\t\t\t},\n\t\t\tisSubset: true,\n\t\t\texpectedDiff: nil,\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t},\n\t\t\tisSubset: true,\n\t\t\texpectedDiff: nil,\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t\t\"foo_module\",\n\t\t\t},\n\t\t\tisSubset: false,\n\t\t\texpectedDiff: []string{\"foo_module\"},\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"foo_module\",\n\t\t\t\t\"bar_module\",\n\t\t\t},\n\t\t\tisSubset: false,\n\t\t\texpectedDiff: []string{\"foo_module\", \"bar_module\"},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tfound, diff := manager.FindModules(tc.modulesToFind...)\n\t\tc.Assert(found, Equals, tc.isSubset)\n\t\tc.Assert(diff, checker.DeepEquals, tc.expectedDiff)\n\t}\n}\n\nfunc (s *ModulesTestSuite) TestParseModuleFile(c *C) {\n\texpectedLength := 
20\n\texpectedModules := []string{\n\t\t\"ebtable_nat\",\n\t\t\"ebtable_broute\",\n\t\t\"bridge\",\n\t\t\"ip6table_nat\",\n\t\t\"nf_nat_ipv6\",\n\t\t\"ip6table_mangle\",\n\t\t\"ip6table_raw\",\n\t\t\"ip6table_security\",\n\t\t\"iptable_nat\",\n\t\t\"nf_nat_ipv4\",\n\t\t\"iptable_mangle\",\n\t\t\"iptable_raw\",\n\t\t\"iptable_security\",\n\t\t\"ebtable_filter\",\n\t\t\"ebtables\",\n\t\t\"ip6table_filter\",\n\t\t\"ip6_tables\",\n\t\t\"iptable_filter\",\n\t\t\"ip_tables\",\n\t\t\"x_tables\",\n\t}\n\n\tr := bytes.NewBuffer([]byte(modulesContent))\n\tmoduleInfos, err := parseModulesFile(r)\n\tc.Assert(err, IsNil)\n\tc.Assert(moduleInfos, HasLen, expectedLength)\n\tc.Assert(moduleInfos, checker.DeepEquals, expectedModules)\n}\n\nfunc (s *ModulesTestSuite) TestListModules(c *C) {\n\t_, err := listModules()\n\tc.Assert(err, IsNil)\n}\nmodules: pass TestInit on kernels without kernel modules\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/ Copyright Authors of Cilium\n\npackage modules\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/checker\"\n)\n\nconst (\n\tmodulesContent = `ebtable_nat 16384 1 - Live 0x0000000000000000\nebtable_broute 16384 1 - Live 0x0000000000000000\nbridge 172032 1 ebtable_broute, Live 0x0000000000000000\nip6table_nat 16384 1 - Live 0x0000000000000000\nnf_nat_ipv6 16384 1 ip6table_nat, Live 0x0000000000000000\nip6table_mangle 16384 1 - Live 0x0000000000000000\nip6table_raw 16384 1 - Live 0x0000000000000000\nip6table_security 16384 1 - Live 0x0000000000000000\niptable_nat 16384 1 - Live 0x0000000000000000\nnf_nat_ipv4 16384 1 iptable_nat, Live 0x0000000000000000\niptable_mangle 16384 1 - Live 0x0000000000000000\niptable_raw 16384 1 - Live 0x0000000000000000\niptable_security 16384 1 - Live 0x0000000000000000\nebtable_filter 16384 1 - Live 0x0000000000000000\nebtables 36864 3 ebtable_nat,ebtable_broute,ebtable_filter, Live 0x0000000000000000\nip6table_filter 16384 1 - Live 0x0000000000000000\nip6_tables 28672 5 ip6table_nat,ip6table_mangle,ip6table_raw,ip6table_security,ip6table_filter, Live 0x0000000000000000\niptable_filter 16384 1 - Live 0x0000000000000000\nip_tables 28672 5 iptable_nat,iptable_mangle,iptable_raw,iptable_security,iptable_filter, Live 0x0000000000000000\nx_tables 40960 23 xt_multiport,xt_nat,xt_addrtype,xt_mark,xt_comment,xt_CHECKSUM,ipt_MASQUERADE,xt_tcpudp,ip6t_rpfilter,ip6t_REJECT,ipt_REJECT,xt_conntrack,ip6table_mangle,ip6table_raw,ip6table_security,iptable_mangle,iptable_raw,iptable_security,ebtables,ip6table_filter,ip6_tables,iptable_filter,ip_tables, Live 0x0000000000000000`\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype ModulesTestSuite struct{}\n\nvar _ = Suite(&ModulesTestSuite{})\n\nfunc (s *ModulesTestSuite) TestInit(c *C) {\n\tvar manager ModulesManager\n\tc.Assert(manager.Init(), IsNil)\n}\n\nfunc (s *ModulesTestSuite) TestFindModules(c *C) {\n\tmanager := &ModulesManager{\n\t\tmodulesList: []string{\n\t\t\t\"ip6_tables\",\n\t\t\t\"ip6table_mangle\",\n\t\t\t\"ip6table_filter\",\n\t\t\t\"ip6table_security\",\n\t\t\t\"ip6table_raw\",\n\t\t\t\"ip6table_nat\",\n\t\t},\n\t}\n\ttestCases := []struct {\n\t\tmodulesToFind []string\n\t\tisSubset bool\n\t\texpectedDiff []string\n\t}{\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_filter\",\n\t\t\t\t\"ip6table_security\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t\t\"ip6table_nat\",\n\t\t\t},\n\t\t\tisSubset: 
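\/\/ true when every entry of modulesToFind is present in modulesList\n\t\t\t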
true,\n\t\t\texpectedDiff: nil,\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t},\n\t\t\tisSubset: true,\n\t\t\texpectedDiff: nil,\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"ip6_tables\",\n\t\t\t\t\"ip6table_mangle\",\n\t\t\t\t\"ip6table_raw\",\n\t\t\t\t\"foo_module\",\n\t\t\t},\n\t\t\tisSubset: false,\n\t\t\texpectedDiff: []string{\"foo_module\"},\n\t\t},\n\t\t{\n\t\t\tmodulesToFind: []string{\n\t\t\t\t\"foo_module\",\n\t\t\t\t\"bar_module\",\n\t\t\t},\n\t\t\tisSubset: false,\n\t\t\texpectedDiff: []string{\"foo_module\", \"bar_module\"},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tfound, diff := manager.FindModules(tc.modulesToFind...)\n\t\tc.Assert(found, Equals, tc.isSubset)\n\t\tc.Assert(diff, checker.DeepEquals, tc.expectedDiff)\n\t}\n}\n\nfunc (s *ModulesTestSuite) TestParseModuleFile(c *C) {\n\texpectedLength := 20\n\texpectedModules := []string{\n\t\t\"ebtable_nat\",\n\t\t\"ebtable_broute\",\n\t\t\"bridge\",\n\t\t\"ip6table_nat\",\n\t\t\"nf_nat_ipv6\",\n\t\t\"ip6table_mangle\",\n\t\t\"ip6table_raw\",\n\t\t\"ip6table_security\",\n\t\t\"iptable_nat\",\n\t\t\"nf_nat_ipv4\",\n\t\t\"iptable_mangle\",\n\t\t\"iptable_raw\",\n\t\t\"iptable_security\",\n\t\t\"ebtable_filter\",\n\t\t\"ebtables\",\n\t\t\"ip6table_filter\",\n\t\t\"ip6_tables\",\n\t\t\"iptable_filter\",\n\t\t\"ip_tables\",\n\t\t\"x_tables\",\n\t}\n\n\tr := bytes.NewBuffer([]byte(modulesContent))\n\tmoduleInfos, err := parseModulesFile(r)\n\tc.Assert(err, IsNil)\n\tc.Assert(moduleInfos, HasLen, expectedLength)\n\tc.Assert(moduleInfos, checker.DeepEquals, expectedModules)\n}\n\nfunc (s *ModulesTestSuite) TestListModules(c *C) {\n\t_, err := listModules()\n\tc.Assert(err, IsNil)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016-2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage manager\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/datapath\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/metrics\"\n\t\"github.com\/cilium\/cilium\/pkg\/node\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\tbaseBackgroundSyncInterval = time.Minute\n)\n\ntype nodeEntry struct {\n\t\/\/ mutex serves two purposes:\n\t\/\/ 1. Serialize any direct access to the node field in this entry.\n\t\/\/ 2. Serialize all calls do the datapath layer for a particular node.\n\t\/\/\n\t\/\/ See description of Manager.mutex for more details\n\t\/\/\n\t\/\/ If both the nodeEntry.mutex and Manager.mutex must be held, then the\n\t\/\/ Manager.mutex must *always* be acquired first.\n\tmutex lock.Mutex\n\tnode node.Node\n}\n\n\/\/ Manager is the entity that manages a collection of nodes\ntype Manager struct {\n\t\/\/ mutex is the lock protecting access to the nodes map. 
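It is a\n\t\/\/ read\/write lock so concurrent readers do not serialize behind each\n\t\/\/ other.\n\t\/\/ 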
The mutex must\n\t\/\/ be held for any access of the nodes map.\n\t\/\/\n\t\/\/ The manager mutex works together with the entry mutex in the\n\t\/\/ following way to minimize the duration the manager mutex is held:\n\t\/\/\n\t\/\/ 1. Acquire manager mutex to safely access nodes map and to retrieve\n\t\/\/ node entry.\n\t\/\/ 2. Acquire mutex of the entry while the manager mutex is still held.\n\t\/\/ This guarantees that no change to the entry has happened.\n\t\/\/ 3. Release of the manager mutex to unblock changes or reads to other\n\t\/\/ node entries.\n\t\/\/ 4. Change of entry data or performing of datapath interactions\n\t\/\/ 5. Release of the entry mutex\n\t\/\/\n\t\/\/ If both the nodeEntry.mutex and Manager.mutex must be held, then the\n\t\/\/ Manager.mutex must *always* be acquired first.\n\tmutex lock.RWMutex\n\n\t\/\/ nodes is the list of nodes. Access must be protected via mutex.\n\tnodes map[node.Identity]*nodeEntry\n\n\t\/\/ datapath is the interface responsible for this node manager\n\tdatapath datapath.NodeHandler\n\n\t\/\/ closeChan is closed when the manager is closed\n\tcloseChan chan struct{}\n\n\t\/\/ name is the name of the manager. It must be unique and feasibility\n\t\/\/ to be used a prometheus metric name.\n\tname string\n\n\t\/\/ metricEventsReceived is the prometheus metric to track the number of\n\t\/\/ node events received\n\tmetricEventsReceived *prometheus.CounterVec\n\n\t\/\/ metricNumNodes is the prometheus metric to track the number of nodes\n\t\/\/ being managed\n\tmetricNumNodes prometheus.Gauge\n\n\t\/\/ metricDatapathValidations is the prometheus metric to track the\n\t\/\/ number of datapath node validation calls\n\tmetricDatapathValidations prometheus.Counter\n}\n\n\/\/ NewManager returns a new node manager\nfunc NewManager(name string, datapath datapath.NodeHandler) (*Manager, error) {\n\tm := &Manager{\n\t\tname: name,\n\t\tnodes: map[node.Identity]*nodeEntry{},\n\t\tdatapath: datapath,\n\t\tcloseChan: make(chan struct{}),\n\t}\n\n\tm.metricEventsReceived = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_events_received_total\",\n\t\tHelp: \"Number of node events received\",\n\t}, []string{\"eventType\", \"source\"})\n\n\tm.metricNumNodes = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_num\",\n\t\tHelp: \"Number of nodes managed\",\n\t})\n\n\tm.metricDatapathValidations = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_datapath_validations_total\",\n\t\tHelp: \"Number of validation calls to implement the datapath implemention of a node\",\n\t})\n\n\terr := metrics.RegisterList([]prometheus.Collector{m.metricDatapathValidations, m.metricEventsReceived, m.metricNumNodes})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo m.backgroundSync()\n\n\treturn m, nil\n}\n\n\/\/ Close shuts down a node manager\nfunc (m *Manager) Close() {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tclose(m.closeChan)\n\n\tmetrics.Unregister(m.metricNumNodes)\n\tmetrics.Unregister(m.metricEventsReceived)\n\tmetrics.Unregister(m.metricDatapathValidations)\n\n\t\/\/ delete all nodes to clean up the datapath for each node\n\tfor _, n := range m.nodes {\n\t\tn.mutex.Lock()\n\t\tm.datapath.NodeDelete(n.node)\n\t\tn.mutex.Unlock()\n\t}\n}\n\n\/\/ ClusterSizeDependantInterval returns a time.Duration that is dependant on\n\/\/ the 
cluster size, i.e. the number of nodes that have been discovered. This\n\/\/ can be used to control sync intervals of shared or centralized resources to\n\/\/ avoid overloading these resources as the cluster grows.\n\/\/\n\/\/ Example sync interval with baseInterval = 1 * time.Minute\n\/\/\n\/\/ nodes | sync interval\n\/\/ ------+-----------------\n\/\/ 1 | 41.588830833s\n\/\/ 2 | 1m05.916737320s\n\/\/ 4 | 1m36.566274746s\n\/\/ 8 | 2m11.833474640s\n\/\/ 16 | 2m49.992800643s\n\/\/ 32 | 3m29.790453687s\n\/\/ 64 | 4m10.463236193s\n\/\/ 128 | 4m51.588744261s\n\/\/ 256 | 5m32.944565093s\n\/\/ 512 | 6m14.416550710s\n\/\/ 1024 | 6m55.946873494s\n\/\/ 2048 | 7m37.506428894s\n\/\/ 4096 | 8m19.080616652s\n\/\/ 8192 | 9m00.662124608s\n\/\/ 16384 | 9m42.247293667s\nfunc (m *Manager) ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration {\n\tm.mutex.RLock()\n\tnumNodes := len(m.nodes)\n\tm.mutex.RUnlock()\n\n\t\/\/ no nodes are being managed, no work will be performed, return\n\t\/\/ baseInterval to check again in a reasonable timeframe\n\tif numNodes == 0 {\n\t\treturn baseInterval\n\t}\n\n\twaitNanoseconds := float64(baseInterval.Nanoseconds()) * math.Log1p(float64(numNodes))\n\treturn time.Duration(int64(waitNanoseconds))\n\n}\n\nfunc (m *Manager) backgroundSyncInterval() time.Duration {\n\treturn m.ClusterSizeDependantInterval(baseBackgroundSyncInterval)\n}\n\nfunc (m *Manager) backgroundSync() {\n\tfor {\n\t\tsyncInterval := m.backgroundSyncInterval()\n\t\tlog.WithField(\"syncInterval\", syncInterval.String()).Debug(\"Performing regular background work\")\n\n\t\t\/\/ get a copy of the node identities to avoid locking the entire manager\n\t\t\/\/ throughout the process of running the datapath validation.\n\t\tnodes := m.GetNodeIdentities()\n\t\tfor _, nodeIdentity := range nodes {\n\t\t\t\/\/ Retrieve latest node information in case any event\n\t\t\t\/\/ changed the node since the call to GetNodes()\n\t\t\tm.mutex.RLock()\n\t\t\tentry, ok := m.nodes[nodeIdentity]\n\t\t\tif !ok {\n\t\t\t\tm.mutex.RUnlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tentry.mutex.Lock()\n\t\t\tm.mutex.RUnlock()\n\t\t\tm.datapath.NodeValidateImplementation(entry.node)\n\t\t\tentry.mutex.Unlock()\n\n\t\t\tm.metricDatapathValidations.Inc()\n\t\t}\n\n\t\tselect {\n\t\tcase <-m.closeChan:\n\t\t\treturn\n\t\tcase <-time.After(syncInterval):\n\t\t}\n\t}\n}\n\n\/\/ overwriteAllowed returns true if an update from newSource can overwrite a node owned by oldSource.\nfunc overwriteAllowed(oldSource, newSource node.Source) bool {\n\tswitch newSource {\n\t\/\/ the local node always takes precedence\n\tcase node.FromLocalNode:\n\t\treturn true\n\n\t\/\/ agent local updates can overwrite everything except for the local\n\t\/\/ node\n\tcase node.FromAgentLocal:\n\t\treturn oldSource != node.FromLocalNode\n\n\t\/\/ kvstore updates can overwrite everything except agent local updates and local node\n\tcase node.FromKVStore:\n\t\treturn oldSource != node.FromAgentLocal && oldSource != node.FromLocalNode\n\n\t\/\/ kubernetes updates can only overwrite kubernetes nodes\n\tcase node.FromKubernetes:\n\t\treturn oldSource != node.FromAgentLocal && oldSource != node.FromLocalNode && oldSource != node.FromKVStore\n\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ NodeSoftUpdated is called after the information of a node has be upated but\n\/\/ unlike a NodeUpdated does not require the datapath to be updated.\nfunc (m *Manager) NodeSoftUpdated(n node.Node) {\n\tlog.Debugf(\"Received soft node update event from %s: %#v\", n.Source, 
n)\n\tm.nodeUpdated(n, false)\n}\n\n\/\/ NodeUpdated is called after the information of a node has been updated. The\n\/\/ node in the manager is added or updated if the source is allowed to update\n\/\/ the node. If an update or addition has occurred, NodeUpdate() of the datapath\n\/\/ interface is invoked.\nfunc (m *Manager) NodeUpdated(n node.Node) {\n\tlog.Debugf(\"Received node update event from %s: %#v\", n.Source, n)\n\tm.nodeUpdated(n, true)\n}\n\nfunc (m *Manager) nodeUpdated(n node.Node, dpUpdate bool) {\n\tnodeIdentity := n.Identity()\n\n\tm.mutex.Lock()\n\tentry, oldNodeExists := m.nodes[nodeIdentity]\n\tif oldNodeExists {\n\t\tm.metricEventsReceived.WithLabelValues(\"update\", string(n.Source)).Inc()\n\n\t\tif !overwriteAllowed(entry.node.Source, n.Source) {\n\t\t\tm.mutex.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\tentry.mutex.Lock()\n\t\tm.mutex.Unlock()\n\t\toldNode := entry.node\n\t\tentry.node = n\n\t\tif dpUpdate {\n\t\t\tm.datapath.NodeUpdate(oldNode, entry.node)\n\t\t}\n\t\tentry.mutex.Unlock()\n\t} else {\n\t\tm.metricEventsReceived.WithLabelValues(\"add\", string(n.Source)).Inc()\n\t\tm.metricNumNodes.Inc()\n\n\t\tentry = &nodeEntry{node: n}\n\t\tentry.mutex.Lock()\n\t\tm.nodes[nodeIdentity] = entry\n\t\tm.mutex.Unlock()\n\t\tif dpUpdate {\n\t\t\tm.datapath.NodeAdd(entry.node)\n\t\t}\n\t\tentry.mutex.Unlock()\n\t}\n}\n\n\/\/ NodeDeleted is called after a node has been deleted. It removes the node\n\/\/ from the manager if the node is still owned by the source of which the event\n\/\/ orgins from. If the node was removed, NodeDelete() is invoked of the\n\/\/ datapath interface.\nfunc (m *Manager) NodeDeleted(n node.Node) {\n\tm.metricEventsReceived.WithLabelValues(\"delete\", string(n.Source)).Inc()\n\n\tlog.Debugf(\"Received node delete event from %s\", n.Source)\n\n\tnodeIdentity := n.Identity()\n\n\tm.mutex.Lock()\n\tentry, oldNodeExists := m.nodes[nodeIdentity]\n\tif !oldNodeExists {\n\t\tm.mutex.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ If the source is Kubernetes and the node is the node we are running on\n\t\/\/ Kubernetes is giving us a hint it is about to delete our node. Close down\n\t\/\/ the agent gracefully in this case.\n\tif n.Source != entry.node.Source {\n\t\tm.mutex.Unlock()\n\t\tif n.IsLocal() && n.Source == node.FromKubernetes {\n\t\t\tlog.Debugf(\"Kubernetes is deleting local node, close manager\")\n\t\t\tm.Close()\n\t\t} else {\n\t\t\tlog.Debugf(\"Ignoring delete event of node %s from source %s. 
The node is owned by %s\",\n\t\t\t\tn.Name, n.Source, entry.node.Source)\n\t\t}\n\t\treturn\n\t}\n\n\tm.metricNumNodes.Dec()\n\n\tentry.mutex.Lock()\n\tdelete(m.nodes, nodeIdentity)\n\tm.mutex.Unlock()\n\tm.datapath.NodeDelete(n)\n\tentry.mutex.Unlock()\n}\n\n\/\/ Exists returns true if a node with the name exists\nfunc (m *Manager) Exists(id node.Identity) bool {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\t_, ok := m.nodes[id]\n\treturn ok\n}\n\n\/\/ GetNodeIdentities returns a list of all node identities stored in the node\n\/\/ manager.\nfunc (m *Manager) GetNodeIdentities() []node.Identity {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tnodes := make([]node.Identity, 0, len(m.nodes))\n\tfor nodeIdentity := range m.nodes {\n\t\tnodes = append(nodes, nodeIdentity)\n\t}\n\n\treturn nodes\n}\n\n\/\/ GetNodes returns a copy of all of the nodes as a map from Identity to Node.\nfunc (m *Manager) GetNodes() map[node.Identity]node.Node {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tnodes := make(map[node.Identity]node.Node)\n\tfor nodeIdentity, entry := range m.nodes {\n\t\tentry.mutex.Lock()\n\t\tnodes[nodeIdentity] = entry.node\n\t\tentry.mutex.Unlock()\n\t}\n\n\treturn nodes\n}\n\n\/\/ DeleteAllNodes deletes all nodes from the node manager.\nfunc (m *Manager) DeleteAllNodes() {\n\tm.mutex.Lock()\n\tfor _, entry := range m.nodes {\n\t\tentry.mutex.Lock()\n\t\tm.datapath.NodeDelete(entry.node)\n\t\tentry.mutex.Unlock()\n\t}\n\tm.nodes = map[node.Identity]*nodeEntry{}\n\tm.mutex.Unlock()\n}\nnode\/manager: add a subscription event based mechanism for node events\/\/ Copyright 2016-2019 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage manager\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/datapath\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/metrics\"\n\t\"github.com\/cilium\/cilium\/pkg\/node\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\tbaseBackgroundSyncInterval = time.Minute\n)\n\ntype nodeEntry struct {\n\t\/\/ mutex serves two purposes:\n\t\/\/ 1. Serialize any direct access to the node field in this entry.\n\t\/\/ 2. Serialize all calls to the datapath layer for a particular node.\n\t\/\/\n\t\/\/ See description of Manager.mutex for more details\n\t\/\/\n\t\/\/ If both the nodeEntry.mutex and Manager.mutex must be held, then the\n\t\/\/ Manager.mutex must *always* be acquired first.\n\tmutex lock.Mutex\n\tnode node.Node\n}\n\n\/\/ Manager is the entity that manages a collection of nodes\ntype Manager struct {\n\t\/\/ mutex is the lock protecting access to the nodes map. The mutex must\n\t\/\/ be held for any access of the nodes map.\n\t\/\/\n\t\/\/ The manager mutex works together with the entry mutex in the\n\t\/\/ following way to minimize the duration the manager mutex is held:\n\t\/\/\n\t\/\/ 1. Acquire manager mutex to safely access nodes map and to retrieve\n\t\/\/ node entry.\n\t\/\/ 2. 
Acquire mutex of the entry while the manager mutex is still held.\n\t\/\/ This guarantees that no change to the entry has happened.\n\t\/\/ 3. Release of the manager mutex to unblock changes or reads to other\n\t\/\/ node entries.\n\t\/\/ 4. Change of entry data or performing of datapath interactions\n\t\/\/ 5. Release of the entry mutex\n\t\/\/\n\t\/\/ If both the nodeEntry.mutex and Manager.mutex must be held, then the\n\t\/\/ Manager.mutex must *always* be acquired first.\n\tmutex lock.RWMutex\n\n\t\/\/ nodes is the list of nodes. Access must be protected via mutex.\n\tnodes map[node.Identity]*nodeEntry\n\n\t\/\/ nodeHandlersMu protects the nodeHandlers map against concurrent access.\n\tnodeHandlersMu lock.RWMutex\n\t\/\/ nodeHandlers is the set of all node handlers subscribed to node\n\t\/\/ events.\n\tnodeHandlers map[datapath.NodeHandler]struct{}\n\n\t\/\/ closeChan is closed when the manager is closed\n\tcloseChan chan struct{}\n\n\t\/\/ name is the name of the manager. It must be unique and valid\n\t\/\/ as part of a prometheus metric name.\n\tname string\n\n\t\/\/ metricEventsReceived is the prometheus metric to track the number of\n\t\/\/ node events received\n\tmetricEventsReceived *prometheus.CounterVec\n\n\t\/\/ metricNumNodes is the prometheus metric to track the number of nodes\n\t\/\/ being managed\n\tmetricNumNodes prometheus.Gauge\n\n\t\/\/ metricDatapathValidations is the prometheus metric to track the\n\t\/\/ number of datapath node validation calls\n\tmetricDatapathValidations prometheus.Counter\n}\n\n\/\/ Subscribe subscribes the given node handler to node events.\nfunc (m *Manager) Subscribe(nh datapath.NodeHandler) {\n\tm.nodeHandlersMu.Lock()\n\tm.nodeHandlers[nh] = struct{}{}\n\tm.nodeHandlersMu.Unlock()\n\t\/\/ Add all nodes already received by the manager.\n\tfor _, v := range m.nodes {\n\t\tv.mutex.Lock()\n\t\tnh.NodeAdd(v.node)\n\t\tv.mutex.Unlock()\n\t}\n}\n\n\/\/ Unsubscribe unsubscribes the given node handler from node events.\nfunc (m *Manager) Unsubscribe(nh datapath.NodeHandler) {\n\tm.nodeHandlersMu.Lock()\n\tdelete(m.nodeHandlers, nh)\n\tm.nodeHandlersMu.Unlock()\n}\n\n\/\/ Iter executes the given function on each subscribed node handler.\nfunc (m *Manager) Iter(f func(nh datapath.NodeHandler)) {\n\tm.nodeHandlersMu.RLock()\n\tdefer m.nodeHandlersMu.RUnlock()\n\n\tfor nh := range m.nodeHandlers {\n\t\tf(nh)\n\t}\n}\n\n\/\/ NewManager returns a new node manager\nfunc NewManager(name string, dp datapath.NodeHandler) (*Manager, error) {\n\tm := &Manager{\n\t\tname: name,\n\t\tnodes: map[node.Identity]*nodeEntry{},\n\t\tnodeHandlers: map[datapath.NodeHandler]struct{}{},\n\t\tcloseChan: make(chan struct{}),\n\t}\n\tm.Subscribe(dp)\n\n\tm.metricEventsReceived = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_events_received_total\",\n\t\tHelp: \"Number of node events received\",\n\t}, []string{\"eventType\", \"source\"})\n\n\tm.metricNumNodes = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_num\",\n\t\tHelp: \"Number of nodes managed\",\n\t})\n\n\tm.metricDatapathValidations = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: metrics.Namespace,\n\t\tSubsystem: \"nodes\",\n\t\tName: name + \"_datapath_validations_total\",\n\t\tHelp: \"Number of validation calls to the datapath implementation of a node\",\n\t})\n\n\terr := 
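\/\/ register all three collectors with the metrics registry; NewManager\n\t\/\/ propagates the error if any registration is rejected\n\t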
metrics.RegisterList([]prometheus.Collector{m.metricDatapathValidations, m.metricEventsReceived, m.metricNumNodes})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo m.backgroundSync()\n\n\treturn m, nil\n}\n\n\/\/ Close shuts down a node manager\nfunc (m *Manager) Close() {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tclose(m.closeChan)\n\n\tmetrics.Unregister(m.metricNumNodes)\n\tmetrics.Unregister(m.metricEventsReceived)\n\tmetrics.Unregister(m.metricDatapathValidations)\n\n\t\/\/ delete all nodes to clean up the datapath for each node\n\tfor _, n := range m.nodes {\n\t\tn.mutex.Lock()\n\t\tm.Iter(func(nh datapath.NodeHandler) {\n\t\t\tnh.NodeDelete(n.node)\n\t\t})\n\t\tn.mutex.Unlock()\n\t}\n}\n\n\/\/ ClusterSizeDependantInterval returns a time.Duration that is dependant on\n\/\/ the cluster size, i.e. the number of nodes that have been discovered. This\n\/\/ can be used to control sync intervals of shared or centralized resources to\n\/\/ avoid overloading these resources as the cluster grows.\n\/\/\n\/\/ Example sync interval with baseInterval = 1 * time.Minute\n\/\/\n\/\/ nodes | sync interval\n\/\/ ------+-----------------\n\/\/ 1 | 41.588830833s\n\/\/ 2 | 1m05.916737320s\n\/\/ 4 | 1m36.566274746s\n\/\/ 8 | 2m11.833474640s\n\/\/ 16 | 2m49.992800643s\n\/\/ 32 | 3m29.790453687s\n\/\/ 64 | 4m10.463236193s\n\/\/ 128 | 4m51.588744261s\n\/\/ 256 | 5m32.944565093s\n\/\/ 512 | 6m14.416550710s\n\/\/ 1024 | 6m55.946873494s\n\/\/ 2048 | 7m37.506428894s\n\/\/ 4096 | 8m19.080616652s\n\/\/ 8192 | 9m00.662124608s\n\/\/ 16384 | 9m42.247293667s\nfunc (m *Manager) ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration {\n\tm.mutex.RLock()\n\tnumNodes := len(m.nodes)\n\tm.mutex.RUnlock()\n\n\t\/\/ no nodes are being managed, no work will be performed, return\n\t\/\/ baseInterval to check again in a reasonable timeframe\n\tif numNodes == 0 {\n\t\treturn baseInterval\n\t}\n\n\twaitNanoseconds := float64(baseInterval.Nanoseconds()) * math.Log1p(float64(numNodes))\n\treturn time.Duration(int64(waitNanoseconds))\n\n}\n\nfunc (m *Manager) backgroundSyncInterval() time.Duration {\n\treturn m.ClusterSizeDependantInterval(baseBackgroundSyncInterval)\n}\n\nfunc (m *Manager) backgroundSync() {\n\tfor {\n\t\tsyncInterval := m.backgroundSyncInterval()\n\t\tlog.WithField(\"syncInterval\", syncInterval.String()).Debug(\"Performing regular background work\")\n\n\t\t\/\/ get a copy of the node identities to avoid locking the entire manager\n\t\t\/\/ throughout the process of running the datapath validation.\n\t\tnodes := m.GetNodeIdentities()\n\t\tfor _, nodeIdentity := range nodes {\n\t\t\t\/\/ Retrieve latest node information in case any event\n\t\t\t\/\/ changed the node since the call to GetNodes()\n\t\t\tm.mutex.RLock()\n\t\t\tentry, ok := m.nodes[nodeIdentity]\n\t\t\tif !ok {\n\t\t\t\tm.mutex.RUnlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tentry.mutex.Lock()\n\t\t\tm.mutex.RUnlock()\n\t\t\tm.Iter(func(nh datapath.NodeHandler) {\n\t\t\t\tnh.NodeValidateImplementation(entry.node)\n\t\t\t})\n\t\t\tentry.mutex.Unlock()\n\n\t\t\tm.metricDatapathValidations.Inc()\n\t\t}\n\n\t\tselect {\n\t\tcase <-m.closeChan:\n\t\t\treturn\n\t\tcase <-time.After(syncInterval):\n\t\t}\n\t}\n}\n\n\/\/ overwriteAllowed returns true if an update from newSource can overwrite a node owned by oldSource.\nfunc overwriteAllowed(oldSource, newSource node.Source) bool {\n\tswitch newSource {\n\t\/\/ the local node always takes precedence\n\tcase node.FromLocalNode:\n\t\treturn true\n\n\t\/\/ agent local updates can 
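// Worked example of the ClusterSizeDependantInterval formula above:
// interval = baseInterval * ln(1 + numNodes). With baseInterval = time.Minute
// this reproduces the table, e.g. 1 node -> 60s * ln(2) ~= 41.59s and
// 16 nodes -> 60s * ln(17) ~= 2m49.99s (0 nodes falls back to baseInterval).
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	base := time.Minute
	for _, n := range []int{1, 2, 4, 8, 16} {
		d := time.Duration(int64(float64(base.Nanoseconds()) * math.Log1p(float64(n))))
		fmt.Printf("%5d nodes -> %s\n", n, d)
	}
}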
overwrite everything except for the local\n\t\/\/ node\n\tcase node.FromAgentLocal:\n\t\treturn oldSource != node.FromLocalNode\n\n\t\/\/ kvstore updates can overwrite everything except agent local updates and local node\n\tcase node.FromKVStore:\n\t\treturn oldSource != node.FromAgentLocal && oldSource != node.FromLocalNode\n\n\t\/\/ kubernetes updates can only overwrite kubernetes nodes\n\tcase node.FromKubernetes:\n\t\treturn oldSource != node.FromAgentLocal && oldSource != node.FromLocalNode && oldSource != node.FromKVStore\n\n\tdefault:\n\t\treturn false\n\t}\n}\n\n\/\/ NodeSoftUpdated is called after the information of a node has been updated but\n\/\/ unlike NodeUpdated does not require the datapath to be updated.\nfunc (m *Manager) NodeSoftUpdated(n node.Node) {\n\tlog.Debugf(\"Received soft node update event from %s: %#v\", n.Source, n)\n\tm.nodeUpdated(n, false)\n}\n\n\/\/ NodeUpdated is called after the information of a node has been updated. The\n\/\/ node in the manager is added or updated if the source is allowed to update\n\/\/ the node. If an update or addition has occurred, NodeUpdate() of the datapath\n\/\/ interface is invoked.\nfunc (m *Manager) NodeUpdated(n node.Node) {\n\tlog.Debugf(\"Received node update event from %s: %#v\", n.Source, n)\n\tm.nodeUpdated(n, true)\n}\n\nfunc (m *Manager) nodeUpdated(n node.Node, dpUpdate bool) {\n\tnodeIdentity := n.Identity()\n\n\tm.mutex.Lock()\n\tentry, oldNodeExists := m.nodes[nodeIdentity]\n\tif oldNodeExists {\n\t\tm.metricEventsReceived.WithLabelValues(\"update\", string(n.Source)).Inc()\n\n\t\tif !overwriteAllowed(entry.node.Source, n.Source) {\n\t\t\tm.mutex.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\tentry.mutex.Lock()\n\t\tm.mutex.Unlock()\n\t\toldNode := entry.node\n\t\tentry.node = n\n\t\tif dpUpdate {\n\t\t\tm.Iter(func(nh datapath.NodeHandler) {\n\t\t\t\tnh.NodeUpdate(oldNode, entry.node)\n\t\t\t})\n\t\t}\n\t\tentry.mutex.Unlock()\n\t} else {\n\t\tm.metricEventsReceived.WithLabelValues(\"add\", string(n.Source)).Inc()\n\t\tm.metricNumNodes.Inc()\n\n\t\tentry = &nodeEntry{node: n}\n\t\tentry.mutex.Lock()\n\t\tm.nodes[nodeIdentity] = entry\n\t\tm.mutex.Unlock()\n\t\tif dpUpdate {\n\t\t\tm.Iter(func(nh datapath.NodeHandler) {\n\t\t\t\tnh.NodeAdd(entry.node)\n\t\t\t})\n\t\t}\n\t\tentry.mutex.Unlock()\n\t}\n}\n\n\/\/ NodeDeleted is called after a node has been deleted. It removes the node\n\/\/ from the manager if the node is still owned by the source from which the event\n\/\/ originates. If the node was removed, NodeDelete() of the\n\/\/ datapath interface is invoked.\nfunc (m *Manager) NodeDeleted(n node.Node) {\n\tm.metricEventsReceived.WithLabelValues(\"delete\", string(n.Source)).Inc()\n\n\tlog.Debugf(\"Received node delete event from %s\", n.Source)\n\n\tnodeIdentity := n.Identity()\n\n\tm.mutex.Lock()\n\tentry, oldNodeExists := m.nodes[nodeIdentity]\n\tif !oldNodeExists {\n\t\tm.mutex.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ If the source is Kubernetes and the node is the node we are running on,\n\t\/\/ Kubernetes is giving us a hint it is about to delete our node. Close down\n\t\/\/ the agent gracefully in this case.\n\tif n.Source != entry.node.Source {\n\t\tm.mutex.Unlock()\n\t\tif n.IsLocal() && n.Source == node.FromKubernetes {\n\t\t\tlog.Debugf(\"Kubernetes is deleting local node, close manager\")\n\t\t\tm.Close()\n\t\t} else {\n\t\t\tlog.Debugf(\"Ignoring delete event of node %s from source %s. 
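// The overwriteAllowed switch above encodes a strict precedence order between
// node sources. For known sources it is equivalent to this rank-based
// formulation (illustrative standalone sketch only; the `source` type and
// `rank` helper are hypothetical, not part of the package):
package main

import "fmt"

type source string

const (
	fromKubernetes source = "kubernetes"  // lowest precedence
	fromKVStore    source = "kvstore"
	fromAgentLocal source = "agent-local"
	fromLocalNode  source = "local-node" // highest precedence
)

func rank(s source) int {
	switch s {
	case fromLocalNode:
		return 3
	case fromAgentLocal:
		return 2
	case fromKVStore:
		return 1
	case fromKubernetes:
		return 0
	}
	return -1 // unknown sources may never overwrite anything
}

// A source may overwrite a node owned by a source of equal or lower rank.
func overwriteAllowed(oldSource, newSource source) bool {
	return rank(newSource) >= rank(oldSource)
}

func main() {
	fmt.Println(overwriteAllowed(fromKVStore, fromAgentLocal)) // true
	fmt.Println(overwriteAllowed(fromLocalNode, fromKVStore))  // false
}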
The node is owned by %s\",\n\t\t\t\tn.Name, n.Source, entry.node.Source)\n\t\t}\n\t\treturn\n\t}\n\n\tm.metricNumNodes.Dec()\n\n\tentry.mutex.Lock()\n\tdelete(m.nodes, nodeIdentity)\n\tm.mutex.Unlock()\n\tm.Iter(func(nh datapath.NodeHandler) {\n\t\tnh.NodeDelete(n)\n\t})\n\tentry.mutex.Unlock()\n}\n\n\/\/ Exists returns true if a node with the name exists\nfunc (m *Manager) Exists(id node.Identity) bool {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\t_, ok := m.nodes[id]\n\treturn ok\n}\n\n\/\/ GetNodeIdentities returns a list of all node identities stored in the node\n\/\/ manager.\nfunc (m *Manager) GetNodeIdentities() []node.Identity {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tnodes := make([]node.Identity, 0, len(m.nodes))\n\tfor nodeIdentity := range m.nodes {\n\t\tnodes = append(nodes, nodeIdentity)\n\t}\n\n\treturn nodes\n}\n\n\/\/ GetNodes returns a copy of all of the nodes as a map from Identity to Node.\nfunc (m *Manager) GetNodes() map[node.Identity]node.Node {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tnodes := make(map[node.Identity]node.Node)\n\tfor nodeIdentity, entry := range m.nodes {\n\t\tentry.mutex.Lock()\n\t\tnodes[nodeIdentity] = entry.node\n\t\tentry.mutex.Unlock()\n\t}\n\n\treturn nodes\n}\n\n\/\/ DeleteAllNodes deletes all nodes from the node manager.\nfunc (m *Manager) DeleteAllNodes() {\n\tm.mutex.Lock()\n\tfor _, entry := range m.nodes {\n\t\tentry.mutex.Lock()\n\t\tm.Iter(func(nh datapath.NodeHandler) {\n\t\t\tnh.NodeDelete(entry.node)\n\t\t})\n\t\tentry.mutex.Unlock()\n\t}\n\tm.nodes = map[node.Identity]*nodeEntry{}\n\tm.mutex.Unlock()\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scheduler\n\nimport (\n\t\"math\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ the unused capacity is calculated on a scale of 0-10\n\/\/ 0 being the lowest priority and 10 being the highest\nfunc calculateScore(requested, capacity int64, node string) int {\n\tif capacity == 0 {\n\t\treturn 0\n\t}\n\tif requested > capacity {\n\t\tglog.Errorf(\"Combined requested resources from existing pods exceeds capacity on minion: %s\", node)\n\t\treturn 0\n\t}\n\treturn int(((capacity - requested) * 10) \/ capacity)\n}\n\n\/\/ Calculate the occupancy on a node. 
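// Worked example of the calculateScore formula above: the score is the free
// fraction of the resource on a 0-10 scale, (capacity - requested) * 10 / capacity
// (standalone sketch without the logging and node-name parameter).
package main

import "fmt"

func calculateScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * 10 / capacity
}

func main() {
	fmt.Println(calculateScore(400, 1000)) // 6: 60% of the CPU is still free
	fmt.Println(calculateScore(1, 5))      // 8: 80% of the memory is still free
	// calculateOccupancy then averages the two scores: (6 + 8) / 2 = 7.
}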
'node' has information about the resources on the node.\n\/\/ 'pods' is a list of pods currently scheduled on the node.\nfunc calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {\n\ttotalMilliCPU := int64(0)\n\ttotalMemory := int64(0)\n\tfor _, existingPod := range pods {\n\t\tfor _, container := range existingPod.Spec.Containers {\n\t\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t\t}\n\t}\n\t\/\/ Add the resources requested by the current pod being scheduled.\n\t\/\/ This also helps differentiate between differently sized, but empty, minions.\n\tfor _, container := range pod.Spec.Containers {\n\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t}\n\n\tcapacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()\n\tcapacityMemory := node.Status.Capacity.Memory().Value()\n\n\tcpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)\n\tmemoryScore := calculateScore(totalMemory, capacityMemory, node.Name)\n\tglog.V(4).Infof(\n\t\t\"%v -> %v: Least Requested Priority, AbsoluteRequested: (%d, %d) \/ (%d, %d) Score: (%d, %d)\",\n\t\tpod.Name, node.Name,\n\t\ttotalMilliCPU, totalMemory,\n\t\tcapacityMilliCPU, capacityMemory,\n\t\tcpuScore, memoryScore,\n\t)\n\n\treturn HostPriority{\n\t\thost: node.Name,\n\t\tscore: int((cpuScore + memoryScore) \/ 2),\n\t}\n}\n\n\/\/ LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.\n\/\/ It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes\n\/\/ based on the minimum of the average of the fraction of requested to capacity.\n\/\/ Details: (Sum(requested cpu) \/ Capacity + Sum(requested memory) \/ Capacity) * 50\nfunc LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tnodes, err := minionLister.List()\n\tif err != nil {\n\t\treturn HostPriorityList{}, err\n\t}\n\tpodsToMachines, err := MapPodsToMachines(podLister)\n\n\tlist := HostPriorityList{}\n\tfor _, node := range nodes.Items {\n\t\tlist = append(list, calculateOccupancy(pod, node, podsToMachines[node.Name]))\n\t}\n\treturn list, nil\n}\n\ntype NodeLabelPrioritizer struct {\n\tlabel string\n\tpresence bool\n}\n\nfunc NewNodeLabelPriority(label string, presence bool) PriorityFunction {\n\tlabelPrioritizer := &NodeLabelPrioritizer{\n\t\tlabel: label,\n\t\tpresence: presence,\n\t}\n\treturn labelPrioritizer.CalculateNodeLabelPriority\n}\n\n\/\/ CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.\n\/\/ If presence is true, prioritizes minions that have the specified label, regardless of value.\n\/\/ If presence is false, prioritizes minions that do not have the specified label.\nfunc (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tvar score int\n\tminions, err := minionLister.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlabeledMinions := map[string]bool{}\n\tfor _, minion := range minions.Items {\n\t\texists := labels.Set(minion.Labels).Has(n.label)\n\t\tlabeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)\n\t}\n\n\tresult := []HostPriority{}\n\t\/\/score int - scale of 0-10\n\t\/\/ 0 being the lowest priority and 10 being the highest\n\tfor minionName, 
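// A small standalone sketch of the presence/absence scoring used by
// CalculateNodeLabelPriority above: every minion gets 10 when its label state
// matches the desired `presence`, otherwise 0, since
// (exists && presence) || (!exists && !presence) is just exists == presence.
package main

import "fmt"

func labelScore(hasLabel, presence bool) int {
	if hasLabel == presence {
		return 10
	}
	return 0
}

func main() {
	fmt.Println(labelScore(true, true))   // 10: label present, presence wanted
	fmt.Println(labelScore(true, false))  // 0: label present, absence wanted
	fmt.Println(labelScore(false, false)) // 10: label absent, absence wanted
}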
success := range labeledMinions {\n\t\tif success {\n\t\t\tscore = 10\n\t\t} else {\n\t\t\tscore = 0\n\t\t}\n\t\tresult = append(result, HostPriority{host: minionName, score: score})\n\t}\n\treturn result, nil\n}\n\n\/\/ BalancedResourceAllocation favors nodes with balanced resource usage rate.\n\/\/ BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.\n\/\/ It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how\n\/\/ close the two metrics are to each other.\n\/\/ Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:\n\/\/ \"Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization\"\nfunc BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tnodes, err := minionLister.List()\n\tif err != nil {\n\t\treturn HostPriorityList{}, err\n\t}\n\tpodsToMachines, err := MapPodsToMachines(podLister)\n\n\tlist := HostPriorityList{}\n\tfor _, node := range nodes.Items {\n\t\tlist = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))\n\t}\n\treturn list, nil\n}\n\nfunc calculateBalancedResourceAllocation(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {\n\ttotalMilliCPU := int64(0)\n\ttotalMemory := int64(0)\n\tscore := int(0)\n\tfor _, existingPod := range pods {\n\t\tfor _, container := range existingPod.Spec.Containers {\n\t\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t\t}\n\t}\n\t\/\/ Add the resources requested by the current pod being scheduled.\n\t\/\/ This also helps differentiate between differently sized, but empty, minions.\n\tfor _, container := range pod.Spec.Containers {\n\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t}\n\n\tcapacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()\n\tcapacityMemory := node.Status.Capacity.Memory().Value()\n\n\tcpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU, node.Name)\n\tmemoryFraction := fractionOfCapacity(totalMemory, capacityMemory, node.Name)\n\tif cpuFraction >= 1 || memoryFraction >= 1 {\n\t\t\/\/ if requested >= capacity, the corresponding host should never be preferred.\n\t\tscore = 0\n\t} else {\n\t\t\/\/ Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1\n\t\t\/\/ respectively. Multiplying the absolute value of the difference by 10 scales the value to\n\t\t\/\/ 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from\n\t\t\/\/ 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced.\n\t\tdiff := math.Abs(cpuFraction - memoryFraction)\n\t\tscore = int(10 - diff*10)\n\t}\n\tglog.V(4).Infof(\n\t\t\"%v -> %v: Balanced Resource Allocation, Absolute\/Requested: (%d, %d) \/ (%d, %d) Score: (%d)\",\n\t\tpod.Name, node.Name,\n\t\ttotalMilliCPU, totalMemory,\n\t\tcapacityMilliCPU, capacityMemory,\n\t\tscore,\n\t)\n\n\treturn HostPriority{\n\t\thost: node.Name,\n\t\tscore: score,\n\t}\n}\n\nfunc fractionOfCapacity(requested, capacity int64, node string) float64 {\n\tif capacity == 0 {\n\t\treturn 1\n\t}\n\treturn float64(requested) \/ float64(capacity)\n}\nUpdate priorities.go\/*\nCopyright 2014 Google Inc. 
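// Worked example of the balancing score above: score = 10 - |cpuFraction -
// memoryFraction| * 10. With cpuFraction = 0.4 and memoryFraction = 0.7 the
// difference is 0.3 and the score is int(10 - 3) = 7; equal fractions score a
// perfect 10 (standalone sketch of the arithmetic only).
package main

import (
	"fmt"
	"math"
)

func balancedScore(cpuFraction, memoryFraction float64) int {
	if cpuFraction >= 1 || memoryFraction >= 1 {
		return 0 // an overcommitted host is never preferred
	}
	return int(10 - math.Abs(cpuFraction-memoryFraction)*10)
}

func main() {
	fmt.Println(balancedScore(0.4, 0.7)) // 7
	fmt.Println(balancedScore(0.5, 0.5)) // 10: perfectly balanced usage
}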
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage scheduler\n\nimport (\n\t\"math\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ the unused capacity is calculated on a scale of 0-10\n\/\/ 0 being the lowest priority and 10 being the highest\nfunc calculateScore(requested, capacity int64, node string) int {\n\tif capacity == 0 {\n\t\treturn 0\n\t}\n\tif requested > capacity {\n\t\tglog.Infof(\"Combined requested resources from existing pods exceeds capacity on minion: %s\", node)\n\t\treturn 0\n\t}\n\treturn int(((capacity - requested) * 10) \/ capacity)\n}\n\n\/\/ Calculate the occupancy on a node. 'node' has information about the resources on the node.\n\/\/ 'pods' is a list of pods currently scheduled on the node.\nfunc calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {\n\ttotalMilliCPU := int64(0)\n\ttotalMemory := int64(0)\n\tfor _, existingPod := range pods {\n\t\tfor _, container := range existingPod.Spec.Containers {\n\t\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t\t}\n\t}\n\t\/\/ Add the resources requested by the current pod being scheduled.\n\t\/\/ This also helps differentiate between differently sized, but empty, minions.\n\tfor _, container := range pod.Spec.Containers {\n\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t}\n\n\tcapacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()\n\tcapacityMemory := node.Status.Capacity.Memory().Value()\n\n\tcpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)\n\tmemoryScore := calculateScore(totalMemory, capacityMemory, node.Name)\n\tglog.V(4).Infof(\n\t\t\"%v -> %v: Least Requested Priority, Absolute\/Requested: (%d, %d) \/ (%d, %d) Score: (%d, %d)\",\n\t\tpod.Name, node.Name,\n\t\ttotalMilliCPU, totalMemory,\n\t\tcapacityMilliCPU, capacityMemory,\n\t\tcpuScore, memoryScore,\n\t)\n\n\treturn HostPriority{\n\t\thost: node.Name,\n\t\tscore: int((cpuScore + memoryScore) \/ 2),\n\t}\n}\n\n\/\/ LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.\n\/\/ It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes\n\/\/ based on the minimum of the average of the fraction of requested to capacity.\n\/\/ Details: (Sum(requested cpu) \/ Capacity + Sum(requested memory) \/ Capacity) * 50\nfunc LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tnodes, err := minionLister.List()\n\tif err != nil {\n\t\treturn HostPriorityList{}, err\n\t}\n\tpodsToMachines, err := MapPodsToMachines(podLister)\n\n\tlist := HostPriorityList{}\n\tfor _, node := range nodes.Items {\n\t\tlist = append(list, calculateOccupancy(pod, node, 
podsToMachines[node.Name]))\n\t}\n\treturn list, nil\n}\n\ntype NodeLabelPrioritizer struct {\n\tlabel string\n\tpresence bool\n}\n\nfunc NewNodeLabelPriority(label string, presence bool) PriorityFunction {\n\tlabelPrioritizer := &NodeLabelPrioritizer{\n\t\tlabel: label,\n\t\tpresence: presence,\n\t}\n\treturn labelPrioritizer.CalculateNodeLabelPriority\n}\n\n\/\/ CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.\n\/\/ If presence is true, prioritizes minions that have the specified label, regardless of value.\n\/\/ If presence is false, prioritizes minions that do not have the specified label.\nfunc (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tvar score int\n\tminions, err := minionLister.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlabeledMinions := map[string]bool{}\n\tfor _, minion := range minions.Items {\n\t\texists := labels.Set(minion.Labels).Has(n.label)\n\t\tlabeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)\n\t}\n\n\tresult := []HostPriority{}\n\t\/\/score int - scale of 0-10\n\t\/\/ 0 being the lowest priority and 10 being the highest\n\tfor minionName, success := range labeledMinions {\n\t\tif success {\n\t\t\tscore = 10\n\t\t} else {\n\t\t\tscore = 0\n\t\t}\n\t\tresult = append(result, HostPriority{host: minionName, score: score})\n\t}\n\treturn result, nil\n}\n\n\/\/ BalancedResourceAllocation favors nodes with balanced resource usage rate.\n\/\/ BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.\n\/\/ It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how\n\/\/ close the two metrics are to each other.\n\/\/ Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:\n\/\/ \"Wei Huang et al. 
An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization\"\nfunc BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {\n\tnodes, err := minionLister.List()\n\tif err != nil {\n\t\treturn HostPriorityList{}, err\n\t}\n\tpodsToMachines, err := MapPodsToMachines(podLister)\n\n\tlist := HostPriorityList{}\n\tfor _, node := range nodes.Items {\n\t\tlist = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))\n\t}\n\treturn list, nil\n}\n\nfunc calculateBalancedResourceAllocation(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {\n\ttotalMilliCPU := int64(0)\n\ttotalMemory := int64(0)\n\tscore := int(0)\n\tfor _, existingPod := range pods {\n\t\tfor _, container := range existingPod.Spec.Containers {\n\t\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t\t}\n\t}\n\t\/\/ Add the resources requested by the current pod being scheduled.\n\t\/\/ This also helps differentiate between differently sized, but empty, minions.\n\tfor _, container := range pod.Spec.Containers {\n\t\ttotalMilliCPU += container.Resources.Limits.Cpu().MilliValue()\n\t\ttotalMemory += container.Resources.Limits.Memory().Value()\n\t}\n\n\tcapacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()\n\tcapacityMemory := node.Status.Capacity.Memory().Value()\n\n\tcpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU, node.Name)\n\tmemoryFraction := fractionOfCapacity(totalMemory, capacityMemory, node.Name)\n\tif cpuFraction >= 1 || memoryFraction >= 1 {\n\t\t\/\/ if requested >= capacity, the corresponding host should never be preferred.\n\t\tscore = 0\n\t} else {\n\t\t\/\/ Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1\n\t\t\/\/ respectively. Multiplying the absolute value of the difference by 10 scales the value to\n\t\t\/\/ 0-10 with 0 representing well balanced allocation and 10 poorly balanced. 
Subtracting it from\n\t\t\/\/ 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced.\n\t\tdiff := math.Abs(cpuFraction - memoryFraction)\n\t\tscore = int(10 - diff*10)\n\t}\n\tglog.V(4).Infof(\n\t\t\"%v -> %v: Balanced Resource Allocation, Absolute\/Requested: (%d, %d) \/ (%d, %d) Score: (%d)\",\n\t\tpod.Name, node.Name,\n\t\ttotalMilliCPU, totalMemory,\n\t\tcapacityMilliCPU, capacityMemory,\n\t\tscore,\n\t)\n\n\treturn HostPriority{\n\t\thost: node.Name,\n\t\tscore: score,\n\t}\n}\n\nfunc fractionOfCapacity(requested, capacity int64, node string) float64 {\n\tif capacity == 0 {\n\t\treturn 1\n\t}\n\treturn float64(requested) \/ float64(capacity)\n}\n<|endoftext|>"} {"text":"package cli\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise\/bitrise\"\n\tmodels \"github.com\/bitrise-io\/bitrise\/models\/models_1_0_0\"\n\tenvmanModels \"github.com\/bitrise-io\/envman\/models\"\n\t\"github.com\/bitrise-io\/go-pathutil\/pathutil\"\n\t\"github.com\/bitrise-io\/goinp\/goinp\"\n\tstepmanModels \"github.com\/bitrise-io\/stepman\/models\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nconst (\n\tdefaultStepLibSource = \"https:\/\/bitbucket.org\/bitrise-team\/bitrise-new-steps-spec\"\n\t\/\/\n\tdefaultSecretsContent = `envs:\n- MY_HOME: $HOME\n- MY_SECRET_PASSWORD: XyZ\n opts:\n # You can include some options as well if you\n # want to change how the value is passed to a command.\n is_expand: no\n # For example you can use is_expand: no\n # if you want to make it sure that\n # the value is preserved as-it-is, and won't be\n # expanded before use.\n # For example if your password contains the dollar sign ($)\n # it would (by default) be expanded as an environment variable,\n # just like $HOME would be expanded\/replaced with your home\n # directory path.\n # You can prevent this with is_expand: no`\n)\n\nfunc doInit(c *cli.Context) {\n\tPrintBitriseHeaderASCIIArt()\n\n\tbitriseConfigFileRelPath := \".\/\" + DefaultBitriseConfigFileName\n\tbitriseSecretsFileRelPath := \".\/\" + DefaultSecretsFileName\n\n\tif exists, err := pathutil.IsPathExists(bitriseConfigFileRelPath); err != nil {\n\t\tlog.Fatalln(\"Error:\", err)\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A config file already exists at %s - do you want to overwrite it?\", bitriseConfigFileRelPath)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\tlog.Fatalln(\"Error:\", err)\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file won't be overwritten.\")\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tdefaultExpand := true\n\tprojectSettingsEnvs := []envmanModels.EnvironmentItemModel{}\n\tif val, err := goinp.AskForString(\"What's the BITRISE_PROJECT_TITLE?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tprojectTitleEnv := envmanModels.EnvironmentItemModel{\n\t\t\t\"BITRISE_PROJECT_TITLE\": val,\n\t\t\t\"opts\": envmanModels.EnvironmentItemOptionsModel{\n\t\t\t\tIsExpand: &defaultExpand,\n\t\t\t},\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, projectTitleEnv)\n\t}\n\tif val, err := goinp.AskForString(\"What's your primary development branch's name?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tdevBranchEnv := envmanModels.EnvironmentItemModel{\n\t\t\t\"BITRISE_DEV_BRANCH\": val,\n\t\t\t\"opts\": envmanModels.EnvironmentItemOptionsModel{\n\t\t\t\tIsExpand: &defaultExpand,\n\t\t\t},\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, devBranchEnv)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ generate a couple of 
base steps\n\t\/\/ * timestamp gen\n\t\/\/ * bash script - hello world\n\n\tscriptStepTitle := \"Hello Bitrise!\"\n\tscriptStepContent := `#!\/bin\/bash\necho \"Welcome to Bitrise!\"`\n\tbitriseConf := models.BitriseDataModel{\n\t\tFormatVersion: c.App.Version,\n\t\tDefaultStepLibSource: defaultStepLibSource,\n\t\tApp: models.AppModel{\n\t\t\tEnvironments: projectSettingsEnvs,\n\t\t},\n\t\tWorkflows: map[string]models.WorkflowModel{\n\t\t\t\"primary\": models.WorkflowModel{\n\t\t\t\tSteps: []models.StepListItemModel{\n\t\t\t\t\tmodels.StepListItemModel{\n\t\t\t\t\t\t\"script\": stepmanModels.StepModel{\n\t\t\t\t\t\t\tTitle: &scriptStepTitle,\n\t\t\t\t\t\t\tInputs: []envmanModels.EnvironmentItemModel{\n\t\t\t\t\t\t\t\tenvmanModels.EnvironmentItemModel{\n\t\t\t\t\t\t\t\t\t\"content\": scriptStepContent,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := bitrise.SaveConfigToFile(bitriseConfigFileRelPath, bitriseConf); err != nil {\n\t\tlog.Fatalln(\"Failed to init the bitrise config file:\", err)\n\t} else {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultBitriseConfigFileName + \" config file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We initialized a \" + DefaultBitriseConfigFileName + \" config file for you.\")\n\t\tfmt.Println(\"If you're in this folder you can use this config file\")\n\t\tfmt.Println(\" with bitrise automatically, you don't have to\")\n\t\tfmt.Println(\" specify it's path.\")\n\t\tfmt.Println()\n\t}\n\n\tif initialized, err := saveSecretsToFile(bitriseSecretsFileRelPath, defaultSecretsContent); err != nil {\n\t\tlog.Fatalln(\"Failed to init the secrets file:\", err)\n\t} else if initialized {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultSecretsFileName + \" secrets file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We also created a \" + DefaultSecretsFileName + \" file\")\n\t\tfmt.Println(\" in this directory, to keep your passwords, absolute path configurations\")\n\t\tfmt.Println(\" and other secrets separate from your\")\n\t\tfmt.Println(\" main configuration file.\")\n\t\tfmt.Println(\"This way you can safely commit and share your configuration file\")\n\t\tfmt.Println(\" and ignore this secrets file, so nobody else will\")\n\t\tfmt.Println(\" know about your secrets.\")\n\t\tfmt.Println(\"You should NEVER commit this secrets file into your repository!!\")\n\t\tfmt.Println()\n\t}\n\n\tfmt.Println()\n\tfmt.Println(\"Hurray, you're good to go!\")\n\tfmt.Println(\"You can simply run:\")\n\tfmt.Println(\"-> bitrise run primary\")\n\tfmt.Println(\"to test the sample configuration (which contains\")\n\tfmt.Println(\"an example workflow called 'primary').\")\n\tfmt.Println()\n\tfmt.Println(\"Once you tested this sample setup you can\")\n\tfmt.Println(\" open the \" + DefaultBitriseConfigFileName + \" config file,\")\n\tfmt.Println(\" modify it and then run a workflow with:\")\n\tfmt.Println(\"-> bitrise run YOUR-WORKFLOW-NAME\")\n}\n\nfunc saveSecretsToFile(pth, secretsStr string) (bool, error) {\n\tif exists, err := pathutil.IsPathExists(pth); err != nil {\n\t\treturn false, err\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A secrets file already exists at %s - do you want to overwrite it?\", pth)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\treturn false, err\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file (\" + pth + \") won't be overwritten.\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif err := 
bitrise.WriteStringToFile(pth, secretsStr); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\ninit highligthpackage cli\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise\/bitrise\"\n\t\"github.com\/bitrise-io\/bitrise\/colorstring\"\n\tmodels \"github.com\/bitrise-io\/bitrise\/models\/models_1_0_0\"\n\tenvmanModels \"github.com\/bitrise-io\/envman\/models\"\n\t\"github.com\/bitrise-io\/go-pathutil\/pathutil\"\n\t\"github.com\/bitrise-io\/goinp\/goinp\"\n\tstepmanModels \"github.com\/bitrise-io\/stepman\/models\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nconst (\n\tdefaultStepLibSource = \"https:\/\/bitbucket.org\/bitrise-team\/bitrise-new-steps-spec\"\n\t\/\/\n\tdefaultSecretsContent = `envs:\n- MY_HOME: $HOME\n- MY_SECRET_PASSWORD: XyZ\n opts:\n # You can include some options as well if you\n # want to change how the value is passed to a command.\n is_expand: no\n # For example you can use is_expand: no\n # if you want to make it sure that\n # the value is preserved as-it-is, and won't be\n # expanded before use.\n # For example if your password contains the dollar sign ($)\n # it would (by default) be expanded as an environment variable,\n # just like $HOME would be expanded\/replaced with your home\n # directory path.\n # You can prevent this with is_expand: no`\n)\n\nfunc doInit(c *cli.Context) {\n\tPrintBitriseHeaderASCIIArt()\n\n\tbitriseConfigFileRelPath := \".\/\" + DefaultBitriseConfigFileName\n\tbitriseSecretsFileRelPath := \".\/\" + DefaultSecretsFileName\n\n\tif exists, err := pathutil.IsPathExists(bitriseConfigFileRelPath); err != nil {\n\t\tlog.Fatalln(\"Error:\", err)\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A config file already exists at %s - do you want to overwrite it?\", bitriseConfigFileRelPath)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\tlog.Fatalln(\"Error:\", err)\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file won't be overwritten.\")\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tdefaultExpand := true\n\tprojectSettingsEnvs := []envmanModels.EnvironmentItemModel{}\n\tif val, err := goinp.AskForString(\"What's the BITRISE_PROJECT_TITLE?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tprojectTitleEnv := envmanModels.EnvironmentItemModel{\n\t\t\t\"BITRISE_PROJECT_TITLE\": val,\n\t\t\t\"opts\": envmanModels.EnvironmentItemOptionsModel{\n\t\t\t\tIsExpand: &defaultExpand,\n\t\t\t},\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, projectTitleEnv)\n\t}\n\tif val, err := goinp.AskForString(\"What's your primary development branch's name?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tdevBranchEnv := envmanModels.EnvironmentItemModel{\n\t\t\t\"BITRISE_DEV_BRANCH\": val,\n\t\t\t\"opts\": envmanModels.EnvironmentItemOptionsModel{\n\t\t\t\tIsExpand: &defaultExpand,\n\t\t\t},\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, devBranchEnv)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ generate a couple of base steps\n\t\/\/ * timestamp gen\n\t\/\/ * bash script - hello world\n\n\tscriptStepTitle := \"Hello Bitrise!\"\n\tscriptStepContent := `#!\/bin\/bash\necho \"Welcome to Bitrise!\"`\n\tbitriseConf := models.BitriseDataModel{\n\t\tFormatVersion: c.App.Version,\n\t\tDefaultStepLibSource: defaultStepLibSource,\n\t\tApp: models.AppModel{\n\t\t\tEnvironments: projectSettingsEnvs,\n\t\t},\n\t\tWorkflows: map[string]models.WorkflowModel{\n\t\t\t\"primary\": models.WorkflowModel{\n\t\t\t\tSteps: 
[]models.StepListItemModel{\n\t\t\t\t\tmodels.StepListItemModel{\n\t\t\t\t\t\t\"script\": stepmanModels.StepModel{\n\t\t\t\t\t\t\tTitle: &scriptStepTitle,\n\t\t\t\t\t\t\tInputs: []envmanModels.EnvironmentItemModel{\n\t\t\t\t\t\t\t\tenvmanModels.EnvironmentItemModel{\n\t\t\t\t\t\t\t\t\t\"content\": scriptStepContent,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := bitrise.SaveConfigToFile(bitriseConfigFileRelPath, bitriseConf); err != nil {\n\t\tlog.Fatalln(\"Failed to init the bitrise config file:\", err)\n\t} else {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultBitriseConfigFileName + \" config file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We initialized a \" + DefaultBitriseConfigFileName + \" config file for you.\")\n\t\tfmt.Println(\"If you're in this folder you can use this config file\")\n\t\tfmt.Println(\" with bitrise automatically, you don't have to\")\n\t\tfmt.Println(\" specify it's path.\")\n\t\tfmt.Println()\n\t}\n\n\tif initialized, err := saveSecretsToFile(bitriseSecretsFileRelPath, defaultSecretsContent); err != nil {\n\t\tlog.Fatalln(\"Failed to init the secrets file:\", err)\n\t} else if initialized {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultSecretsFileName + \" secrets file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We also created a \" + DefaultSecretsFileName + \" file\")\n\t\tfmt.Println(\" in this directory, to keep your passwords, absolute path configurations\")\n\t\tfmt.Println(\" and other secrets separate from your\")\n\t\tfmt.Println(\" main configuration file.\")\n\t\tfmt.Println(\"This way you can safely commit and share your configuration file\")\n\t\tfmt.Println(\" and ignore this secrets file, so nobody else will\")\n\t\tfmt.Println(\" know about your secrets.\")\n\t\tfmt.Println(colorstring.Yellow(\"You should NEVER commit this secrets file into your repository!!\"))\n\t\tfmt.Println()\n\t}\n\n\tfmt.Println()\n\tfmt.Println(\"Hurray, you're good to go!\")\n\tfmt.Println(\"You can simply run:\")\n\tfmt.Println(\"-> bitrise run primary\")\n\tfmt.Println(\"to test the sample configuration (which contains\")\n\tfmt.Println(\"an example workflow called 'primary').\")\n\tfmt.Println()\n\tfmt.Println(\"Once you tested this sample setup you can\")\n\tfmt.Println(\" open the \" + DefaultBitriseConfigFileName + \" config file,\")\n\tfmt.Println(\" modify it and then run a workflow with:\")\n\tfmt.Println(\"-> bitrise run YOUR-WORKFLOW-NAME\")\n}\n\nfunc saveSecretsToFile(pth, secretsStr string) (bool, error) {\n\tif exists, err := pathutil.IsPathExists(pth); err != nil {\n\t\treturn false, err\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A secrets file already exists at %s - do you want to overwrite it?\", pth)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\treturn false, err\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file (\" + pth + \") won't be overwritten.\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif err := bitrise.WriteStringToFile(pth, secretsStr); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"\/\/\n\/\/ Package ep is a collection of structures and functions for working with the EPrints REST API\n\/\/\n\/\/ @author R. S. 
Doiel, \n\/\/\n\/\/ Copyright (c) 2017, Caltech\n\/\/ All rights not granted herein are expressly reserved by Caltech.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ Caltech Library Packages\n\t\"github.com\/caltechlibrary\/cli\"\n\tep \"github.com\/caltechlibrary\/eprinttools\"\n)\n\nvar (\n\t\/\/ cli help text\n\tusage = `USAGE %s [OPTIONS] [EP_EPRINTS_URL|ONE_OR_MORE_EPRINT_ID]`\n\n\tdescription = `\nSYNOPSIS\n\n%s wraps the REST API for EPrints 3.3 or better. It can return a list \nof uri, a JSON view of the XML presentation as well as generates feeds \nand web pages.\n\nCONFIGURATION\n\nep can be configured with following environment variables\n\nEP_EPRINTS_URL the URL to your EPrints installation\n\nEP_DATASET the dataset and collection name for exporting, site building, and content retrieval`\n\n\texamples = `\nEXAMPLE\n\n %s -export all\n\nWould export the entire EPrints repository public content defined by the\nenvironment virables EP_API_URL, EP_DATASET.\n\n %s -export 2000\n\nWould export 2000 EPrints from the repository with the heighest ID values.\n\n %s -export-modified 2017-07-01\n\nWould export the EPrint records modified since July 1, 2017.\n\n %s -export-modified 2017-07-01,2017-07-31 \\\n -export-save-keys=july-keys.txt \n\nWould export the EPrint records with modified times in July 2017 and\nsave the keys for the records exported with one key per line. 
\n`\n\n\t\/\/ Standard Options\n\tshowHelp bool\n\tshowVersion bool\n\tshowLicense bool\n\tshowExamples bool\n\toutputFName string\n\tverbose bool\n\n\t\/\/ App Options\n\tuseAPI bool\n\tprettyPrint bool\n\n\tapiURL string\n\tdatasetName string\n\n\tupdatedSince string\n\texportEPrints string\n\texportEPrintsModified string\n\texportSaveKeys string\n\tfeedSize int\n\n\tauthMethod string\n\tuserName string\n\tuserSecret string\n)\n\nfunc init() {\n\t\/\/ Setup options\n\tfeedSize = ep.DefaultFeedSize\n\n\tflag.BoolVar(&showHelp, \"h\", false, \"display help\")\n\tflag.BoolVar(&showHelp, \"help\", false, \"display help\")\n\tflag.BoolVar(&showLicense, \"l\", false, \"display license\")\n\tflag.BoolVar(&showLicense, \"license\", false, \"display license\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"display version\")\n\tflag.BoolVar(&showVersion, \"version\", false, \"display version\")\n\tflag.BoolVar(&showExamples, \"example\", false, \"display example(s)\")\n\tflag.StringVar(&outputFName, \"o\", \"\", \"output filename (logging)\")\n\tflag.StringVar(&outputFName, \"output\", \"\", \"output filename (logging)\")\n\tflag.BoolVar(&verbose, \"verbose\", true, \"verbose logging\")\n\n\t\/\/ App Specific options\n\tflag.StringVar(&authMethod, \"auth\", \"\", \"set the authentication method (e.g. none, basic, oauth, shib)\")\n\tflag.StringVar(&userName, \"username\", \"\", \"set the username\")\n\tflag.StringVar(&userName, \"un\", \"\", \"set the username\")\n\tflag.StringVar(&userSecret, \"pw\", \"\", \"set the password\")\n\n\tflag.StringVar(&apiURL, \"api\", \"\", \"url for EPrints API\")\n\tflag.StringVar(&datasetName, \"dataset\", \"\", \"dataset\/collection name\")\n\n\tflag.BoolVar(&prettyPrint, \"p\", false, \"pretty print JSON output\")\n\tflag.BoolVar(&prettyPrint, \"pretty\", false, \"pretty print JSON output\")\n\tflag.BoolVar(&useAPI, \"read-api\", false, \"read the contents from the API without saving in the database\")\n\tflag.StringVar(&exportEPrints, \"export\", \"\", \"export N EPrints from highest ID to lowest\")\n\tflag.StringVar(&exportEPrintsModified, \"export-modified\", \"\", \"export records by date or date range (e.g. 
2017-07-01)\")\n\tflag.StringVar(&exportSaveKeys, \"export-save-keys\", \"\", \"save the keys exported in a file with provided filename\")\n\tflag.StringVar(&updatedSince, \"updated-since\", \"\", \"list EPrint IDs updated since a given date (e.g 2017-07-01)\")\n}\n\nfunc check(cfg *cli.Config, key, value string) string {\n\tif value == \"\" {\n\t\tlog.Fatalf(\"Missing %s_%s\", cfg.EnvPrefix, strings.ToUpper(key))\n\t\treturn \"\"\n\t}\n\treturn value\n}\n\nfunc main() {\n\tappName := path.Base(os.Args[0])\n\tflag.Parse()\n\targs := flag.Args()\n\n\t\/\/ Populate cfg from the environment\n\tcfg := cli.New(appName, \"EP\", ep.Version)\n\tcfg.LicenseText = fmt.Sprintf(ep.LicenseText, appName, ep.Version)\n\tcfg.UsageText = fmt.Sprintf(usage, appName)\n\tcfg.DescriptionText = fmt.Sprintf(description, appName, appName)\n\tcfg.OptionText = \"OPTIONS\"\n\tcfg.ExampleText = fmt.Sprintf(examples, appName, appName)\n\n\t\/\/ Handle the default options\n\tif showHelp == true {\n\t\tif len(args) > 0 {\n\t\t\tfmt.Println(cfg.Help(args...))\n\t\t} else {\n\t\t\tfmt.Println(cfg.Usage())\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif showExamples == true {\n\t\tif len(args) > 0 {\n\t\t\tfmt.Println(cfg.Example(args...))\n\t\t} else {\n\t\t\tfmt.Println(cfg.ExampleText)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif showVersion == true {\n\t\tfmt.Println(cfg.Version())\n\t\tos.Exit(0)\n\t}\n\tif showLicense == true {\n\t\tfmt.Println(cfg.License())\n\t\tos.Exit(0)\n\t}\n\n\tout, err := cli.Create(outputFName, os.Stdout)\n\tif err != nil {\n\t\tfmt.Fprint(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer cli.CloseFile(outputFName, out)\n\n\t\/\/ Log to out\n\tlog.SetOutput(out)\n\n\t\/\/ Required configuration\n\tapiURL = check(cfg, \"eprint_url\", cfg.MergeEnv(\"eprint_url\", apiURL))\n\tdatasetName = check(cfg, \"dataset\", cfg.MergeEnv(\"dataset\", datasetName))\n\n\t\/\/ Optional configuration\n\tauthMethod = cfg.MergeEnv(\"auth_method\", authMethod)\n\tuserName = cfg.MergeEnv(\"username\", userName)\n\tuserSecret = cfg.MergeEnv(\"password\", userSecret)\n\n\t\/\/ This will read in any settings from the environment\n\tapi, err := ep.New(cfg)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif exportEPrints != \"\" {\n\t\tt0 := time.Now()\n\t\texportNo := -1\n\t\tif exportEPrints != \"all\" {\n\t\t\texportNo, err = strconv.Atoi(exportEPrints)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Export count should be %q or an integer, %s\", exportEPrints, err)\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"%s %s (pid %d)\", appName, ep.Version, os.Getpid())\n\t\tlog.Printf(\"Export started, %s\", t0)\n\t\tif err := api.ExportEPrints(exportNo, exportSaveKeys, verbose); err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlog.Printf(\"Export completed, running time %s\", time.Now().Sub(t0))\n\t\tos.Exit(0)\n\t}\n\tif exportEPrintsModified != \"\" {\n\t\ts := exportEPrintsModified\n\t\te := time.Now().Format(\"2006-01-02\")\n\t\tif strings.Contains(s, \",\") {\n\t\t\tp := strings.SplitN(s, \",\", 2)\n\t\t\ts, e = p[0], p[1]\n\t\t}\n\t\tstart, err := time.Parse(\"2006-01-02\", s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tend, err := time.Parse(\"2006-01-02\", e)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tt0 := time.Now()\n\t\tlog.Printf(\"%s %s (pid %d)\", appName, ep.Version, os.Getpid())\n\t\tlog.Printf(\"Export from %s to %s, started %s\", start.Format(\"2006-01-02\"), end.Format(\"2006-01-02\"), t0.Format(\"2006-01-02 15:04:05 
MST\"))\n\t\tif err := api.ExportModifiedEPrints(start, end, exportSaveKeys, verbose); err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlog.Printf(\"Export completed, running time %s\", time.Now().Sub(t0))\n\t\tos.Exit(0)\n\t}\n\n\t\/\/\n\t\/\/ Generate JSON output\n\t\/\/\n\tvar (\n\t\tsrc []byte\n\t\tdata interface{}\n\t)\n\tswitch {\n\tcase updatedSince != \"\":\n\t\t\/\/ date should be formatted YYYY-MM-DD, 2006-01-02\n\t\tend := time.Now()\n\t\tstart, err := time.Parse(\"2006-01-02\", updatedSince)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"updated since %q, %s\", updatedSince, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdata, err = api.ListModifiedEPrintURI(start, end, verbose)\n\tcase useAPI == true:\n\t\tif len(args) == 1 {\n\t\t\tdata, _, err = api.GetEPrint(args[0])\n\t\t} else {\n\t\t\tdata, err = api.ListEPrintsURI()\n\t\t}\n\tdefault:\n\t\tif len(args) == 1 {\n\t\t\tdata, err = api.Get(args[0])\n\t\t} else if len(args) > 1 {\n\t\t\trecords := []*ep.Record{}\n\t\t\tfor _, id := range args {\n\t\t\t\tif rec, err := api.Get(id); err == nil {\n\t\t\t\t\trecords = append(records, rec)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"Can't read EPrint id %s, %s\\n\", id, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdata = records\n\t\t} else {\n\t\t\tdata, err = api.ListID(0, -1)\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif prettyPrint == true {\n\t\tsrc, _ = json.MarshalIndent(data, \"\", \" \")\n\t} else {\n\t\tsrc, _ = json.Marshal(data)\n\t}\n\tfmt.Printf(\"%s\", src)\n}\nfixed formatting\/\/\n\/\/ Package ep is a collection of structures and functions for working with the EPrints REST API\n\/\/\n\/\/ @author R. S. Doiel, \n\/\/\n\/\/ Copyright (c) 2017, Caltech\n\/\/ All rights not granted herein are expressly reserved by Caltech.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\/\/ Caltech Library Packages\n\t\"github.com\/caltechlibrary\/cli\"\n\tep \"github.com\/caltechlibrary\/eprinttools\"\n)\n\nvar (\n\t\/\/ cli help text\n\tusage = `USAGE %s [OPTIONS] [EP_EPRINTS_URL|ONE_OR_MORE_EPRINT_ID]`\n\n\tdescription = `\nSYNOPSIS\n\n%s wraps the REST API for EPrints 3.3 or better. It can return a list \nof uri, a JSON view of the XML presentation as well as generates feeds \nand web pages.\n\nCONFIGURATION\n\nep can be configured with following environment variables\n\nEP_EPRINTS_URL the URL to your EPrints installation\n\nEP_DATASET the dataset and collection name for exporting, site building, and content retrieval`\n\n\texamples = `\nEXAMPLE\n\n %s -export all\n\nWould export the entire EPrints repository public content defined by the\nenvironment virables EP_API_URL, EP_DATASET.\n\n %s -export 2000\n\nWould export 2000 EPrints from the repository with the heighest ID values.\n\n %s -export-modified 2017-07-01\n\nWould export the EPrint records modified since July 1, 2017.\n\n %s -export-modified 2017-07-01,2017-07-31 \\\n -export-save-keys=july-keys.txt \n\nWould export the EPrint records with modified times in July 2017 and\nsave the keys for the records exported with one key per line. \n`\n\n\t\/\/ Standard Options\n\tshowHelp bool\n\tshowVersion bool\n\tshowLicense bool\n\tshowExamples bool\n\toutputFName string\n\tverbose bool\n\n\t\/\/ App Options\n\tuseAPI bool\n\tprettyPrint bool\n\n\tapiURL string\n\tdatasetName string\n\n\tupdatedSince string\n\texportEPrints string\n\texportEPrintsModified string\n\texportSaveKeys string\n\tfeedSize int\n\n\tauthMethod string\n\tuserName string\n\tuserSecret string\n)\n\nfunc init() {\n\t\/\/ Setup options\n\tfeedSize = ep.DefaultFeedSize\n\n\tflag.BoolVar(&showHelp, \"h\", false, \"display help\")\n\tflag.BoolVar(&showHelp, \"help\", false, \"display help\")\n\tflag.BoolVar(&showLicense, \"l\", false, \"display license\")\n\tflag.BoolVar(&showLicense, \"license\", false, \"display license\")\n\tflag.BoolVar(&showVersion, \"v\", false, \"display version\")\n\tflag.BoolVar(&showVersion, \"version\", false, \"display version\")\n\tflag.BoolVar(&showExamples, \"example\", false, \"display example(s)\")\n\tflag.StringVar(&outputFName, \"o\", \"\", \"output filename (logging)\")\n\tflag.StringVar(&outputFName, \"output\", \"\", \"output filename (logging)\")\n\tflag.BoolVar(&verbose, \"verbose\", true, \"verbose logging\")\n\n\t\/\/ App Specific options\n\tflag.StringVar(&authMethod, \"auth\", \"\", \"set the authentication method (e.g. 
none, basic, oauth, shib)\")\n\tflag.StringVar(&userName, \"username\", \"\", \"set the username\")\n\tflag.StringVar(&userName, \"un\", \"\", \"set the username\")\n\tflag.StringVar(&userSecret, \"pw\", \"\", \"set the password\")\n\n\tflag.StringVar(&apiURL, \"api\", \"\", \"url for EPrints API\")\n\tflag.StringVar(&datasetName, \"dataset\", \"\", \"dataset\/collection name\")\n\n\tflag.BoolVar(&prettyPrint, \"p\", false, \"pretty print JSON output\")\n\tflag.BoolVar(&prettyPrint, \"pretty\", false, \"pretty print JSON output\")\n\tflag.BoolVar(&useAPI, \"read-api\", false, \"read the contents from the API without saving in the database\")\n\tflag.StringVar(&exportEPrints, \"export\", \"\", \"export N EPrints from highest ID to lowest\")\n\tflag.StringVar(&exportEPrintsModified, \"export-modified\", \"\", \"export records by date or date range (e.g. 2017-07-01)\")\n\tflag.StringVar(&exportSaveKeys, \"export-save-keys\", \"\", \"save the keys exported in a file with provided filename\")\n\tflag.StringVar(&updatedSince, \"updated-since\", \"\", \"list EPrint IDs updated since a given date (e.g 2017-07-01)\")\n}\n\nfunc check(cfg *cli.Config, key, value string) string {\n\tif value == \"\" {\n\t\tlog.Fatalf(\"Missing %s_%s\", cfg.EnvPrefix, strings.ToUpper(key))\n\t\treturn \"\"\n\t}\n\treturn value\n}\n\nfunc main() {\n\tappName := path.Base(os.Args[0])\n\tflag.Parse()\n\targs := flag.Args()\n\n\t\/\/ Populate cfg from the environment\n\tcfg := cli.New(appName, \"EP\", ep.Version)\n\tcfg.LicenseText = fmt.Sprintf(ep.LicenseText, appName, ep.Version)\n\tcfg.UsageText = fmt.Sprintf(usage, appName)\n\tcfg.DescriptionText = fmt.Sprintf(description, appName, appName)\n\tcfg.OptionText = \"OPTIONS\"\n\tcfg.ExampleText = fmt.Sprintf(examples, appName, appName, appName, appName)\n\n\t\/\/ Handle the default options\n\tif showHelp == true {\n\t\tif len(args) > 0 {\n\t\t\tfmt.Println(cfg.Help(args...))\n\t\t} else {\n\t\t\tfmt.Println(cfg.Usage())\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif showExamples == true {\n\t\tif len(args) > 0 {\n\t\t\tfmt.Println(cfg.Example(args...))\n\t\t} else {\n\t\t\tfmt.Println(cfg.ExampleText)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\tif showVersion == true {\n\t\tfmt.Println(cfg.Version())\n\t\tos.Exit(0)\n\t}\n\tif showLicense == true {\n\t\tfmt.Println(cfg.License())\n\t\tos.Exit(0)\n\t}\n\n\tout, err := cli.Create(outputFName, os.Stdout)\n\tif err != nil {\n\t\tfmt.Fprint(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer cli.CloseFile(outputFName, out)\n\n\t\/\/ Log to out\n\tlog.SetOutput(out)\n\n\t\/\/ Required configuration\n\tapiURL = check(cfg, \"eprint_url\", cfg.MergeEnv(\"eprint_url\", apiURL))\n\tdatasetName = check(cfg, \"dataset\", cfg.MergeEnv(\"dataset\", datasetName))\n\n\t\/\/ Optional configuration\n\tauthMethod = cfg.MergeEnv(\"auth_method\", authMethod)\n\tuserName = cfg.MergeEnv(\"username\", userName)\n\tuserSecret = cfg.MergeEnv(\"password\", userSecret)\n\n\t\/\/ This will read in any settings from the environment\n\tapi, err := ep.New(cfg)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif exportEPrints != \"\" {\n\t\tt0 := time.Now()\n\t\texportNo := -1\n\t\tif exportEPrints != \"all\" {\n\t\t\texportNo, err = strconv.Atoi(exportEPrints)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Export count should be %q or an integer, %s\", exportEPrints, err)\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"%s %s (pid %d)\", appName, ep.Version, os.Getpid())\n\t\tlog.Printf(\"Export started, %s\", t0)\n\t\tif err := api.ExportEPrints(exportNo, exportSaveKeys, verbose); err 
!= nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlog.Printf(\"Export completed, running time %s\", time.Now().Sub(t0))\n\t\tos.Exit(0)\n\t}\n\tif exportEPrintsModified != \"\" {\n\t\ts := exportEPrintsModified\n\t\te := time.Now().Format(\"2006-01-02\")\n\t\tif strings.Contains(s, \",\") {\n\t\t\tp := strings.SplitN(s, \",\", 2)\n\t\t\ts, e = p[0], p[1]\n\t\t}\n\t\tstart, err := time.Parse(\"2006-01-02\", s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tend, err := time.Parse(\"2006-01-02\", e)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tt0 := time.Now()\n\t\tlog.Printf(\"%s %s (pid %d)\", appName, ep.Version, os.Getpid())\n\t\tlog.Printf(\"Export from %s to %s, started %s\", start.Format(\"2006-01-02\"), end.Format(\"2006-01-02\"), t0.Format(\"2006-01-02 15:04:05 MST\"))\n\t\tif err := api.ExportModifiedEPrints(start, end, exportSaveKeys, verbose); err != nil {\n\t\t\tlog.Printf(\"%s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlog.Printf(\"Export completed, running time %s\", time.Now().Sub(t0))\n\t\tos.Exit(0)\n\t}\n\n\t\/\/\n\t\/\/ Generate JSON output\n\t\/\/\n\tvar (\n\t\tsrc []byte\n\t\tdata interface{}\n\t)\n\tswitch {\n\tcase updatedSince != \"\":\n\t\t\/\/ date should be formatted YYYY-MM-DD, 2006-01-02\n\t\tend := time.Now()\n\t\tstart, err := time.Parse(\"2006-01-02\", updatedSince)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"updated since %q, %s\", updatedSince, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdata, err = api.ListModifiedEPrintURI(start, end, verbose)\n\tcase useAPI == true:\n\t\tif len(args) == 1 {\n\t\t\tdata, _, err = api.GetEPrint(args[0])\n\t\t} else {\n\t\t\tdata, err = api.ListEPrintsURI()\n\t\t}\n\tdefault:\n\t\tif len(args) == 1 {\n\t\t\tdata, err = api.Get(args[0])\n\t\t} else if len(args) > 1 {\n\t\t\trecords := []*ep.Record{}\n\t\t\tfor _, id := range args {\n\t\t\t\tif rec, err := api.Get(id); err == nil {\n\t\t\t\t\trecords = append(records, rec)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"Can't read EPrint id %s, %s\\n\", id, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdata = records\n\t\t} else {\n\t\t\tdata, err = api.ListID(0, -1)\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif prettyPrint == true {\n\t\tsrc, _ = json.MarshalIndent(data, \"\", \" \")\n\t} else {\n\t\tsrc, _ = json.Marshal(data)\n\t}\n\tfmt.Printf(\"%s\", src)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage matcher\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3cluster\/client\"\n\t\"github.com\/m3db\/m3cluster\/kv\"\n\t\"github.com\/m3db\/m3metrics\/filters\"\n\t\"github.com\/m3db\/m3metrics\/metric\/id\"\n\t\"github.com\/m3db\/m3metrics\/metric\/id\/m3\"\n\t\"github.com\/m3db\/m3metrics\/policy\"\n\t\"github.com\/m3db\/m3metrics\/rules\"\n\t\"github.com\/m3db\/m3x\/clock\"\n\t\"github.com\/m3db\/m3x\/instrument\"\n\t\"github.com\/m3db\/m3x\/pool\"\n)\n\n\/\/ Configuration is config used to create a Matcher.\ntype Configuration struct {\n\tInitWatchTimeout time.Duration `yaml:\"initWatchTimeout\"`\n\tRulesKVConfig kv.Configuration `yaml:\"rulesKVConfig\"`\n\tNamespacesKey string `yaml:\"namespacesKey\" validate:\"nonzero\"`\n\tRuleSetKeyFmt string `yaml:\"ruleSetKeyFmt\" validate:\"nonzero\"`\n\tNamespaceTag string `yaml:\"namespaceTag\" validate:\"nonzero\"`\n\tDefaultNamespace string `yaml:\"defaultNamespace\" validate:\"nonzero\"`\n\tNameTagKey string `yaml:\"nameTagKey\" validate:\"nonzero\"`\n\tMatchRangePast *time.Duration `yaml:\"matchRangePast\"`\n\tSortedTagIteratorPool pool.ObjectPoolConfiguration `yaml:\"sortedTagIteratorPool\"`\n\tAggregationTypes policy.AggregationTypesConfiguration `yaml:\"aggregationTypes\"`\n}\n\n\/\/ NewNamespaces creates a matcher.Namespaces.\nfunc (cfg *Configuration) NewNamespaces(\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Namespaces, error) {\n\topts, err := cfg.NewOptions(kvCluster, clockOpts, instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnamespaces := NewNamespaces(opts.NamespacesKey(), opts)\n\treturn namespaces, nil\n}\n\n\/\/ NewMatcher creates a Matcher.\nfunc (cfg *Configuration) NewMatcher(\n\tcache Cache,\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Matcher, error) {\n\topts, err := cfg.NewOptions(kvCluster, clockOpts, instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewMatcher(cache, opts)\n}\n\n\/\/ NewOptions creates a Options.\nfunc (cfg *Configuration) NewOptions(\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Options, error) {\n\t\/\/ Configure rules kv store.\n\tkvOpts, err := cfg.RulesKVConfig.NewOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := kvOpts.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trulesStore, err := kvCluster.Store(kvOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Configure rules options.\n\tscope := instrumentOpts.MetricsScope().SubScope(\"sorted-tag-iterator-pool\")\n\tpoolOpts := cfg.SortedTagIteratorPool.NewObjectPoolOptions(instrumentOpts.SetMetricsScope(scope))\n\tsortedTagIteratorPool := id.NewSortedTagIteratorPool(poolOpts)\n\tsortedTagIteratorPool.Init(func() id.SortedTagIterator {\n\t\treturn m3.NewPooledSortedTagIterator(nil, sortedTagIteratorPool)\n\t})\n\tsortedTagIteratorFn := func(tagPairs []byte) id.SortedTagIterator {\n\t\tit := sortedTagIteratorPool.Get()\n\t\tit.Reset(tagPairs)\n\t\treturn it\n\t}\n\ttagsFilterOptions := filters.TagsFilterOptions{\n\t\tNameTagKey: []byte(cfg.NameTagKey),\n\t\tNameAndTagsFn: m3.NameAndTags,\n\t\tSortedTagIteratorFn: 
sortedTagIteratorFn,\n\t}\n\n\tisRollupIDFn := func(name []byte, tags []byte) bool {\n\t\treturn m3.IsRollupID(name, tags, sortedTagIteratorPool)\n\t}\n\n\taggTypeOpts, err := cfg.AggregationTypes.NewOptions(instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\truleSetOpts := rules.NewOptions().\n\t\tSetTagsFilterOptions(tagsFilterOptions).\n\t\tSetNewRollupIDFn(m3.NewRollupID).\n\t\tSetIsRollupIDFn(isRollupIDFn).\n\t\tSetAggregationTypesOptions(aggTypeOpts)\n\n\t\/\/ Configure ruleset key function.\n\truleSetKeyFn := func(namespace []byte) string {\n\t\treturn fmt.Sprintf(cfg.RuleSetKeyFmt, namespace)\n\t}\n\n\topts := NewOptions().\n\t\tSetClockOptions(clockOpts).\n\t\tSetInstrumentOptions(instrumentOpts).\n\t\tSetRuleSetOptions(ruleSetOpts).\n\t\tSetKVStore(rulesStore).\n\t\tSetNamespacesKey(cfg.NamespacesKey).\n\t\tSetRuleSetKeyFn(ruleSetKeyFn).\n\t\tSetNamespaceTag([]byte(cfg.NamespaceTag)).\n\t\tSetDefaultNamespace([]byte(cfg.DefaultNamespace))\n\n\tif cfg.InitWatchTimeout != 0 {\n\t\topts = opts.SetInitWatchTimeout(cfg.InitWatchTimeout)\n\t}\n\tif cfg.MatchRangePast != nil {\n\t\topts = opts.SetMatchRangePast(*cfg.MatchRangePast)\n\t}\n\n\treturn opts, nil\n}\nRemove options validation since it's done inside m3cluster (#118)\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage matcher\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/m3db\/m3cluster\/client\"\n\t\"github.com\/m3db\/m3cluster\/kv\"\n\t\"github.com\/m3db\/m3metrics\/filters\"\n\t\"github.com\/m3db\/m3metrics\/metric\/id\"\n\t\"github.com\/m3db\/m3metrics\/metric\/id\/m3\"\n\t\"github.com\/m3db\/m3metrics\/policy\"\n\t\"github.com\/m3db\/m3metrics\/rules\"\n\t\"github.com\/m3db\/m3x\/clock\"\n\t\"github.com\/m3db\/m3x\/instrument\"\n\t\"github.com\/m3db\/m3x\/pool\"\n)\n\n\/\/ Configuration is config used to create a Matcher.\ntype Configuration struct {\n\tInitWatchTimeout time.Duration `yaml:\"initWatchTimeout\"`\n\tRulesKVConfig kv.Configuration `yaml:\"rulesKVConfig\"`\n\tNamespacesKey string `yaml:\"namespacesKey\" validate:\"nonzero\"`\n\tRuleSetKeyFmt string `yaml:\"ruleSetKeyFmt\" validate:\"nonzero\"`\n\tNamespaceTag string `yaml:\"namespaceTag\" validate:\"nonzero\"`\n\tDefaultNamespace string `yaml:\"defaultNamespace\" validate:\"nonzero\"`\n\tNameTagKey string `yaml:\"nameTagKey\" validate:\"nonzero\"`\n\tMatchRangePast *time.Duration `yaml:\"matchRangePast\"`\n\tSortedTagIteratorPool pool.ObjectPoolConfiguration `yaml:\"sortedTagIteratorPool\"`\n\tAggregationTypes policy.AggregationTypesConfiguration `yaml:\"aggregationTypes\"`\n}\n\n\/\/ NewNamespaces creates a matcher.Namespaces.\nfunc (cfg *Configuration) NewNamespaces(\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Namespaces, error) {\n\topts, err := cfg.NewOptions(kvCluster, clockOpts, instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnamespaces := NewNamespaces(opts.NamespacesKey(), opts)\n\treturn namespaces, nil\n}\n\n\/\/ NewMatcher creates a Matcher.\nfunc (cfg *Configuration) NewMatcher(\n\tcache Cache,\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Matcher, error) {\n\topts, err := cfg.NewOptions(kvCluster, clockOpts, instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewMatcher(cache, opts)\n}\n\n\/\/ NewOptions creates a Options.\nfunc (cfg *Configuration) NewOptions(\n\tkvCluster client.Client,\n\tclockOpts clock.Options,\n\tinstrumentOpts instrument.Options,\n) (Options, error) {\n\t\/\/ Configure rules kv store.\n\tkvOpts, err := cfg.RulesKVConfig.NewOptions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trulesStore, err := kvCluster.Store(kvOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Configure rules options.\n\tscope := instrumentOpts.MetricsScope().SubScope(\"sorted-tag-iterator-pool\")\n\tpoolOpts := cfg.SortedTagIteratorPool.NewObjectPoolOptions(instrumentOpts.SetMetricsScope(scope))\n\tsortedTagIteratorPool := id.NewSortedTagIteratorPool(poolOpts)\n\tsortedTagIteratorPool.Init(func() id.SortedTagIterator {\n\t\treturn m3.NewPooledSortedTagIterator(nil, sortedTagIteratorPool)\n\t})\n\tsortedTagIteratorFn := func(tagPairs []byte) id.SortedTagIterator {\n\t\tit := sortedTagIteratorPool.Get()\n\t\tit.Reset(tagPairs)\n\t\treturn it\n\t}\n\ttagsFilterOptions := filters.TagsFilterOptions{\n\t\tNameTagKey: []byte(cfg.NameTagKey),\n\t\tNameAndTagsFn: m3.NameAndTags,\n\t\tSortedTagIteratorFn: sortedTagIteratorFn,\n\t}\n\n\tisRollupIDFn := func(name []byte, tags []byte) 
bool {\n\t\treturn m3.IsRollupID(name, tags, sortedTagIteratorPool)\n\t}\n\n\taggTypeOpts, err := cfg.AggregationTypes.NewOptions(instrumentOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\truleSetOpts := rules.NewOptions().\n\t\tSetTagsFilterOptions(tagsFilterOptions).\n\t\tSetNewRollupIDFn(m3.NewRollupID).\n\t\tSetIsRollupIDFn(isRollupIDFn).\n\t\tSetAggregationTypesOptions(aggTypeOpts)\n\n\t\/\/ Configure ruleset key function.\n\truleSetKeyFn := func(namespace []byte) string {\n\t\treturn fmt.Sprintf(cfg.RuleSetKeyFmt, namespace)\n\t}\n\n\topts := NewOptions().\n\t\tSetClockOptions(clockOpts).\n\t\tSetInstrumentOptions(instrumentOpts).\n\t\tSetRuleSetOptions(ruleSetOpts).\n\t\tSetKVStore(rulesStore).\n\t\tSetNamespacesKey(cfg.NamespacesKey).\n\t\tSetRuleSetKeyFn(ruleSetKeyFn).\n\t\tSetNamespaceTag([]byte(cfg.NamespaceTag)).\n\t\tSetDefaultNamespace([]byte(cfg.DefaultNamespace))\n\n\tif cfg.InitWatchTimeout != 0 {\n\t\topts = opts.SetInitWatchTimeout(cfg.InitWatchTimeout)\n\t}\n\tif cfg.MatchRangePast != nil {\n\t\topts = opts.SetMatchRangePast(*cfg.MatchRangePast)\n\t}\n\n\treturn opts, nil\n}\n<|endoftext|>"} {"text":"package cli\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise-cli\/bitrise\"\n\tmodels \"github.com\/bitrise-io\/bitrise-cli\/models\/models_1_0_0\"\n\t\"github.com\/bitrise-io\/go-pathutil\/pathutil\"\n\t\"github.com\/bitrise-io\/goinp\/goinp\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nvar defaultSecretsContent = `envs:\n- MY_HOME: $HOME\n- MY_SECRET_PASSWORD: XyZ\n is_expand: no\n # Hint: You can use is_expand: no\n # if you want to make it sure that\n # the value is preserved as-it-is, and won't be\n # expanded before use.\n # For example if your password contains the dollar sign ($)\n # it would (by default) be expanded as an environment variable.\n # You can prevent this with is_expand: no`\n\nfunc doInit(c *cli.Context) {\n\tbitriseConfigFileRelPath := \".\/\" + DefaultBitriseConfigFileName\n\tbitriseSecretsFileRelPath := \".\/\" + DefaultSecretsFileName\n\n\tif exists, err := pathutil.IsPathExists(bitriseConfigFileRelPath); err != nil {\n\t\tlog.Fatalln(\"Error:\", err)\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A config file already exists at %s - do you want to overwrite it?\", bitriseConfigFileRelPath)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\tlog.Fatalln(\"Error:\", err)\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file won't be overwritten.\")\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tdefaultExpand := true\n\tprojectSettingsEnvs := []models.InputModel{}\n\tif val, err := goinp.AskForString(\"What's the BITRISE_PROJECT_TITLE?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tprojectTitleEnv := models.InputModel{MappedTo: \"BITRISE_PROJECT_TITLE\", Value: val, IsExpand: &defaultExpand}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, projectTitleEnv)\n\t}\n\tif val, err := goinp.AskForString(\"What's your primary development branch's name?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tdevBranchEnv := models.InputModel{MappedTo: \"BITRISE_DEV_BRANCH\", Value: val, IsExpand: &defaultExpand}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, devBranchEnv)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ generate a couple of base steps\n\t\/\/ * timestamp gen\n\t\/\/ * bash script - hello world\n\n\tbitriseConf := models.BitriseConfigModel{\n\t\tFormatVersion: \"1.0.0\", \/\/ TODO: move this into a project config file!\n\t\tApp: 
models.AppModel{\n\t\t\tEnvironments: projectSettingsEnvs,\n\t\t},\n\t\tWorkflows: map[string]models.WorkflowModel{\n\t\t\t\"primary\": models.WorkflowModel{},\n\t\t},\n\t}\n\n\tif err := saveConfigToFile(bitriseConfigFileRelPath, bitriseConf); err != nil {\n\t\tlog.Fatalln(\"Failed to init the bitrise config file:\", err)\n\t} else {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultBitriseConfigFileName + \" config file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We initialized a \" + DefaultBitriseConfigFileName + \" config file for you.\")\n\t\tfmt.Println(\"If you're in this folder you can use this config file\")\n\t\tfmt.Println(\" with bitrise-cli automatically, you don't have to\")\n\t\tfmt.Println(\" specify it's path.\")\n\t\tfmt.Println()\n\t}\n\n\tif initialized, err := saveSecretsToFile(bitriseSecretsFileRelPath, defaultSecretsContent); err != nil {\n\t\tlog.Fatalln(\"Failed to init the secrets file:\", err)\n\t} else if initialized {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultSecretsFileName + \" secrets file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We also created a \" + DefaultSecretsFileName + \" file\")\n\t\tfmt.Println(\" in this directory, to keep your passwords, absolute path configurations\")\n\t\tfmt.Println(\" and other secrets separate from your\")\n\t\tfmt.Println(\" main configuration file.\")\n\t\tfmt.Println(\"This way you can safely commit and share your configuration file\")\n\t\tfmt.Println(\" and ignore this secrets file, so nobody else will\")\n\t\tfmt.Println(\" know about your secrets.\")\n\t\tfmt.Println(\"You should NEVER commit this secrets file into your repository!!\")\n\t\tfmt.Println()\n\t}\n\n\tfmt.Println()\n\tfmt.Println(\"Hurray, you're good to go!\")\n\tfmt.Println(\"You can simply run:\")\n\tfmt.Println(\"-> bitrise-cli run primary\")\n\tfmt.Println(\"to test the sample configuration (which contains\")\n\tfmt.Println(\"an example workflow called 'primary').\")\n\tfmt.Println()\n\tfmt.Println(\"Once you tested this sample setup you can\")\n\tfmt.Println(\" open the \" + DefaultBitriseConfigFileName + \" config file,\")\n\tfmt.Println(\" modify it and then run a workflow with:\")\n\tfmt.Println(\"-> bitrise-cli run YOUR-WORKFLOW-NAME\")\n}\n\nfunc saveSecretsToFile(pth, secretsStr string) (bool, error) {\n\tif exists, err := pathutil.IsPathExists(pth); err != nil {\n\t\treturn false, err\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A secrets file already exists at %s - do you want to overwrite it?\", pth)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\treturn false, err\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file (\" + pth + \") won't be overwritten.\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif err := bitrise.WriteStringToFile(pth, secretsStr); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc saveConfigToFile(pth string, bitriseConf models.BitriseConfigModel) error {\n\tcontBytes, err := generateYAML(bitriseConf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := bitrise.WriteBytesToFile(pth, contBytes); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println()\n\tlog.Infoln(\"=> Init success!\")\n\tlog.Infoln(\"File created at path:\", pth)\n\tlog.Infoln(\"With the content:\")\n\tlog.Infoln(string(contBytes))\n\n\treturn nil\n}\n\nfunc generateYAML(v interface{}) ([]byte, error) {\n\tbytes, err := yaml.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn bytes, nil\n}\ncode stylepackage cli\n\nimport 
(\n\t\"fmt\"\n\t\"os\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bitrise-io\/bitrise-cli\/bitrise\"\n\tmodels \"github.com\/bitrise-io\/bitrise-cli\/models\/models_1_0_0\"\n\t\"github.com\/bitrise-io\/go-pathutil\/pathutil\"\n\t\"github.com\/bitrise-io\/goinp\/goinp\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nvar defaultSecretsContent = `envs:\n- MY_HOME: $HOME\n- MY_SECRET_PASSWORD: XyZ\n is_expand: no\n # Hint: You can use is_expand: no\n # if you want to make it sure that\n # the value is preserved as-it-is, and won't be\n # expanded before use.\n # For example if your password contains the dollar sign ($)\n # it would (by default) be expanded as an environment variable.\n # You can prevent this with is_expand: no`\n\nfunc doInit(c *cli.Context) {\n\tbitriseConfigFileRelPath := \".\/\" + DefaultBitriseConfigFileName\n\tbitriseSecretsFileRelPath := \".\/\" + DefaultSecretsFileName\n\n\tif exists, err := pathutil.IsPathExists(bitriseConfigFileRelPath); err != nil {\n\t\tlog.Fatalln(\"Error:\", err)\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A config file already exists at %s - do you want to overwrite it?\", bitriseConfigFileRelPath)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\tlog.Fatalln(\"Error:\", err)\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file won't be overwritten.\")\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tdefaultExpand := true\n\tprojectSettingsEnvs := []models.InputModel{}\n\tif val, err := goinp.AskForString(\"What's the BITRISE_PROJECT_TITLE?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tprojectTitleEnv := models.InputModel{\n\t\t\tMappedTo: \"BITRISE_PROJECT_TITLE\",\n\t\t\tValue: val,\n\t\t\tIsExpand: &defaultExpand,\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, projectTitleEnv)\n\t}\n\tif val, err := goinp.AskForString(\"What's your primary development branch's name?\"); err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tdevBranchEnv := models.InputModel{\n\t\t\tMappedTo: \"BITRISE_DEV_BRANCH\",\n\t\t\tValue: val,\n\t\t\tIsExpand: &defaultExpand,\n\t\t}\n\t\tprojectSettingsEnvs = append(projectSettingsEnvs, devBranchEnv)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ generate a couple of base steps\n\t\/\/ * timestamp gen\n\t\/\/ * bash script - hello world\n\n\tbitriseConf := models.BitriseConfigModel{\n\t\tFormatVersion: \"1.0.0\", \/\/ TODO: move this into a project config file!\n\t\tApp: models.AppModel{\n\t\t\tEnvironments: projectSettingsEnvs,\n\t\t},\n\t\tWorkflows: map[string]models.WorkflowModel{\n\t\t\t\"primary\": models.WorkflowModel{},\n\t\t},\n\t}\n\n\tif err := saveConfigToFile(bitriseConfigFileRelPath, bitriseConf); err != nil {\n\t\tlog.Fatalln(\"Failed to init the bitrise config file:\", err)\n\t} else {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultBitriseConfigFileName + \" config file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We initialized a \" + DefaultBitriseConfigFileName + \" config file for you.\")\n\t\tfmt.Println(\"If you're in this folder you can use this config file\")\n\t\tfmt.Println(\" with bitrise-cli automatically, you don't have to\")\n\t\tfmt.Println(\" specify it's path.\")\n\t\tfmt.Println()\n\t}\n\n\tif initialized, err := saveSecretsToFile(bitriseSecretsFileRelPath, defaultSecretsContent); err != nil {\n\t\tlog.Fatalln(\"Failed to init the secrets file:\", err)\n\t} else if initialized {\n\t\tfmt.Println()\n\t\tfmt.Println(\"# NOTES about the \" + DefaultSecretsFileName + \" secrets 
file:\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"We also created a \" + DefaultSecretsFileName + \" file\")\n\t\tfmt.Println(\" in this directory, to keep your passwords, absolute path configurations\")\n\t\tfmt.Println(\" and other secrets separate from your\")\n\t\tfmt.Println(\" main configuration file.\")\n\t\tfmt.Println(\"This way you can safely commit and share your configuration file\")\n\t\tfmt.Println(\" and ignore this secrets file, so nobody else will\")\n\t\tfmt.Println(\" know about your secrets.\")\n\t\tfmt.Println(\"You should NEVER commit this secrets file into your repository!!\")\n\t\tfmt.Println()\n\t}\n\n\tfmt.Println()\n\tfmt.Println(\"Hurray, you're good to go!\")\n\tfmt.Println(\"You can simply run:\")\n\tfmt.Println(\"-> bitrise-cli run primary\")\n\tfmt.Println(\"to test the sample configuration (which contains\")\n\tfmt.Println(\"an example workflow called 'primary').\")\n\tfmt.Println()\n\tfmt.Println(\"Once you tested this sample setup you can\")\n\tfmt.Println(\" open the \" + DefaultBitriseConfigFileName + \" config file,\")\n\tfmt.Println(\" modify it and then run a workflow with:\")\n\tfmt.Println(\"-> bitrise-cli run YOUR-WORKFLOW-NAME\")\n}\n\nfunc saveSecretsToFile(pth, secretsStr string) (bool, error) {\n\tif exists, err := pathutil.IsPathExists(pth); err != nil {\n\t\treturn false, err\n\t} else if exists {\n\t\task := fmt.Sprintf(\"A secrets file already exists at %s - do you want to overwrite it?\", pth)\n\t\tif val, err := goinp.AskForBool(ask); err != nil {\n\t\t\treturn false, err\n\t\t} else if !val {\n\t\t\tlog.Infoln(\"Init canceled, existing file (\" + pth + \") won't be overwritten.\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif err := bitrise.WriteStringToFile(pth, secretsStr); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc saveConfigToFile(pth string, bitriseConf models.BitriseConfigModel) error {\n\tcontBytes, err := generateYAML(bitriseConf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := bitrise.WriteBytesToFile(pth, contBytes); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println()\n\tlog.Infoln(\"=> Init success!\")\n\tlog.Infoln(\"File created at path:\", pth)\n\tlog.Infoln(\"With the content:\")\n\tlog.Infoln(string(contBytes))\n\n\treturn nil\n}\n\nfunc generateYAML(v interface{}) ([]byte, error) {\n\tbytes, err := yaml.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn bytes, nil\n}\n<|endoftext|>"} {"text":"\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"path\/filepath\"\n\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/spf13\/cobra\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n)\n\nconst (\n\tmemory = \"memory\"\n\tvmDriver = \"vm-driver\"\n\tcpus = \"cpus\"\n\tconsole = 
\"console\"\n\tipaas = \"ipaas\"\n\tdiskSize = \"disk-size\"\n)\n\n\/\/ NewCmdStart starts a local cloud environment\nfunc NewCmdStart(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts a local cloud development environment\",\n\t\tLong: `Starts a local cloud development environment`,\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tflag := cmd.Flags().Lookup(minishift)\n\t\t\tisOpenshift := false\n\t\t\tif flag != nil {\n\t\t\t\tisOpenshift = flag.Value.String() == \"true\"\n\t\t\t}\n\n\t\t\tflag = cmd.Flags().Lookup(ipaas)\n\t\t\tisIPaaS := false\n\t\t\tif flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\tisOpenshift = true\n\t\t\t\tisIPaaS = true\n\t\t\t}\n\n\t\t\tif !isInstalled(isOpenshift) {\n\t\t\t\tinstall(isOpenshift)\n\t\t\t}\n\t\t\tkubeBinary := minikube\n\t\t\tif isOpenshift {\n\t\t\t\tkubeBinary = minishift\n\t\t\t}\n\n\t\t\tif runtime.GOOS == \"windows\" && !strings.HasSuffix(kubeBinary, \".exe\") {\n\t\t\t\tkubeBinary += \".exe\"\n\t\t\t}\n\n\t\t\tbinaryFile := resolveBinaryLocation(kubeBinary)\n\n\t\t\t\/\/ check if already running\n\t\t\tout, err := exec.Command(binaryFile, \"status\").Output()\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to get status %v\", err)\n\t\t\t}\n\n\t\t\tif err == nil && strings.Contains(string(out), \"Running\") {\n\t\t\t\t\/\/ already running\n\t\t\t\tutil.Successf(\"%s already running\\n\", kubeBinary)\n\n\t\t\t\tkubectlBinaryFile := resolveBinaryLocation(kubectl)\n\n\t\t\t\t\/\/ setting context\n\t\t\t\tif kubeBinary == minikube {\n\t\t\t\t\te := exec.Command(kubectlBinaryFile, \"config\", \"use-context\", kubeBinary)\n\t\t\t\t\te.Stdout = os.Stdout\n\t\t\t\t\te.Stderr = os.Stderr\n\t\t\t\t\terr = e.Run()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ minishift context has changed, we need to work it out now\n\t\t\t\t\tutil.Info(\"minishift is already running, you can switch to the context\\n\")\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\targs := []string{\"start\"}\n\n\t\t\t\tvmDriverValue := cmd.Flags().Lookup(vmDriver).Value.String()\n\t\t\t\tif len(vmDriverValue) == 0 {\n\t\t\t\t\tswitch runtime.GOOS {\n\t\t\t\t\tcase \"darwin\":\n\t\t\t\t\t\tvmDriverValue = \"xhyve\"\n\t\t\t\t\tcase \"windows\":\n\t\t\t\t\t\tvmDriverValue = \"hyperv\"\n\t\t\t\t\tcase \"linux\":\n\t\t\t\t\t\tvmDriverValue = \"kvm\"\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvmDriverValue = \"virtualbox\"\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\targs = append(args, \"--vm-driver=\"+vmDriverValue)\n\n\t\t\t\t\/\/ set memory flag\n\t\t\t\tmemoryValue := cmd.Flags().Lookup(memory).Value.String()\n\t\t\t\targs = append(args, \"--memory=\"+memoryValue)\n\n\t\t\t\t\/\/ set cpu flag\n\t\t\t\tcpusValue := cmd.Flags().Lookup(cpus).Value.String()\n\t\t\t\targs = append(args, \"--cpus=\"+cpusValue)\n\n\t\t\t\t\/\/ set disk-size flag\n\t\t\t\tdiskSizeValue := cmd.Flags().Lookup(diskSize).Value.String()\n\t\t\t\targs = append(args, \"--disk-size=\"+diskSizeValue)\n\n\t\t\t\t\/\/ start the local VM\n\t\t\t\tlogCommand(binaryFile, args)\n\t\t\t\te := exec.Command(binaryFile, args...)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\te := exec.Command(\"oc\", \"login\", \"--username=\"+minishiftDefaultUsername, 
\"--password=\"+minishiftDefaultPassword)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to login %v\", err)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t\/\/ now check that fabric8 is running, if not deploy it\n\t\t\tc, err := keepTryingToGetClient(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to connect to %s %v\", kubeBinary, err)\n\t\t\t}\n\n\t\t\t\/\/ deploy fabric8 if its not already running\n\t\t\tns, _, _ := f.DefaultNamespace()\n\t\t\t_, err = c.Services(ns).Get(\"fabric8\")\n\t\t\tif err != nil {\n\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\td := GetDefaultFabric8Deployment()\n\t\t\t\tflag := cmd.Flags().Lookup(console)\n\t\t\t\tif isIPaaS {\n\t\t\t\t\td.packageName = \"ipaas\"\n\t\t\t\t} else if flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\t\td.packageName = \"console\"\n\t\t\t\t} else {\n\t\t\t\t\td.packageName = cmd.Flags().Lookup(packageFlag).Value.String()\n\t\t\t\t}\n\t\t\t\td.versionPlatform = cmd.Flags().Lookup(versionPlatformFlag).Value.String()\n\t\t\t\td.versioniPaaS = cmd.Flags().Lookup(versioniPaaSFlag).Value.String()\n\t\t\t\td.pv = cmd.Flags().Lookup(pvFlag).Value.String() == \"true\"\n\t\t\t\td.useIngress = cmd.Flags().Lookup(useIngressFlag).Value.String() == \"true\"\n\t\t\t\td.useLoadbalancer = cmd.Flags().Lookup(useLoadbalancerFlag).Value.String() == \"true\"\n\t\t\t\tdeploy(f, d)\n\n\t\t\t} else {\n\t\t\t\topenService(ns, \"fabric8\", c, false)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.PersistentFlags().BoolP(minishift, \"\", false, \"start the openshift flavour of Kubernetes\")\n\tcmd.PersistentFlags().BoolP(console, \"\", false, \"start only the fabric8 console\")\n\tcmd.PersistentFlags().BoolP(ipaas, \"\", false, \"start the fabric8 iPaaS\")\n\tcmd.PersistentFlags().StringP(memory, \"\", \"6144\", \"amount of RAM allocated to the VM\")\n\tcmd.PersistentFlags().StringP(vmDriver, \"\", \"\", \"the VM driver used to spin up the VM. Possible values (hyperv, xhyve, kvm, virtualbox, vmwarefusion)\")\n\tcmd.PersistentFlags().StringP(diskSize, \"\", \"20g\", \"the size of the disk allocated to the VM\")\n\tcmd.PersistentFlags().StringP(cpus, \"\", \"1\", \"number of CPUs allocated to the VM\")\n\tcmd.PersistentFlags().String(packageFlag, \"platform\", \"The name of the package to startup such as 'platform', 'console', 'ipaas'. Otherwise specify a URL or local file of the YAML to install\")\n\tcmd.PersistentFlags().String(versionPlatformFlag, \"latest\", \"The version to use for the Fabric8 Platform packages\")\n\tcmd.PersistentFlags().String(versioniPaaSFlag, \"latest\", \"The version to use for the Fabric8 iPaaS templates\")\n\tcmd.PersistentFlags().Bool(pvFlag, true, \"if false will convert deployments to use Kubernetes emptyDir and disable persistence for core apps\")\n\tcmd.PersistentFlags().Bool(useIngressFlag, true, \"Should Ingress NGINX controller be enabled by default when deploying to Kubernetes?\")\n\tcmd.PersistentFlags().Bool(useLoadbalancerFlag, false, \"Should Cloud Provider LoadBalancer be used to expose services when running to Kubernetes? 
(overrides ingress)\")\n\treturn cmd\n}\n\nfunc logCommand(executable string, args []string) {\n\tutil.Infof(\"running: %s %s\\n\", executable, strings.Join(args, \" \"))\n}\n\n\/\/ lets find the executable on the PATH or in the fabric8 directory\nfunc resolveBinaryLocation(executable string) string {\n\tpath, err := exec.LookPath(executable)\n\tif err != nil || fileNotExist(path) {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tutil.Error(\"No $HOME environment variable found\")\n\t\t}\n\t\twriteFileLocation := getFabric8BinLocation()\n\n\t\t\/\/ lets try in the fabric8 folder\n\t\tpath = filepath.Join(writeFileLocation, executable)\n\t\tif fileNotExist(path) {\n\t\t\tpath = executable\n\t\t\t\/\/ lets try in the folder where we found the gofabric8 executable\n\t\t\tfolder, err := osext.ExecutableFolder()\n\t\t\tif err != nil {\n\t\t\t\tutil.Errorf(\"Failed to find executable folder: %v\\n\", err)\n\t\t\t} else {\n\t\t\t\tpath = filepath.Join(folder, executable)\n\t\t\t\tif fileNotExist(path) {\n\t\t\t\t\tutil.Infof(\"Could not find executable at %v\\n\", path)\n\t\t\t\t\tpath = executable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tutil.Infof(\"using the executable %s\\n\", path)\n\treturn path\n}\n\nfunc findExecutable(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m := d.Mode(); !m.IsDir() {\n\t\treturn nil\n\t}\n\treturn os.ErrPermission\n}\n\nfunc fileNotExist(path string) bool {\n\treturn findExecutable(path) != nil\n}\n\nfunc keepTryingToGetClient(f *cmdutil.Factory) (*client.Client, error) {\n\ttimeout := time.After(2 * time.Minute)\n\ttick := time.Tick(1 * time.Second)\n\t\/\/ Keep trying until we're timed out or got a result or got an error\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! 
fail with a timeout error\n\t\tcase <-timeout:\n\t\t\treturn nil, errors.New(\"timed out\")\n\t\t\/\/ Got a tick, try and get teh client\n\t\tcase <-tick:\n\t\t\tc, _ := getClient(f)\n\t\t\t\/\/ return if we have a client\n\t\t\tif c != nil {\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t\tutil.Info(\"Cannot connect to api server, retrying...\\n\")\n\t\t\t\/\/ retry\n\t\t}\n\t}\n}\n\nfunc getClient(f *cmdutil.Factory) (*client.Client, error) {\n\tvar err error\n\tcfg, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := client.New(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\nlets allow the opening of the console to be optional\/**\n * Copyright (C) 2015 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage cmds\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"path\/filepath\"\n\n\t\"github.com\/fabric8io\/gofabric8\/util\"\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/spf13\/cobra\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\tcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n)\n\nconst (\n\tmemory = \"memory\"\n\tvmDriver = \"vm-driver\"\n\tcpus = \"cpus\"\n\tconsole = \"console\"\n\tipaas = \"ipaas\"\n\tdiskSize = \"disk-size\"\n\n\topenConsoleFlag = \"open-console\"\n)\n\n\/\/ NewCmdStart starts a local cloud environment\nfunc NewCmdStart(f *cmdutil.Factory) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"start\",\n\t\tShort: \"Starts a local cloud development environment\",\n\t\tLong: `Starts a local cloud development environment`,\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tflag := cmd.Flags().Lookup(minishift)\n\t\t\tisOpenshift := false\n\t\t\tif flag != nil {\n\t\t\t\tisOpenshift = flag.Value.String() == \"true\"\n\t\t\t}\n\n\t\t\tflag = cmd.Flags().Lookup(ipaas)\n\t\t\tisIPaaS := false\n\t\t\tif flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\tisOpenshift = true\n\t\t\t\tisIPaaS = true\n\t\t\t}\n\n\t\t\tif !isInstalled(isOpenshift) {\n\t\t\t\tinstall(isOpenshift)\n\t\t\t}\n\t\t\tkubeBinary := minikube\n\t\t\tif isOpenshift {\n\t\t\t\tkubeBinary = minishift\n\t\t\t}\n\n\t\t\tif runtime.GOOS == \"windows\" && !strings.HasSuffix(kubeBinary, \".exe\") {\n\t\t\t\tkubeBinary += \".exe\"\n\t\t\t}\n\n\t\t\tbinaryFile := resolveBinaryLocation(kubeBinary)\n\n\t\t\t\/\/ check if already running\n\t\t\tout, err := exec.Command(binaryFile, \"status\").Output()\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to get status %v\", err)\n\t\t\t}\n\n\t\t\tif err == nil && strings.Contains(string(out), \"Running\") {\n\t\t\t\t\/\/ already running\n\t\t\t\tutil.Successf(\"%s already running\\n\", kubeBinary)\n\n\t\t\t\tkubectlBinaryFile := resolveBinaryLocation(kubectl)\n\n\t\t\t\t\/\/ setting context\n\t\t\t\tif kubeBinary == minikube {\n\t\t\t\t\te := exec.Command(kubectlBinaryFile, \"config\", \"use-context\", kubeBinary)\n\t\t\t\t\te.Stdout = os.Stdout\n\t\t\t\t\te.Stderr = 
os.Stderr\n\t\t\t\t\terr = e.Run()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ minishift context has changed, we need to work it out now\n\t\t\t\t\tutil.Info(\"minishift is already running, you can switch to the context\\n\")\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\targs := []string{\"start\"}\n\n\t\t\t\tvmDriverValue := cmd.Flags().Lookup(vmDriver).Value.String()\n\t\t\t\tif len(vmDriverValue) == 0 {\n\t\t\t\t\tswitch runtime.GOOS {\n\t\t\t\t\tcase \"darwin\":\n\t\t\t\t\t\tvmDriverValue = \"xhyve\"\n\t\t\t\t\tcase \"windows\":\n\t\t\t\t\t\tvmDriverValue = \"hyperv\"\n\t\t\t\t\tcase \"linux\":\n\t\t\t\t\t\tvmDriverValue = \"kvm\"\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvmDriverValue = \"virtualbox\"\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\targs = append(args, \"--vm-driver=\"+vmDriverValue)\n\n\t\t\t\t\/\/ set memory flag\n\t\t\t\tmemoryValue := cmd.Flags().Lookup(memory).Value.String()\n\t\t\t\targs = append(args, \"--memory=\"+memoryValue)\n\n\t\t\t\t\/\/ set cpu flag\n\t\t\t\tcpusValue := cmd.Flags().Lookup(cpus).Value.String()\n\t\t\t\targs = append(args, \"--cpus=\"+cpusValue)\n\n\t\t\t\t\/\/ set disk-size flag\n\t\t\t\tdiskSizeValue := cmd.Flags().Lookup(diskSize).Value.String()\n\t\t\t\targs = append(args, \"--disk-size=\"+diskSizeValue)\n\n\t\t\t\t\/\/ start the local VM\n\t\t\t\tlogCommand(binaryFile, args)\n\t\t\t\te := exec.Command(binaryFile, args...)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to start %v\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isOpenshift {\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\te := exec.Command(\"oc\", \"login\", \"--username=\"+minishiftDefaultUsername, \"--password=\"+minishiftDefaultPassword)\n\t\t\t\te.Stdout = os.Stdout\n\t\t\t\te.Stderr = os.Stderr\n\t\t\t\terr = e.Run()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Errorf(\"Unable to login %v\", err)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t\/\/ now check that fabric8 is running, if not deploy it\n\t\t\tc, err := keepTryingToGetClient(f)\n\t\t\tif err != nil {\n\t\t\t\tutil.Fatalf(\"Unable to connect to %s %v\", kubeBinary, err)\n\t\t\t}\n\n\t\t\t\/\/ deploy fabric8 if its not already running\n\t\t\tns, _, _ := f.DefaultNamespace()\n\t\t\t_, err = c.Services(ns).Get(\"fabric8\")\n\t\t\tif err != nil {\n\n\t\t\t\t\/\/ deploy fabric8\n\t\t\t\td := GetDefaultFabric8Deployment()\n\t\t\t\tflag := cmd.Flags().Lookup(console)\n\t\t\t\tif isIPaaS {\n\t\t\t\t\td.packageName = \"ipaas\"\n\t\t\t\t} else if flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\t\td.packageName = \"console\"\n\t\t\t\t} else {\n\t\t\t\t\td.packageName = cmd.Flags().Lookup(packageFlag).Value.String()\n\t\t\t\t}\n\t\t\t\td.versionPlatform = cmd.Flags().Lookup(versionPlatformFlag).Value.String()\n\t\t\t\td.versioniPaaS = cmd.Flags().Lookup(versioniPaaSFlag).Value.String()\n\t\t\t\td.pv = cmd.Flags().Lookup(pvFlag).Value.String() == \"true\"\n\t\t\t\td.useIngress = cmd.Flags().Lookup(useIngressFlag).Value.String() == \"true\"\n\t\t\t\td.useLoadbalancer = cmd.Flags().Lookup(useLoadbalancerFlag).Value.String() == \"true\"\n\t\t\t\tdeploy(f, d)\n\n\t\t\t} else {\n\t\t\t\tflag := cmd.Flags().Lookup(openConsoleFlag)\n\t\t\t\tif flag != nil && flag.Value.String() == \"true\" {\n\t\t\t\t\topenService(ns, \"fabric8\", c, false)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tcmd.PersistentFlags().BoolP(minishift, \"\", false, \"start the openshift flavour of 
Kubernetes\")\n\tcmd.PersistentFlags().BoolP(console, \"\", false, \"start only the fabric8 console\")\n\tcmd.PersistentFlags().BoolP(ipaas, \"\", false, \"start the fabric8 iPaaS\")\n\tcmd.PersistentFlags().StringP(memory, \"\", \"6144\", \"amount of RAM allocated to the VM\")\n\tcmd.PersistentFlags().StringP(vmDriver, \"\", \"\", \"the VM driver used to spin up the VM. Possible values (hyperv, xhyve, kvm, virtualbox, vmwarefusion)\")\n\tcmd.PersistentFlags().StringP(diskSize, \"\", \"20g\", \"the size of the disk allocated to the VM\")\n\tcmd.PersistentFlags().StringP(cpus, \"\", \"1\", \"number of CPUs allocated to the VM\")\n\tcmd.PersistentFlags().String(packageFlag, \"platform\", \"The name of the package to startup such as 'platform', 'console', 'ipaas'. Otherwise specify a URL or local file of the YAML to install\")\n\tcmd.PersistentFlags().String(versionPlatformFlag, \"latest\", \"The version to use for the Fabric8 Platform packages\")\n\tcmd.PersistentFlags().String(versioniPaaSFlag, \"latest\", \"The version to use for the Fabric8 iPaaS templates\")\n\tcmd.PersistentFlags().Bool(pvFlag, true, \"if false will convert deployments to use Kubernetes emptyDir and disable persistence for core apps\")\n\tcmd.PersistentFlags().Bool(useIngressFlag, true, \"Should Ingress NGINX controller be enabled by default when deploying to Kubernetes?\")\n\tcmd.PersistentFlags().Bool(useLoadbalancerFlag, false, \"Should Cloud Provider LoadBalancer be used to expose services when running to Kubernetes? (overrides ingress)\")\n\tcmd.PersistentFlags().Bool(openConsoleFlag, true, \"Should we wait an open the console?\")\n\treturn cmd\n}\n\nfunc logCommand(executable string, args []string) {\n\tutil.Infof(\"running: %s %s\\n\", executable, strings.Join(args, \" \"))\n}\n\n\/\/ lets find the executable on the PATH or in the fabric8 directory\nfunc resolveBinaryLocation(executable string) string {\n\tpath, err := exec.LookPath(executable)\n\tif err != nil || fileNotExist(path) {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tutil.Error(\"No $HOME environment variable found\")\n\t\t}\n\t\twriteFileLocation := getFabric8BinLocation()\n\n\t\t\/\/ lets try in the fabric8 folder\n\t\tpath = filepath.Join(writeFileLocation, executable)\n\t\tif fileNotExist(path) {\n\t\t\tpath = executable\n\t\t\t\/\/ lets try in the folder where we found the gofabric8 executable\n\t\t\tfolder, err := osext.ExecutableFolder()\n\t\t\tif err != nil {\n\t\t\t\tutil.Errorf(\"Failed to find executable folder: %v\\n\", err)\n\t\t\t} else {\n\t\t\t\tpath = filepath.Join(folder, executable)\n\t\t\t\tif fileNotExist(path) {\n\t\t\t\t\tutil.Infof(\"Could not find executable at %v\\n\", path)\n\t\t\t\t\tpath = executable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tutil.Infof(\"using the executable %s\\n\", path)\n\treturn path\n}\n\nfunc findExecutable(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m := d.Mode(); !m.IsDir() {\n\t\treturn nil\n\t}\n\treturn os.ErrPermission\n}\n\nfunc fileNotExist(path string) bool {\n\treturn findExecutable(path) != nil\n}\n\nfunc keepTryingToGetClient(f *cmdutil.Factory) (*client.Client, error) {\n\ttimeout := time.After(2 * time.Minute)\n\ttick := time.Tick(1 * time.Second)\n\t\/\/ Keep trying until we're timed out or got a result or got an error\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! 
\n\nfunc logCommand(executable string, args []string) {\n\tutil.Infof(\"running: %s %s\\n\", executable, strings.Join(args, \" \"))\n}\n\n\/\/ lets find the executable on the PATH or in the fabric8 directory\nfunc resolveBinaryLocation(executable string) string {\n\tpath, err := exec.LookPath(executable)\n\tif err != nil || fileNotExist(path) {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tutil.Error(\"No $HOME environment variable found\")\n\t\t}\n\t\twriteFileLocation := getFabric8BinLocation()\n\n\t\t\/\/ lets try in the fabric8 folder\n\t\tpath = filepath.Join(writeFileLocation, executable)\n\t\tif fileNotExist(path) {\n\t\t\tpath = executable\n\t\t\t\/\/ lets try in the folder where we found the gofabric8 executable\n\t\t\tfolder, err := osext.ExecutableFolder()\n\t\t\tif err != nil {\n\t\t\t\tutil.Errorf(\"Failed to find executable folder: %v\\n\", err)\n\t\t\t} else {\n\t\t\t\tpath = filepath.Join(folder, executable)\n\t\t\t\tif fileNotExist(path) {\n\t\t\t\t\tutil.Infof(\"Could not find executable at %v\\n\", path)\n\t\t\t\t\tpath = executable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tutil.Infof(\"using the executable %s\\n\", path)\n\treturn path\n}\n\nfunc findExecutable(file string) error {\n\td, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m := d.Mode(); !m.IsDir() {\n\t\treturn nil\n\t}\n\treturn os.ErrPermission\n}\n\nfunc fileNotExist(path string) bool {\n\treturn findExecutable(path) != nil\n}\n\nfunc keepTryingToGetClient(f *cmdutil.Factory) (*client.Client, error) {\n\ttimeout := time.After(2 * time.Minute)\n\ttick := time.Tick(1 * time.Second)\n\t\/\/ Keep trying until we're timed out or got a result or got an error\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! fail with a timeout error\n\t\tcase <-timeout:\n\t\t\treturn nil, errors.New(\"timed out\")\n\t\t\/\/ Got a tick, try and get the client\n\t\tcase <-tick:\n\t\t\tc, _ := getClient(f)\n\t\t\t\/\/ return if we have a client\n\t\t\tif c != nil {\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t\tutil.Info(\"Cannot connect to api server, retrying...\\n\")\n\t\t\t\/\/ retry\n\t\t}\n\t}\n}\n\nfunc getClient(f *cmdutil.Factory) (*client.Client, error) {\n\tvar err error\n\tcfg, err := f.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := client.New(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Stuart Glenn, OMRF. All rights reserved.\n\/\/ Use of this code is governed by a 3 clause BSD style license\n\/\/ Full license details in LICENSE file distributed with this software\n\npackage matcher\n\nimport (\n\t\"encoding\/csv\"\n\t\"io\"\n\t\"strconv\"\n)\n\n\/\/ A Record holds data to be matched based on attributes in Atts\ntype Record struct {\n\tID string\n\tAtts []Atter\n}\n\n\/\/ IsMatch returns true if Record a matches b exactly in columns given by positions\nfunc (a *Record) IsMatch(b *Record, positions ...int) bool {\n\tif len(positions) <= 0 {\n\t\tpositions = make([]int, len(a.Atts))\n\t\tfor i := range positions {\n\t\t\tpositions[i] = i\n\t\t}\n\t}\n\te := make([]Atter, len(positions))\n\treturn a.IsMatchWithRanges(b, e, positions...)\n}\n\n\/\/ IsMatchWithRanges returns true if Record a matches b in columns specified in\n\/\/ positions. e is a slice of Atters to use for +\/- range comparisons in columns\n\/\/ of the same index\nfunc (a *Record) IsMatchWithRanges(b *Record, e []Atter, positions ...int) bool {\n\tif len(a.Atts) != len(b.Atts) {\n\t\treturn false\n\t}\n\tif len(positions) <= 0 {\n\t\tpositions = make([]int, len(a.Atts))\n\t\tfor i := range positions {\n\t\t\tpositions[i] = i\n\t\t}\n\t}\n\tif len(positions) > len(e) {\n\t\treturn false\n\t}\n\tmatches := make([]bool, len(positions))\n\tfor i, n := range positions {\n\t\tmatches[i] = a.isMatchAt(b, e[i], n)\n\t}\n\tfor _, m := range matches {\n\t\tif !m {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ isMatchAt returns true if single attribute column in i matches between\n\/\/ a & b with given +\/- range e\nfunc (a *Record) isMatchAt(b *Record, e Atter, i int) bool {\n\tif i >= 0 && i < len(a.Atts) && i < len(b.Atts) {\n\t\treturn a.Atts[i].Equal(b.Atts[i], e)\n\t}\n\treturn false\n}\n\n\/\/ Records is just a slice of Record types\ntype Records []Record\n\n\/\/NewRecordsFromCSV parses a CSV formatted io.Reader to create\n\/\/Records for matching. 
We assume the first line is a header row which\n\/\/is skipped.\n\/\/TODO we should make this more robust with checking number of columns etc\nfunc NewRecordsFromCSV(in io.Reader, skipHeader bool) (r Records, err error) {\n\tcsv := csv.NewReader(in)\n\tlineno := 0\n\n\tfor {\n\t\tlineno++\n\t\tline, err := csv.Read()\n\t\tif io.EOF == err {\n\t\t\terr = nil\n\t\t\tbreak\n\t\t} else if nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tif skipHeader && 1 == lineno {\n\t\t\tcontinue \/\/skip header\n\t\t}\n\t\ta := []Atter{}\n\t\tfor _, v := range line[1:] {\n\t\t\tn, err := strconv.ParseFloat(v, 64)\n\t\t\tif nil == err {\n\t\t\t\ta = append(a, NumericAtt{n})\n\t\t\t} else {\n\t\t\t\ta = append(a, TextAtt{v})\n\t\t\t}\n\t\t}\n\t\tr = append(r, Record{ID: line[0], Atts: a})\n\t}\n\n\treturn r, nil\n}\n\nfunc (r *Records) Get(t string) Record {\n\tfor _, v := range *r {\n\t\tif v.ID == t {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn Record{}\n}\n\n\/\/ MatchesAll returns a slice containing the indices of r that match to a with\n\/\/ the given +\/- ranges in e\nfunc (a *Record) MatchesAll(r Records, e ...Atter) []int {\n\tpositions := make([]int, len(a.Atts))\n\tfor i := range a.Atts {\n\t\tpositions[i] = i\n\t}\n\treturn a.Matches(r, positions, e...)\n}\n\n\/\/ Matches returns a slice containing the indices from r that match to a at\n\/\/ attributes in positions with any given +\/- ranges in e\nfunc (a *Record) Matches(r Records, positions []int, e ...Atter) (matches []int) {\n\tif len(e) <= 0 {\n\t\te = make([]Atter, len(positions))\n\t}\n\tfor i, b := range r {\n\t\tif a.IsMatchWithRanges(&b, e, positions...) {\n\t\t\tmatches = append(matches, i)\n\t\t}\n\t}\n\treturn\n}\nAdd a new Reader wrapper to make CRs into LFs
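The commit message above describes the change: a small io.Reader wrapper that rewrites bare carriage returns into newlines so encoding/csv can parse classic Mac-style (CR-only) files. A hypothetical package-internal test (editor's sketch, not part of the commit) showing the end-to-end effect through NewRecordsFromCSV, which wraps its input with newcrReader in the new version below:

package matcher

import (
	"strings"
	"testing"
)

func TestCROnlyInputParses(t *testing.T) {
	// Header plus two records, separated by bare CRs instead of LFs.
	in := "id,att\rr1,1.5\rr2,text"
	recs, err := NewRecordsFromCSV(strings.NewReader(in), true)
	if err != nil {
		t.Fatal(err)
	}
	if len(recs) != 2 || recs[0].ID != "r1" || recs[1].ID != "r2" {
		t.Fatalf("unexpected records: %+v", recs)
	}
}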
\/\/ Copyright 2015 Stuart Glenn, OMRF. All rights reserved.\n\/\/ Use of this code is governed by a 3 clause BSD style license\n\/\/ Full license details in LICENSE file distributed with this software\n\npackage matcher\n\nimport (\n\t\"bufio\"\n\t\"encoding\/csv\"\n\t\"io\"\n\t\"strconv\"\n)\n\n\/\/ A Record holds data to be matched based on attributes in Atts\ntype Record struct {\n\tID string\n\tAtts []Atter\n}\n\n\/\/ IsMatch returns true if Record a matches b exactly in columns given by positions\nfunc (a *Record) IsMatch(b *Record, positions ...int) bool {\n\tif len(positions) <= 0 {\n\t\tpositions = make([]int, len(a.Atts))\n\t\tfor i := range positions {\n\t\t\tpositions[i] = i\n\t\t}\n\t}\n\te := make([]Atter, len(positions))\n\treturn a.IsMatchWithRanges(b, e, positions...)\n}\n\n\/\/ IsMatchWithRanges returns true if Record a matches b in columns specified in\n\/\/ positions. e is a slice of Atters to use for +\/- range comparisons in columns\n\/\/ of the same index\nfunc (a *Record) IsMatchWithRanges(b *Record, e []Atter, positions ...int) bool {\n\tif len(a.Atts) != len(b.Atts) {\n\t\treturn false\n\t}\n\tif len(positions) <= 0 {\n\t\tpositions = make([]int, len(a.Atts))\n\t\tfor i := range positions {\n\t\t\tpositions[i] = i\n\t\t}\n\t}\n\tif len(positions) > len(e) {\n\t\treturn false\n\t}\n\tmatches := make([]bool, len(positions))\n\tfor i, n := range positions {\n\t\tmatches[i] = a.isMatchAt(b, e[i], n)\n\t}\n\tfor _, m := range matches {\n\t\tif !m {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ isMatchAt returns true if single attribute column in i matches between\n\/\/ a & b with given +\/- range e\nfunc (a *Record) isMatchAt(b *Record, e Atter, i int) bool {\n\tif i >= 0 && i < len(a.Atts) && i < len(b.Atts) {\n\t\treturn a.Atts[i].Equal(b.Atts[i], e)\n\t}\n\treturn false\n}\n\n\/\/ Records is just a slice of Record types\ntype Records []Record\n\ntype crReader struct {\n\tr *bufio.Reader\n}\n\nfunc newcrReader(r io.Reader) io.Reader {\n\treturn crReader{bufio.NewReader(r)}\n}\n\nfunc (r crReader) Read(b []byte) (int, error) {\n\tn, err := r.r.Read(b)\n\tif n <= 0 {\n\t\treturn n, err\n\t}\n\tb = b[:n]\n\tfor i := range b {\n\t\tif b[i] == '\\r' {\n\t\t\tvar next byte\n\t\t\tif j := i + 1; j < len(b) {\n\t\t\t\tnext = b[j]\n\t\t\t} else {\n\t\t\t\tnext, err = r.r.ReadByte()\n\t\t\t\tif err == nil {\n\t\t\t\t\tr.r.UnreadByte()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif next != '\\n' {\n\t\t\t\tb[i] = '\\n'\n\t\t\t}\n\t\t}\n\t}\n\treturn n, err\n}\n\n\/\/NewRecordsFromCSV parses a CSV formatted io.Reader to create\n\/\/Records for matching. We assume the first line is a header row which\n\/\/is skipped.\n\/\/TODO we should make this more robust with checking number of columns etc\nfunc NewRecordsFromCSV(in io.Reader, skipHeader bool) (r Records, err error) {\n\tcsv := csv.NewReader(newcrReader(in))\n\tlineno := 0\n\n\tfor {\n\t\tlineno++\n\t\tline, err := csv.Read()\n\t\tif io.EOF == err {\n\t\t\terr = nil\n\t\t\tbreak\n\t\t} else if nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tif skipHeader && 1 == lineno {\n\t\t\tcontinue \/\/skip header\n\t\t}\n\t\ta := []Atter{}\n\t\tfor _, v := range line[1:] {\n\t\t\tn, err := strconv.ParseFloat(v, 64)\n\t\t\tif nil == err {\n\t\t\t\ta = append(a, NumericAtt{n})\n\t\t\t} else {\n\t\t\t\ta = append(a, TextAtt{v})\n\t\t\t}\n\t\t}\n\t\tr = append(r, Record{ID: line[0], Atts: a})\n\t}\n\n\treturn r, nil\n}\n\nfunc (r *Records) Get(t string) Record {\n\tfor _, v := range *r {\n\t\tif v.ID == t {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn Record{}\n}\n\n\/\/ MatchesAll returns a slice containing the indices of r that match to a with\n\/\/ the given +\/- ranges in e\nfunc (a *Record) MatchesAll(r Records, e ...Atter) []int {\n\tpositions := make([]int, len(a.Atts))\n\tfor i := range a.Atts {\n\t\tpositions[i] = i\n\t}\n\treturn a.Matches(r, positions, e...)\n}\n\n\/\/ Matches returns a slice containing the indices from r that match to a at\n\/\/ attributes in positions with any given +\/- ranges in e\nfunc (a *Record) Matches(r Records, positions []int, e ...Atter) (matches []int) {\n\tif len(e) <= 0 {\n\t\te = make([]Atter, len(positions))\n\t}\n\tfor i, b := range r {\n\t\tif a.IsMatchWithRanges(&b, e, positions...) 
{\n\t\t\tmatches = append(matches, i)\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage core\n\nimport (\n\t\"github.com\/pingcap\/parser\/ast\"\n\t\"github.com\/pingcap\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\"\n\tutilhint \"github.com\/pingcap\/tidb\/util\/hint\"\n)\n\n\/\/ GenHintsFromPhysicalPlan generates hints from physical plan.\nfunc GenHintsFromPhysicalPlan(p Plan) []*ast.TableOptimizerHint {\n\tvar hints []*ast.TableOptimizerHint\n\tswitch pp := p.(type) {\n\tcase *Explain:\n\t\treturn GenHintsFromPhysicalPlan(pp.TargetPlan)\n\tcase *Update:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeUpdate)\n\tcase *Delete:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeDelete)\n\t\/\/ For Insert, we only generate hints that would be used in select query block.\n\tcase *Insert:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeSelect)\n\tcase PhysicalPlan:\n\t\thints = genHintsFromPhysicalPlan(pp, utilhint.TypeSelect)\n\t}\n\treturn hints\n}\n\nfunc getTableName(tblName model.CIStr, asName *model.CIStr) model.CIStr {\n\tif asName != nil && asName.L != \"\" {\n\t\treturn *asName\n\t}\n\treturn tblName\n}\n\nfunc extractTableAsName(p PhysicalPlan) (*model.CIStr, *model.CIStr) {\n\t_, isProj := p.(*PhysicalProjection)\n\t_, isUnionScan := p.(*PhysicalUnionScan)\n\tif isProj || isUnionScan {\n\t\treturn extractTableAsName(p.Children()[0])\n\t}\n\tif len(p.Children()) > 1 {\n\t\treturn nil, nil\n\t}\n\tswitch x := p.(type) {\n\tcase *PhysicalTableReader:\n\t\tts := x.TablePlans[0].(*PhysicalTableScan)\n\t\tif ts.TableAsName.L != \"\" {\n\t\t\treturn &ts.DBName, ts.TableAsName\n\t\t}\n\t\treturn &ts.DBName, &ts.Table.Name\n\tcase *PhysicalIndexReader:\n\t\tis := x.IndexPlans[0].(*PhysicalIndexScan)\n\t\tif is.TableAsName.L != \"\" {\n\t\t\treturn &is.DBName, is.TableAsName\n\t\t}\n\t\treturn &is.DBName, &is.Table.Name\n\tcase *PhysicalIndexLookUpReader:\n\t\tis := x.IndexPlans[0].(*PhysicalIndexScan)\n\t\tif is.TableAsName.L != \"\" {\n\t\t\treturn &is.DBName, is.TableAsName\n\t\t}\n\t\treturn &is.DBName, &is.Table.Name\n\t}\n\treturn nil, nil\n}\n\nfunc getJoinHints(sctx sessionctx.Context, joinType string, parentOffset int, nodeType utilhint.NodeType, children ...PhysicalPlan) (res []*ast.TableOptimizerHint) {\n\tif parentOffset == -1 {\n\t\treturn res\n\t}\n\tfor _, child := range children {\n\t\tblockOffset := child.SelectBlockOffset()\n\t\tif blockOffset == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tvar dbName, tableName *model.CIStr\n\t\tif child.SelectBlockOffset() != parentOffset {\n\t\t\thintTable := sctx.GetSessionVars().PlannerSelectBlockAsName[child.SelectBlockOffset()]\n\t\t\t\/\/ For sub-queries like `(select * from t) t1`, t1 should belong to its surrounding select block.\n\t\t\tdbName, tableName, blockOffset = &hintTable.DBName, &hintTable.TableName, parentOffset\n\t\t} else {\n\t\t\tdbName, tableName = 
extractTableAsName(child)\n\t\t}\n\t\tif tableName == nil || tableName.L == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tqbName, err := utilhint.GenerateQBName(nodeType, blockOffset)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(joinType),\n\t\t\tTables: []ast.HintTable{{DBName: *dbName, TableName: *tableName}},\n\t\t})\n\t\tbreak\n\t}\n\treturn res\n}\n\nfunc genHintsFromPhysicalPlan(p PhysicalPlan, nodeType utilhint.NodeType) (res []*ast.TableOptimizerHint) {\n\tif p == nil {\n\t\treturn res\n\t}\n\tfor _, child := range p.Children() {\n\t\tres = append(res, genHintsFromPhysicalPlan(child, nodeType)...)\n\t}\n\tqbName, err := utilhint.GenerateQBName(nodeType, p.SelectBlockOffset())\n\tif err != nil {\n\t\treturn res\n\t}\n\tswitch pp := p.(type) {\n\tcase *PhysicalTableReader:\n\t\ttbl := pp.TablePlans[0].(*PhysicalTableScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: tbl.DBName, TableName: getTableName(tbl.Table.Name, tbl.TableAsName)}},\n\t\t})\n\t\tif tbl.StoreType == kv.TiFlash {\n\t\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\t\tQBName: qbName,\n\t\t\t\tHintName: model.NewCIStr(HintReadFromStorage),\n\t\t\t\tHintData: model.NewCIStr(kv.TiFlash.Name()),\n\t\t\t\tTables: []ast.HintTable{{DBName: tbl.DBName, TableName: getTableName(tbl.Table.Name, tbl.TableAsName)}},\n\t\t\t})\n\t\t}\n\tcase *PhysicalIndexLookUpReader:\n\t\tindex := pp.IndexPlans[0].(*PhysicalIndexScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: index.DBName, TableName: getTableName(index.Table.Name, index.TableAsName)}},\n\t\t\tIndexes: []model.CIStr{index.Index.Name},\n\t\t})\n\tcase *PhysicalIndexReader:\n\t\tindex := pp.IndexPlans[0].(*PhysicalIndexScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: index.DBName, TableName: getTableName(index.Table.Name, index.TableAsName)}},\n\t\t\tIndexes: []model.CIStr{index.Index.Name},\n\t\t})\n\tcase *PhysicalIndexMergeReader:\n\t\tIndexs := make([]model.CIStr, 0, 2)\n\t\tvar tableName model.CIStr\n\t\tvar tableAsName *model.CIStr\n\t\tfor _, partialPlan := range pp.PartialPlans {\n\t\t\tif index, ok := partialPlan[0].(*PhysicalIndexScan); ok {\n\t\t\t\tIndexs = append(Indexs, index.Index.Name)\n\t\t\t\ttableName = index.Table.Name\n\t\t\t\ttableAsName = index.TableAsName\n\t\t\t} else {\n\t\t\t\tindexName := model.NewCIStr(\"PRIMARY\")\n\t\t\t\tIndexs = append(Indexs, indexName)\n\t\t\t}\n\t\t}\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintIndexMerge),\n\t\t\tTables: []ast.HintTable{{TableName: getTableName(tableName, tableAsName)}},\n\t\t\tIndexes: Indexs,\n\t\t})\n\tcase *PhysicalHashAgg:\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintHashAgg),\n\t\t})\n\tcase *PhysicalStreamAgg:\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintStreamAgg),\n\t\t})\n\tcase *PhysicalMergeJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintSMJ, p.SelectBlockOffset(), nodeType, pp.children...)...)\n\tcase *PhysicalHashJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintHJ, 
p.SelectBlockOffset(), nodeType, pp.children...)...)\n\tcase *PhysicalIndexJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\tcase *PhysicalIndexMergeJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLMJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\tcase *PhysicalIndexHashJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLHJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\t}\n\treturn res\n}\nplanner: avoid potential panic when generating hints from joins (#22515)\/\/ Copyright 2019 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage core\n\nimport (\n\t\"github.com\/pingcap\/parser\/ast\"\n\t\"github.com\/pingcap\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\"\n\tutilhint \"github.com\/pingcap\/tidb\/util\/hint\"\n)\n\n\/\/ GenHintsFromPhysicalPlan generates hints from physical plan.\nfunc GenHintsFromPhysicalPlan(p Plan) []*ast.TableOptimizerHint {\n\tvar hints []*ast.TableOptimizerHint\n\tswitch pp := p.(type) {\n\tcase *Explain:\n\t\treturn GenHintsFromPhysicalPlan(pp.TargetPlan)\n\tcase *Update:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeUpdate)\n\tcase *Delete:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeDelete)\n\t\/\/ For Insert, we only generate hints that would be used in select query block.\n\tcase *Insert:\n\t\thints = genHintsFromPhysicalPlan(pp.SelectPlan, utilhint.TypeSelect)\n\tcase PhysicalPlan:\n\t\thints = genHintsFromPhysicalPlan(pp, utilhint.TypeSelect)\n\t}\n\treturn hints\n}\n\nfunc getTableName(tblName model.CIStr, asName *model.CIStr) model.CIStr {\n\tif asName != nil && asName.L != \"\" {\n\t\treturn *asName\n\t}\n\treturn tblName\n}\n\nfunc extractTableAsName(p PhysicalPlan) (*model.CIStr, *model.CIStr) {\n\t_, isProj := p.(*PhysicalProjection)\n\t_, isUnionScan := p.(*PhysicalUnionScan)\n\tif isProj || isUnionScan {\n\t\treturn extractTableAsName(p.Children()[0])\n\t}\n\tif len(p.Children()) > 1 {\n\t\treturn nil, nil\n\t}\n\tswitch x := p.(type) {\n\tcase *PhysicalTableReader:\n\t\tts := x.TablePlans[0].(*PhysicalTableScan)\n\t\tif ts.TableAsName.L != \"\" {\n\t\t\treturn &ts.DBName, ts.TableAsName\n\t\t}\n\t\treturn &ts.DBName, &ts.Table.Name\n\tcase *PhysicalIndexReader:\n\t\tis := x.IndexPlans[0].(*PhysicalIndexScan)\n\t\tif is.TableAsName.L != \"\" {\n\t\t\treturn &is.DBName, is.TableAsName\n\t\t}\n\t\treturn &is.DBName, &is.Table.Name\n\tcase *PhysicalIndexLookUpReader:\n\t\tis := x.IndexPlans[0].(*PhysicalIndexScan)\n\t\tif is.TableAsName.L != \"\" {\n\t\t\treturn &is.DBName, is.TableAsName\n\t\t}\n\t\treturn &is.DBName, &is.Table.Name\n\t}\n\treturn nil, nil\n}\n\nfunc getJoinHints(sctx sessionctx.Context, joinType string, parentOffset int, nodeType utilhint.NodeType, children ...PhysicalPlan) (res []*ast.TableOptimizerHint) {\n\tif parentOffset == -1 {\n\t\treturn res\n\t}\n\tfor _, child := range children {\n\t\tblockOffset := 
child.SelectBlockOffset()\n\t\tif blockOffset == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tvar dbName, tableName *model.CIStr\n\t\tif blockOffset != parentOffset {\n\t\t\tblockAsNames := sctx.GetSessionVars().PlannerSelectBlockAsName\n\t\t\tif blockOffset >= len(blockAsNames) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thintTable := blockAsNames[blockOffset]\n\t\t\t\/\/ For sub-queries like `(select * from t) t1`, t1 should belong to its surrounding select block.\n\t\t\tdbName, tableName, blockOffset = &hintTable.DBName, &hintTable.TableName, parentOffset\n\t\t} else {\n\t\t\tdbName, tableName = extractTableAsName(child)\n\t\t}\n\t\tif tableName == nil || tableName.L == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tqbName, err := utilhint.GenerateQBName(nodeType, blockOffset)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(joinType),\n\t\t\tTables: []ast.HintTable{{DBName: *dbName, TableName: *tableName}},\n\t\t})\n\t\tbreak\n\t}\n\treturn res\n}\n\nfunc genHintsFromPhysicalPlan(p PhysicalPlan, nodeType utilhint.NodeType) (res []*ast.TableOptimizerHint) {\n\tif p == nil {\n\t\treturn res\n\t}\n\tfor _, child := range p.Children() {\n\t\tres = append(res, genHintsFromPhysicalPlan(child, nodeType)...)\n\t}\n\tqbName, err := utilhint.GenerateQBName(nodeType, p.SelectBlockOffset())\n\tif err != nil {\n\t\treturn res\n\t}\n\tswitch pp := p.(type) {\n\tcase *PhysicalTableReader:\n\t\ttbl := pp.TablePlans[0].(*PhysicalTableScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: tbl.DBName, TableName: getTableName(tbl.Table.Name, tbl.TableAsName)}},\n\t\t})\n\t\tif tbl.StoreType == kv.TiFlash {\n\t\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\t\tQBName: qbName,\n\t\t\t\tHintName: model.NewCIStr(HintReadFromStorage),\n\t\t\t\tHintData: model.NewCIStr(kv.TiFlash.Name()),\n\t\t\t\tTables: []ast.HintTable{{DBName: tbl.DBName, TableName: getTableName(tbl.Table.Name, tbl.TableAsName)}},\n\t\t\t})\n\t\t}\n\tcase *PhysicalIndexLookUpReader:\n\t\tindex := pp.IndexPlans[0].(*PhysicalIndexScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: index.DBName, TableName: getTableName(index.Table.Name, index.TableAsName)}},\n\t\t\tIndexes: []model.CIStr{index.Index.Name},\n\t\t})\n\tcase *PhysicalIndexReader:\n\t\tindex := pp.IndexPlans[0].(*PhysicalIndexScan)\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintUseIndex),\n\t\t\tTables: []ast.HintTable{{DBName: index.DBName, TableName: getTableName(index.Table.Name, index.TableAsName)}},\n\t\t\tIndexes: []model.CIStr{index.Index.Name},\n\t\t})\n\tcase *PhysicalIndexMergeReader:\n\t\tIndexs := make([]model.CIStr, 0, 2)\n\t\tvar tableName model.CIStr\n\t\tvar tableAsName *model.CIStr\n\t\tfor _, partialPlan := range pp.PartialPlans {\n\t\t\tif index, ok := partialPlan[0].(*PhysicalIndexScan); ok {\n\t\t\t\tIndexs = append(Indexs, index.Index.Name)\n\t\t\t\ttableName = index.Table.Name\n\t\t\t\ttableAsName = index.TableAsName\n\t\t\t} else {\n\t\t\t\tindexName := model.NewCIStr(\"PRIMARY\")\n\t\t\t\tIndexs = append(Indexs, indexName)\n\t\t\t}\n\t\t}\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintIndexMerge),\n\t\t\tTables: []ast.HintTable{{TableName: 
getTableName(tableName, tableAsName)}},\n\t\t\tIndexes: Indexs,\n\t\t})\n\tcase *PhysicalHashAgg:\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintHashAgg),\n\t\t})\n\tcase *PhysicalStreamAgg:\n\t\tres = append(res, &ast.TableOptimizerHint{\n\t\t\tQBName: qbName,\n\t\t\tHintName: model.NewCIStr(HintStreamAgg),\n\t\t})\n\tcase *PhysicalMergeJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintSMJ, p.SelectBlockOffset(), nodeType, pp.children...)...)\n\tcase *PhysicalHashJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintHJ, p.SelectBlockOffset(), nodeType, pp.children...)...)\n\tcase *PhysicalIndexJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\tcase *PhysicalIndexMergeJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLMJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\tcase *PhysicalIndexHashJoin:\n\t\tres = append(res, getJoinHints(p.SCtx(), HintINLHJ, p.SelectBlockOffset(), nodeType, pp.children[pp.InnerChildIdx])...)\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"package cli\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"google.golang.org\/grpc\/grpclog\"\n)\n\n\/\/ TODO - needs to be integrated with the CLI output stream (*cli.OutStream)\n\/\/ to suppress any terminal formatting codes when not attached to a tty;\n\/\/ then add terminal formatting (color) support for CLI info\/warn\/success\/fail functions.\n\n\/\/ Logger is a simple logger for the Atomiq CLI that also implements grpclog.Logger\ntype Logger struct {\n\tout *OutStream\n\tverbose bool\n}\n\nfunc init() {\n\t\/\/ Creates an instance of Logger for grpc logging\n\t\/\/ WARNING: the grpc logger can only be set during init()\n\t\/\/ https:\/\/godoc.org\/google.golang.org\/grpc\/grpclog#SetLogger\n\t\/\/ TODO: set verbose to false after testing\n\tgrpclog.SetLogger(Logger{out: NewOutStream(os.Stdout), verbose: true})\n}\n\n\/\/ NewLogger creates a CLI Logger instance that writes to the provided stream.\nfunc NewLogger(out *OutStream, verbose bool) *Logger {\n\treturn &Logger{out: out, verbose: verbose}\n}\n\n\/\/ Verbose returns whether the logger is verbose\nfunc (l Logger) Verbose() bool {\n\treturn l.verbose\n}\n\n\/\/ OutStream returns the underlying output stream\nfunc (l Logger) OutStream() *OutStream {\n\treturn l.out\n}\n\n\/\/ Fatal is equivalent to fmt.Print() followed by a call to os.Exit(1).\nfunc (l Logger) Fatal(args ...interface{}) {\n\tl.Print(args...)\n\tos.Exit(1)\n}\n\n\/\/ Fatalf is equivalent to fmt.Printf() followed by a call to os.Exit(1).\nfunc (l Logger) Fatalf(format string, args ...interface{}) {\n\tl.Printf(format, args...)\n\tos.Exit(1)\n}\n\n\/\/ Fatalln is equivalent to fmt.Println() followed by a call to os.Exit(1).\nfunc (l Logger) Fatalln(args ...interface{}) {\n\tl.Println(args...)\n\tos.Exit(1)\n}\n\n\/\/ Print is equivalent to fmt.Print() if verbose mode.\n\/\/ Arguments are handled in the manner of fmt.Print.\nfunc (l Logger) Print(args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprint(l.out, args...)\n\t}\n}\n\n\/\/ Printf is equivalent to fmt.Printf() if verbose mode.\nfunc (l Logger) Printf(format string, args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprintf(l.out, format, args...)\n\t}\n}\n\n\/\/ Println is equivalent to fmt.Println() if verbose mode.\nfunc (l Logger) Println(args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprintln(l.out, args...)\n\t}\n}\nSet CLI logger to non verbose (#1429)package cli\n\nimport 
(\n\t\"fmt\"\n\t\"os\"\n\n\t\"google.golang.org\/grpc\/grpclog\"\n)\n\n\/\/ TODO - needs to be integrated with the CLI output stream (*cli.OutStream)\n\/\/ to suppress any terminal formatting codes when not attached to a tty;\n\/\/ then add terminal formatting (color) support for CLI info\/warn\/success\/fail functions.\n\n\/\/ Logger is a simple logger for the Atomiq CLI that also implements grpclog.Logger\ntype Logger struct {\n\tout *OutStream\n\tverbose bool\n}\n\nfunc init() {\n\t\/\/ Creates an instance of Logger for grpc logging\n\t\/\/ WARNING: the grpc logger can only be set during init()\n\t\/\/ https:\/\/godoc.org\/google.golang.org\/grpc\/grpclog#SetLogger\n\tgrpclog.SetLogger(Logger{out: NewOutStream(os.Stdout), verbose: false})\n}\n\n\/\/ NewLogger creates a CLI Logger instance that writes to the provided stream.\nfunc NewLogger(out *OutStream, verbose bool) *Logger {\n\treturn &Logger{out: out, verbose: verbose}\n}\n\n\/\/ Verbose returns whether the logger is verbose\nfunc (l Logger) Verbose() bool {\n\treturn l.verbose\n}\n\n\/\/ OutStream return the underlying output stream\nfunc (l Logger) OutStream() *OutStream {\n\treturn l.out\n}\n\n\/\/ Fatal is equivalent to fmt.Print() followed by a call to os.Exit(1).\nfunc (l Logger) Fatal(args ...interface{}) {\n\tl.Print(args)\n\tos.Exit(1)\n}\n\n\/\/ Fatalf is equivalent to fmt.Printf() followed by a call to os.Exit(1).\nfunc (l Logger) Fatalf(format string, args ...interface{}) {\n\tl.Printf(format, args)\n\tos.Exit(1)\n}\n\n\/\/ Fatalln is equivalent to fmt.Println() followed by a call to os.Exit(1).\nfunc (l Logger) Fatalln(args ...interface{}) {\n\tl.Println(args)\n\tos.Exit(1)\n}\n\n\/\/ Print is equivalent to fmt.Print() if verbose mode.\n\/\/ Arguments are handled in the manner of fmt.Printf.\nfunc (l Logger) Print(args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprint(l.out, args)\n\t}\n}\n\n\/\/ Printf is equivalent to fmt.Printf() if verbose mode.\nfunc (l Logger) Printf(format string, args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprintf(l.out, format, args)\n\t}\n}\n\n\/\/ Println is equivalent to fmt.Println() if verbose mode.\nfunc (l Logger) Println(args ...interface{}) {\n\tif l.verbose {\n\t\tfmt.Fprintln(l.out, args)\n\t}\n}\n<|endoftext|>"} {"text":"package config\n\nimport \"fmt\"\n\ntype API struct {\n\tSchema string `valid:\"required\"`\n\tHost string `valid:\"host,required\"`\n\tPort string `valid:\"port,required\"`\n\tApiPrefix string `valid:\"required\"`\n}\n\nfunc (a API) URL() string {\n\treturn fmt.Sprintf(\"%s:\/\/%s:%d\/%s\/policy\", a.Schema, a.Host, a.Port, a.ApiPrefix)\n}\n\nfunc (a API) ListenAddr() string {\n\treturn fmt.Sprintf(\"%s:%d\", a.Host, a.Port)\n}\nRemove accidentaly not moved package<|endoftext|>"} {"text":"\/*\nCopyright 2011 The Camlistore Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package mysql provides an implementation of sorted.KeyValue\n\/\/ on top of MySQL.\npackage mysql\n\nimport 
(\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"camlistore.org\/pkg\/env\"\n\t\"camlistore.org\/pkg\/sorted\"\n\t\"camlistore.org\/pkg\/sorted\/sqlkv\"\n\t_ \"camlistore.org\/third_party\/github.com\/go-sql-driver\/mysql\"\n\t\"go4.org\/jsonconfig\"\n)\n\nfunc init() {\n\tsorted.RegisterKeyValue(\"mysql\", newKeyValueFromJSONConfig)\n}\n\nfunc newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {\n\tvar (\n\t\tuser = cfg.RequiredString(\"user\")\n\t\tdatabase = cfg.RequiredString(\"database\")\n\t\thost = cfg.OptionalString(\"host\", \"\")\n\t\tpassword = cfg.OptionalString(\"password\", \"\")\n\t)\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tvar err error\n\tif host != \"\" {\n\t\thost, err = maybeRemapCloudSQL(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !strings.Contains(host, \":\") {\n\t\t\thost += \":3306\"\n\t\t}\n\t\thost = \"tcp(\" + host + \")\"\n\t}\n\t\/\/ The DSN does NOT have a database name in it so it's\n\t\/\/ cacheable and can be shared between different queues & the\n\t\/\/ index, all sharing the same database server, cutting down\n\t\/\/ number of TCP connections required. We add the database\n\t\/\/ name in queries instead.\n\tdsn := fmt.Sprintf(\"%s:%s@%s\/\", user, password, host)\n\n\tdb, err := openOrCachedDB(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := CreateDB(db, database); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, tableSQL := range SQLCreateTables() {\n\t\ttableSQL = strings.Replace(tableSQL, \"\/*DB*\/\", database, -1)\n\t\tif _, err := db.Exec(tableSQL); err != nil {\n\t\t\terrMsg := \"error creating table with %q: %v.\"\n\t\t\tcreateError := err\n\t\t\tsv, err := serverVersion(db)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !hasLargeVarchar(sv) {\n\t\t\t\terrMsg += \"\\nYour MySQL server is too old (< 5.0.3) to support VARCHAR larger than 255.\"\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(errMsg, tableSQL, createError)\n\t\t}\n\t}\n\tif _, err := db.Exec(fmt.Sprintf(`REPLACE INTO %s.meta VALUES ('version', '%d')`, database, SchemaVersion())); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting schema version: %v\", err)\n\t}\n\n\tkv := &keyValue{\n\t\tdb: db,\n\t\tKeyValue: &sqlkv.KeyValue{\n\t\t\tDB: db,\n\t\t\tTablePrefix: database + \".\",\n\t\t},\n\t}\n\tif err := kv.ping(); err != nil {\n\t\treturn nil, fmt.Errorf(\"MySQL db unreachable: %v\", err)\n\t}\n\tversion, err := kv.SchemaVersion()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting schema version (need to init database?): %v\", err)\n\t}\n\tif version != requiredSchemaVersion {\n\t\tif version == 20 && requiredSchemaVersion == 21 {\n\t\t\tfmt.Fprintf(os.Stderr, fixSchema20to21)\n\t\t}\n\t\tif env.IsDev() {\n\t\t\t\/\/ Good signal that we're using the devcam server, so help out\n\t\t\t\/\/ the user with a more useful tip:\n\t\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (run \\\"devcam server --wipe\\\" to wipe both your blobs and re-populate the database schema)\", version, requiredSchemaVersion)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (need to re-init\/upgrade database?)\",\n\t\t\tversion, requiredSchemaVersion)\n\t}\n\n\treturn kv, nil\n}\n\n\/\/ CreateDB creates the named database if it does not already exist.\nfunc CreateDB(db *sql.DB, dbname string) error {\n\tif dbname == \"\" {\n\t\treturn errors.New(\"can not create database: 
database name is missing\")\n\t}\n\tif _, err := db.Exec(fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s\", dbname)); err != nil {\n\t\treturn fmt.Errorf(\"error creating database %v: %v\", dbname, err)\n\t}\n\treturn nil\n}\n\n\/\/ We keep a cache of open database handles.\nvar (\n\tdbsmu sync.Mutex\n\tdbs = map[string]*sql.DB{} \/\/ DSN -> db\n)\n\nfunc openOrCachedDB(dsn string) (*sql.DB, error) {\n\tdbsmu.Lock()\n\tdefer dbsmu.Unlock()\n\tif db, ok := dbs[dsn]; ok {\n\t\treturn db, nil\n\t}\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdbs[dsn] = db\n\treturn db, nil\n}\n\ntype keyValue struct {\n\t*sqlkv.KeyValue\n\n\tdb *sql.DB\n}\n\nfunc (kv *keyValue) ping() error {\n\t\/\/ TODO(bradfitz): something more efficient here?\n\t_, err := kv.SchemaVersion()\n\treturn err\n}\n\nfunc (kv *keyValue) SchemaVersion() (version int, err error) {\n\terr = kv.db.QueryRow(\"SELECT value FROM \" + kv.KeyValue.TablePrefix + \"meta WHERE metakey='version'\").Scan(&version)\n\treturn\n}\n\nconst fixSchema20to21 = `Character set in tables changed to binary, you can fix your tables with:\nALTER TABLE rows CONVERT TO CHARACTER SET binary;\nALTER TABLE meta CONVERT TO CHARACTER SET binary;\nUPDATE meta SET value=21 WHERE metakey='version' AND value=20;\n`\n\n\/\/ serverVersion returns the MySQL server version as []int{major, minor, revision}.\nfunc serverVersion(db *sql.DB) ([]int, error) {\n\tversionRx := regexp.MustCompile(`([0-9]+)\\.([0-9]+)\\.([0-9]+)-.*`)\n\tvar version string\n\tif err := db.QueryRow(\"SELECT VERSION()\").Scan(&version); err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting MySQL server version: %v\", err)\n\t}\n\tm := versionRx.FindStringSubmatch(version)\n\tif len(m) < 4 {\n\t\treturn nil, fmt.Errorf(\"bogus MySQL server version: %v\", version)\n\t}\n\tmajor, _ := strconv.Atoi(m[1])\n\tminor, _ := strconv.Atoi(m[2])\n\trev, _ := strconv.Atoi(m[3])\n\treturn []int{major, minor, rev}, nil\n}\n\n\/\/ hasLargeVarchar returns whether the given version (as []int{major, minor, revision})\n\/\/ supports VARCHAR larger than 255.\nfunc hasLargeVarchar(version []int) bool {\n\tif len(version) < 3 {\n\t\tpanic(fmt.Sprintf(\"bogus mysql server version %v: \", version))\n\t}\n\tif version[0] < 5 {\n\t\treturn false\n\t}\n\tif version[1] > 0 {\n\t\treturn true\n\t}\n\treturn version[0] == 5 && version[1] == 0 && version[2] >= 3\n}\nsorted\/mysql: remove DB from pool when closing\/*\nCopyright 2011 The Camlistore Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package mysql provides an implementation of sorted.KeyValue\n\/\/ on top of MySQL.\npackage mysql\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"camlistore.org\/pkg\/env\"\n\t\"camlistore.org\/pkg\/sorted\"\n\t\"camlistore.org\/pkg\/sorted\/sqlkv\"\n\t_ \"camlistore.org\/third_party\/github.com\/go-sql-driver\/mysql\"\n\t\"go4.org\/jsonconfig\"\n)\n\nfunc init() {\n\tsorted.RegisterKeyValue(\"mysql\", 
newKeyValueFromJSONConfig)\n}\n\nfunc newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {\n\tvar (\n\t\tuser = cfg.RequiredString(\"user\")\n\t\tdatabase = cfg.RequiredString(\"database\")\n\t\thost = cfg.OptionalString(\"host\", \"\")\n\t\tpassword = cfg.OptionalString(\"password\", \"\")\n\t)\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tvar err error\n\tif host != \"\" {\n\t\thost, err = maybeRemapCloudSQL(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !strings.Contains(host, \":\") {\n\t\t\thost += \":3306\"\n\t\t}\n\t\thost = \"tcp(\" + host + \")\"\n\t}\n\t\/\/ The DSN does NOT have a database name in it so it's\n\t\/\/ cacheable and can be shared between different queues & the\n\t\/\/ index, all sharing the same database server, cutting down\n\t\/\/ number of TCP connections required. We add the database\n\t\/\/ name in queries instead.\n\tdsn := fmt.Sprintf(\"%s:%s@%s\/\", user, password, host)\n\n\tdb, err := openOrCachedDB(dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := CreateDB(db, database); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, tableSQL := range SQLCreateTables() {\n\t\ttableSQL = strings.Replace(tableSQL, \"\/*DB*\/\", database, -1)\n\t\tif _, err := db.Exec(tableSQL); err != nil {\n\t\t\terrMsg := \"error creating table with %q: %v.\"\n\t\t\tcreateError := err\n\t\t\tsv, err := serverVersion(db)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !hasLargeVarchar(sv) {\n\t\t\t\terrMsg += \"\\nYour MySQL server is too old (< 5.0.3) to support VARCHAR larger than 255.\"\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(errMsg, tableSQL, createError)\n\t\t}\n\t}\n\tif _, err := db.Exec(fmt.Sprintf(`REPLACE INTO %s.meta VALUES ('version', '%d')`, database, SchemaVersion())); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting schema version: %v\", err)\n\t}\n\n\tkv := &keyValue{\n\t\tdsn: dsn,\n\t\tdb: db,\n\t\tKeyValue: &sqlkv.KeyValue{\n\t\t\tDB: db,\n\t\t\tTablePrefix: database + \".\",\n\t\t},\n\t}\n\tif err := kv.ping(); err != nil {\n\t\treturn nil, fmt.Errorf(\"MySQL db unreachable: %v\", err)\n\t}\n\tversion, err := kv.SchemaVersion()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting schema version (need to init database?): %v\", err)\n\t}\n\tif version != requiredSchemaVersion {\n\t\tif version == 20 && requiredSchemaVersion == 21 {\n\t\t\tfmt.Fprintf(os.Stderr, fixSchema20to21)\n\t\t}\n\t\tif env.IsDev() {\n\t\t\t\/\/ Good signal that we're using the devcam server, so help out\n\t\t\t\/\/ the user with a more useful tip:\n\t\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (run \\\"devcam server --wipe\\\" to wipe both your blobs and re-populate the database schema)\", version, requiredSchemaVersion)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"database schema version is %d; expect %d (need to re-init\/upgrade database?)\",\n\t\t\tversion, requiredSchemaVersion)\n\t}\n\n\treturn kv, nil\n}\n\n\/\/ CreateDB creates the named database if it does not already exist.\nfunc CreateDB(db *sql.DB, dbname string) error {\n\tif dbname == \"\" {\n\t\treturn errors.New(\"can not create database: database name is missing\")\n\t}\n\tif _, err := db.Exec(fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s\", dbname)); err != nil {\n\t\treturn fmt.Errorf(\"error creating database %v: %v\", dbname, err)\n\t}\n\treturn nil\n}\n\n\/\/ We keep a cache of open database handles.\nvar (\n\tdbsmu sync.Mutex\n\tdbs = map[string]*sql.DB{} \/\/ DSN -> db\n)\n\nfunc 
openOrCachedDB(dsn string) (*sql.DB, error) {\n\tdbsmu.Lock()\n\tdefer dbsmu.Unlock()\n\tif db, ok := dbs[dsn]; ok {\n\t\treturn db, nil\n\t}\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdbs[dsn] = db\n\treturn db, nil\n}\n\ntype keyValue struct {\n\t*sqlkv.KeyValue\n\n\tdsn string\n\tdb *sql.DB\n}\n\n\/\/ Close overrides KeyValue.Close because we need to remove the DB from the pool\n\/\/ when closing.\nfunc (kv *keyValue) Close() error {\n\tdbsmu.Lock()\n\tdefer dbsmu.Unlock()\n\tdelete(dbs, kv.dsn)\n\treturn kv.DB.Close()\n}\n\nfunc (kv *keyValue) ping() error {\n\t\/\/ TODO(bradfitz): something more efficient here?\n\t_, err := kv.SchemaVersion()\n\treturn err\n}\n\nfunc (kv *keyValue) SchemaVersion() (version int, err error) {\n\terr = kv.db.QueryRow(\"SELECT value FROM \" + kv.KeyValue.TablePrefix + \"meta WHERE metakey='version'\").Scan(&version)\n\treturn\n}\n\nconst fixSchema20to21 = `Character set in tables changed to binary, you can fix your tables with:\nALTER TABLE rows CONVERT TO CHARACTER SET binary;\nALTER TABLE meta CONVERT TO CHARACTER SET binary;\nUPDATE meta SET value=21 WHERE metakey='version' AND value=20;\n`\n\n\/\/ serverVersion returns the MySQL server version as []int{major, minor, revision}.\nfunc serverVersion(db *sql.DB) ([]int, error) {\n\tversionRx := regexp.MustCompile(`([0-9]+)\\.([0-9]+)\\.([0-9]+)-.*`)\n\tvar version string\n\tif err := db.QueryRow(\"SELECT VERSION()\").Scan(&version); err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting MySQL server version: %v\", err)\n\t}\n\tm := versionRx.FindStringSubmatch(version)\n\tif len(m) < 4 {\n\t\treturn nil, fmt.Errorf(\"bogus MySQL server version: %v\", version)\n\t}\n\tmajor, _ := strconv.Atoi(m[1])\n\tminor, _ := strconv.Atoi(m[2])\n\trev, _ := strconv.Atoi(m[3])\n\treturn []int{major, minor, rev}, nil\n}\n\n\/\/ hasLargeVarchar returns whether the given version (as []int{major, minor, revision})\n\/\/ supports VARCHAR larger than 255.\nfunc hasLargeVarchar(version []int) bool {\n\tif len(version) < 3 {\n\t\tpanic(fmt.Sprintf(\"bogus mysql server version %v: \", version))\n\t}\n\tif version[0] < 5 {\n\t\treturn false\n\t}\n\tif version[1] > 0 {\n\t\treturn true\n\t}\n\treturn version[0] == 5 && version[1] == 0 && version[2] >= 3\n}\n
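\n\/\/ Illustrative addition (not in the original file): the shape of the jsonconfig\n\/\/ object newKeyValueFromJSONConfig expects. The key names come from the\n\/\/ RequiredString\/OptionalString calls above; the values here are made-up examples.\n\/\/ When \"host\" carries no port, \":3306\" is appended and the DSN becomes\n\/\/ \"user:password@tcp(host:3306)\/\" with no database name in it.\n\/\/\n\/\/ {\n\/\/ \t\"user\": \"camlistore\",\n\/\/ \t\"password\": \"secret\",\n\/\/ \t\"host\": \"127.0.0.1\",\n\/\/ \t\"database\": \"camlistore_index\"\n\/\/ }\n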
`json:\"totalTxBytes\"`\n}\n\n\/\/ GetDatabaseMetrics returns a variety of metrics about the wavepipe database, including\n\/\/ total numbers of specific objects, and the time when the database was last updated\nfunc GetDatabaseMetrics() (*DatabaseMetrics, error) {\n\t\/\/ Fetch total artists\n\tartists, err := data.DB.CountArtists()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total albums\n\talbums, err := data.DB.CountAlbums()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total songs\n\tsongs, err := data.DB.CountSongs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total folders\n\tfolders, err := data.DB.CountFolders()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total art\n\tart, err := data.DB.CountArt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Combine all metrics\n\treturn &DatabaseMetrics{\n\t\tUpdated: ScanTime(),\n\t\tArtists: artists,\n\t\tAlbums: albums,\n\t\tSongs: songs,\n\t\tArt: art,\n\t\tFolders: folders,\n\t}, nil\n}\n\n\/\/ AddRXBytes atomically increments the rxBytes counter by the amount specified\nfunc AddRXBytes(count int64) {\n\tatomic.AddInt64(&rxBytes, count)\n}\n\n\/\/ AddTXBytes atomically increments the txBytes counter by the amount specified\nfunc AddTXBytes(count int64) {\n\tatomic.AddInt64(&txBytes, count)\n}\n\n\/\/ RXBytes returns the total number of bytes received over the network\nfunc RXBytes() int64 {\n\treturn atomic.LoadInt64(&rxBytes)\n}\n\n\/\/ TXBytes returns the total number of bytes transmitted over the network\nfunc TXBytes() int64 {\n\treturn atomic.LoadInt64(&txBytes)\n}\ncommon\/metrics: fix field names on NetworkMetricspackage common\n\nimport (\n\t\"sync\/atomic\"\n\n\t\"github.com\/mdlayher\/wavepipe\/data\"\n)\n\nvar (\n\t\/\/ rxBytes is the total number of bytes received over the network\n\trxBytes int64\n\t\/\/ txBytes is the total number of bytes received over the network\n\ttxBytes int64\n)\n\n\/\/ Metrics represents a variety of metrics about the current wavepipe instance, and contains several\n\/\/ nested structs which contain more specific metrics\ntype Metrics struct {\n\tDatabase *DatabaseMetrics `json:\"database\"`\n\tNetwork *NetworkMetrics `json:\"network\"`\n}\n\n\/\/ DatabaseMetrics represents metrics regarding the wavepipe database, including total numbers\n\/\/ of specific objects, and the time when the database was last updated\ntype DatabaseMetrics struct {\n\tUpdated int64 `json:\"updated\"`\n\n\tArtists int64 `json:\"artists\"`\n\tAlbums int64 `json:\"albums\"`\n\tSongs int64 `json:\"songs\"`\n\tFolders int64 `json:\"folders\"`\n\tArt int64 `json:\"art\"`\n}\n\n\/\/ NetworkMetrics represents metrics regarding wavepipe network traffic, including total traffic\n\/\/ received and transmitted in bytes\ntype NetworkMetrics struct {\n\tRXBytes int64 `json:\"rxBytes\"`\n\tTXBytes int64 `json:\"txBytes\"`\n}\n\n\/\/ GetDatabaseMetrics returns a variety of metrics about the wavepipe database, including\n\/\/ total numbers of specific objects, and the time when the database was last updated\nfunc GetDatabaseMetrics() (*DatabaseMetrics, error) {\n\t\/\/ Fetch total artists\n\tartists, err := data.DB.CountArtists()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total albums\n\talbums, err := data.DB.CountAlbums()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total songs\n\tsongs, err := data.DB.CountSongs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Fetch total folders\n\tfolders, err := 
\n\/\/ AddRXBytes atomically increments the rxBytes counter by the amount specified\nfunc AddRXBytes(count int64) {\n\tatomic.AddInt64(&rxBytes, count)\n}\n\n\/\/ AddTXBytes atomically increments the txBytes counter by the amount specified\nfunc AddTXBytes(count int64) {\n\tatomic.AddInt64(&txBytes, count)\n}\n\n\/\/ RXBytes returns the total number of bytes received over the network\nfunc RXBytes() int64 {\n\treturn atomic.LoadInt64(&rxBytes)\n}\n\n\/\/ TXBytes returns the total number of bytes transmitted over the network\nfunc TXBytes() int64 {\n\treturn atomic.LoadInt64(&txBytes)\n}\n
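\n\/\/ Sketch of how these counters are meant to be driven (illustrative addition,\n\/\/ not part of the original file); the transport hook name is an assumption.\n\/\/\n\/\/ func accountResponse(reqBytes, respBytes int64) {\n\/\/ \tAddRXBytes(reqBytes) \/\/ bytes read from the client\n\/\/ \tAddTXBytes(respBytes) \/\/ bytes written back\n\/\/ \t_ = NetworkMetrics{RXBytes: RXBytes(), TXBytes: TXBytes()}\n\/\/ }\n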
<|endoftext|>"} {"text":"package list\n\nimport (\n\t\"errors\"\n)\n\n\/\/ Node is a node of the list\ntype Node struct {\n\tnext *Node \/\/ The node after this node in the list\n\tlist *LinkedList \/\/ The list to which this element belongs\n\tValue interface{} \/\/ The value stored with this node\n}\n\n\/\/ Next returns the next node or nil\nfunc (n *Node) Next() *Node {\n\tif i := n.next; n.list != nil {\n\t\treturn i\n\t}\n\n\treturn nil\n}\n\n\/\/ LinkedList is a single linked list\ntype LinkedList struct {\n\tfirst *Node \/\/ The first node of the list\n\tlast *Node \/\/ The last node of the list\n\tlen int \/\/ The current list length\n}\n\n\/\/ New returns an initialized list\nfunc New() *LinkedList {\n\treturn new(LinkedList).init()\n}\n\n\/\/ init initializes or clears the list\nfunc (l *LinkedList) init() *LinkedList {\n\tl.Clear()\n\n\treturn l\n}\n\n\/\/ Clear removes all nodes from the list\nfunc (l *LinkedList) Clear() {\n\ti := l.first\n\n\tfor i != nil {\n\t\tj := i.Next()\n\n\t\ti.list = nil\n\t\ti.next = nil\n\n\t\ti = j\n\t}\n\n\tl.first = nil\n\tl.last = nil\n\tl.len = 0\n}\n\n\/\/ Len returns the current list length\nfunc (l *LinkedList) Len() int {\n\treturn l.len\n}\n\n\/\/ First returns the first node of the list or nil\nfunc (l *LinkedList) First() *Node {\n\treturn l.first\n}\n\n\/\/ Last returns the last node of the list or nil\nfunc (l *LinkedList) Last() *Node {\n\treturn l.last\n}\n\n\/\/ Get returns the node with the given index or nil\nfunc (l *LinkedList) Get(i int) (*Node, error) {\n\tif i < 0 || i >= l.len {\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\t}\n\n\tj := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif i == j {\n\t\t\treturn n, nil\n\t\t}\n\n\t\tj++\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ Set replaces the value in the list with the given value\nfunc (l *LinkedList) Set(i int, v interface{}) error {\n\tif i < 0 || i >= l.len {\n\t\treturn errors.New(\"index bounds out of range\")\n\t}\n\n\tj := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif i == j {\n\t\t\tn.Value = v\n\n\t\t\treturn nil\n\t\t}\n\n\t\tj++\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ Copy returns an exact copy of the list\nfunc (l *LinkedList) Copy() *LinkedList {\n\tn := New()\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tn.Push(i.Value)\n\t}\n\n\treturn n\n}\n\n\/\/ ToArray returns a copy of the list as slice\nfunc (l *LinkedList) ToArray() []interface{} {\n\ta := make([]interface{}, l.len)\n\n\tj := 0\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\ta[j] = i.Value\n\n\t\tj++\n\t}\n\n\treturn a\n}\n\n\/\/ newNode initializes a new node for the list\nfunc (l *LinkedList) newNode(v interface{}) *Node {\n\treturn &Node{\n\t\tlist: l,\n\t\tValue: v,\n\t}\n}\n\n\/\/ findParent returns the parent to a given node or nil\nfunc (l *LinkedList) findParent(c *Node) *Node {\n\tif c == nil || c.list != l {\n\t\treturn nil\n\t}\n\n\tvar p *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i == c {\n\t\t\treturn p\n\t\t}\n\n\t\tp = i\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ InsertAfter creates a new node from a value, inserts it after a given node and returns the new one\nfunc (l *LinkedList) InsertAfter(v interface{}, p *Node) *Node {\n\tif (p == nil && l.len != 0) || (p != nil && p.list != l) {\n\t\treturn nil\n\t}\n\n\tn := l.newNode(v)\n\n\t\/\/ insert first node\n\tif p == nil {\n\t\tl.first = n\n\t\tl.last = n\n\t} else {\n\t\tn.next = p.next\n\t\tp.next = n\n\n\t\tif p == l.last {\n\t\t\tl.last = n\n\t\t}\n\t}\n\n\tl.len++\n\n\treturn n\n}\n\n\/\/ InsertBefore creates a new node from a value, inserts it before a given node and returns the new one\nfunc (l *LinkedList) InsertBefore(v interface{}, p *Node) *Node {\n\tif (p == nil && l.len != 0) || (p != nil && p.list != l) {\n\t\treturn nil\n\t}\n\n\tn := l.newNode(v)\n\n\t\/\/ insert first node\n\tif p == nil {\n\t\tl.first = n\n\t\tl.last = n\n\t} else {\n\t\tif p == l.first {\n\t\t\tl.first = n\n\t\t} else {\n\t\t\tpp := l.findParent(p)\n\n\t\t\tpp.next = n\n\t\t}\n\n\t\tn.next = p\n\t}\n\n\tl.len++\n\n\treturn n\n}\n\n\/\/ InsertAt creates a new node from a value, inserts it at the exact index which must be in range of the list and returns the new node\nfunc (l *LinkedList) InsertAt(i int, v interface{}) (*Node, error) {\n\tif i < 0 || i > l.len {\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\t}\n\n\tn := l.newNode(v)\n\n\tif i == 0 {\n\t\tn.next = l.first\n\t\tl.first = n\n\t} else if i == l.len {\n\t\tl.last.next = n\n\t\tl.last = n\n\t} else {\n\t\tp, _ := l.Get(i - 1)\n\n\t\tn.next = p.next\n\t\tp.next = n\n\t}\n\n\tl.len++\n\n\treturn n, nil\n}\n\n\/\/ remove removes a given node from the list using the provided parent p\nfunc (l *LinkedList) remove(c *Node, p *Node) *Node {\n\tif c == nil || c.list != l || l.len == 0 {\n\t\treturn nil\n\t}\n\n\tif c == l.first {\n\t\tl.first = c.next\n\n\t\t\/\/ c is the last node\n\t\tif c == l.last {\n\t\t\tl.last = nil\n\t\t}\n\t} else {\n\t\tif p == nil {\n\t\t\tp = l.findParent(c)\n\t\t}\n\n\t\tp.next = c.next\n\n\t\tif c == l.last {\n\t\t\tl.last = p\n\t\t}\n\t}\n\n\tc.list = nil\n\tc.next = nil\n\n\tl.len--\n\n\treturn c\n}\n\n\/\/ Remove removes a given node from the list\nfunc (l *LinkedList) Remove(c *Node) *Node {\n\treturn l.remove(c, nil)\n}\n\n\/\/ RemoveAt removes a node from the list at the given index\nfunc (l *LinkedList) RemoveAt(i int) (*Node, error) {\n\tswitch {\n\tcase i < 0 || i >= l.len:\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\tcase i == 0:\n\t\treturn l.remove(l.first, nil), nil\n\tdefault:\n\t\tp, _ := l.Get(i - 1)\n\n\t\treturn l.remove(p.next, p), nil\n\t}\n}\n\n\/\/ RemoveFirstOccurrence removes the first node with the given value from the list and returns it or nil\nfunc (l *LinkedList) RemoveFirstOccurrence(v interface{}) *Node {\n\tvar c, p *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i.Value == v {\n\t\t\tc = i\n\n\t\t\tbreak\n\t\t}\n\n\t\tp = 
i\n\t}\n\n\tif c != nil {\n\t\tl.remove(c, p)\n\t}\n\n\treturn c\n}\n\n\/\/ RemoveLastOccurrence removes the last node with the given value from the list and returns it or nil\nfunc (l *LinkedList) RemoveLastOccurrence(v interface{}) *Node {\n\tvar c, p, pp *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i.Value == v {\n\t\t\tc = i\n\t\t\tp = pp\n\t\t}\n\n\t\tpp = i\n\t}\n\n\tif c != nil {\n\t\tl.remove(c, p)\n\t}\n\n\treturn c\n}\n\n\/\/ Pop removes and returns the last node or nil\nfunc (l *LinkedList) Pop() *Node {\n\treturn l.Remove(l.last)\n}\n\n\/\/ Push creates a new node from a value, inserts it as the last node and returns it\nfunc (l *LinkedList) Push(v interface{}) *Node {\n\treturn l.InsertAfter(v, l.last)\n}\n\n\/\/ PushList adds the values of a list to the end of the list\nfunc (l *LinkedList) PushList(l2 *LinkedList) {\n\tfor i := l2.First(); i != nil; i = i.Next() {\n\t\tl.Push(i.Value)\n\t}\n}\n\n\/\/ Shift removes and returns the first node or nil\nfunc (l *LinkedList) Shift() *Node {\n\treturn l.Remove(l.first)\n}\n\n\/\/ Unshift creates a new node from a value, inserts it as the first node and returns it\nfunc (l *LinkedList) Unshift(v interface{}) *Node {\n\treturn l.InsertBefore(v, l.first)\n}\n\n\/\/ UnshiftList adds the values of a list to the front of the list\nfunc (l *LinkedList) UnshiftList(l2 *LinkedList) {\n\tfor i := l2.First(); i != nil; i = i.Next() {\n\t\tl.Unshift(i.Value)\n\t}\n}\n\n\/\/ Contains returns true if the value exists in the list\nfunc (l *LinkedList) Contains(v interface{}) bool {\n\t_, ok := l.IndexOf(v)\n\n\treturn ok\n}\n\n\/\/ IndexOf returns the first index of an occurrence of the given value and true or -1 and false if the value does not exist\nfunc (l *LinkedList) IndexOf(v interface{}) (int, bool) {\n\ti := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif n.Value == v {\n\t\t\treturn i, true\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn -1, false\n}\n\n\/\/ LastIndexOf returns the last index of an occurrence of the given value and true or -1 and false if the value does not exist\nfunc (l *LinkedList) LastIndexOf(v interface{}) (int, bool) {\n\ti := 0\n\tj := -1\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif n.Value == v {\n\t\t\tj = i\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn j, j != -1\n}\n\nfunc (l *LinkedList) MoveAfter(n, p *Node) {\n\tif n.list != l || p.list != l || n == p {\n\t\treturn\n\t}\n\n\tl.InsertAfter(l.Remove(n).Value, p)\n}\n\nfunc (l *LinkedList) MoveBefore(n, p *Node) {\n\tif n.list != l || p.list != l || n == p {\n\t\treturn\n\t}\n\n\tl.InsertBefore(l.Remove(n).Value, p)\n}\n\nfunc (l *LinkedList) MoveToBack(n *Node) {\n\tl.MoveAfter(n, l.last)\n}\n\nfunc (l *LinkedList) MoveToFront(n *Node) {\n\tl.MoveBefore(n, l.first)\n}\nInsertAt can use other functionspackage list\n\nimport (\n\t\"errors\"\n)\n\n\/\/ Node is a node of the list\ntype Node struct {\n\tnext *Node \/\/ The node after this node in the list\n\tlist *LinkedList \/\/ The list to which this element belongs\n\tValue interface{} \/\/ The value stored with this node\n}\n\n\/\/ Next returns the next node or nil\nfunc (n *Node) Next() *Node {\n\tif i := n.next; n.list != nil {\n\t\treturn i\n\t}\n\n\treturn nil\n}\n\n\/\/ LinkedList is a single linked list\ntype LinkedList struct {\n\tfirst *Node \/\/ The first node of the list\n\tlast *Node \/\/ The last node of the list\n\tlen int \/\/ The current list length\n}\n\n\/\/ New returns an initialized list\nfunc New() *LinkedList {\n\treturn new(LinkedList).init()\n}\n\n\/\/ init initializes 
or clears the list\nfunc (l *LinkedList) init() *LinkedList {\n\tl.Clear()\n\n\treturn l\n}\n\n\/\/ Clear removes all nodes from the list\nfunc (l *LinkedList) Clear() {\n\ti := l.first\n\n\tfor i != nil {\n\t\tj := i.Next()\n\n\t\ti.list = nil\n\t\ti.next = nil\n\n\t\ti = j\n\t}\n\n\tl.first = nil\n\tl.last = nil\n\tl.len = 0\n}\n\n\/\/ Len returns the current list length\nfunc (l *LinkedList) Len() int {\n\treturn l.len\n}\n\n\/\/ First returns the first node of the list or nil\nfunc (l *LinkedList) First() *Node {\n\treturn l.first\n}\n\n\/\/ Last returns the last node of the list or nil\nfunc (l *LinkedList) Last() *Node {\n\treturn l.last\n}\n\n\/\/ Get returns the node with the given index or nil\nfunc (l *LinkedList) Get(i int) (*Node, error) {\n\tif i < 0 || i >= l.len {\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\t}\n\n\tj := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif i == j {\n\t\t\treturn n, nil\n\t\t}\n\n\t\tj++\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ Set replaces the value in the list with the given value\nfunc (l *LinkedList) Set(i int, v interface{}) error {\n\tif i < 0 || i >= l.len {\n\t\treturn errors.New(\"index bounds out of range\")\n\t}\n\n\tj := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif i == j {\n\t\t\tn.Value = v\n\n\t\t\treturn nil\n\t\t}\n\n\t\tj++\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ Copy returns an exact copy of the list\nfunc (l *LinkedList) Copy() *LinkedList {\n\tn := New()\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tn.Push(i.Value)\n\t}\n\n\treturn n\n}\n\n\/\/ ToArray returns a copy of the list as slice\nfunc (l *LinkedList) ToArray() []interface{} {\n\ta := make([]interface{}, l.len)\n\n\tj := 0\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\ta[j] = i.Value\n\n\t\tj++\n\t}\n\n\treturn a\n}\n\n\/\/ newNode initializes a new node for the list\nfunc (l *LinkedList) newNode(v interface{}) *Node {\n\treturn &Node{\n\t\tlist: l,\n\t\tValue: v,\n\t}\n}\n\n\/\/ findParent returns the parent to a given node or nil\nfunc (l *LinkedList) findParent(c *Node) *Node {\n\tif c == nil || c.list != l {\n\t\treturn nil\n\t}\n\n\tvar p *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i == c {\n\t\t\treturn p\n\t\t}\n\n\t\tp = i\n\t}\n\n\tpanic(\"there is something wrong with the internal structure\")\n}\n\n\/\/ InsertAfter creates a new node from a value, inserts it after a given node and returns the new one\nfunc (l *LinkedList) InsertAfter(v interface{}, p *Node) *Node {\n\tif (p == nil && l.len != 0) || (p != nil && p.list != l) {\n\t\treturn nil\n\t}\n\n\tn := l.newNode(v)\n\n\t\/\/ insert first node\n\tif p == nil {\n\t\tl.first = n\n\t\tl.last = n\n\t} else {\n\t\tn.next = p.next\n\t\tp.next = n\n\n\t\tif p == l.last {\n\t\t\tl.last = n\n\t\t}\n\t}\n\n\tl.len++\n\n\treturn n\n}\n\n\/\/ InsertBefore creates a new node from a value, inserts it before a given node and returns the new one\nfunc (l *LinkedList) InsertBefore(v interface{}, p *Node) *Node {\n\tif (p == nil && l.len != 0) || (p != nil && p.list != l) {\n\t\treturn nil\n\t}\n\n\tn := l.newNode(v)\n\n\t\/\/ insert first node\n\tif p == nil {\n\t\tl.first = n\n\t\tl.last = n\n\t} else {\n\t\tif p == l.first {\n\t\t\tl.first = n\n\t\t} else {\n\t\t\tpp := l.findParent(p)\n\n\t\t\tpp.next = n\n\t\t}\n\n\t\tn.next = p\n\t}\n\n\tl.len++\n\n\treturn n\n}\n\n\/\/ InsertAt creates a new node from a value, inserts it at the exact index which must be in range of the list and returns the new node\nfunc (l *LinkedList) InsertAt(i int, v interface{}) (*Node, error) {\n\tif i < 0 || i > l.len {\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\t}\n\n\tif i == 0 {\n\t\treturn l.Unshift(v), nil\n\t} else if i == l.len {\n\t\treturn l.Push(v), nil\n\t}\n\n\tp, _ := l.Get(i)\n\n\treturn l.InsertBefore(v, p), nil\n}\n
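\n\/\/ A small usage sketch (illustrative addition, not part of the original file):\n\/\/\n\/\/ func insertAtDemo() {\n\/\/ \tl := New()\n\/\/ \tl.Push(\"a\")\n\/\/ \tl.Push(\"c\")\n\/\/ \tl.InsertAt(1, \"b\") \/\/ middle index: delegates to InsertBefore -> a, b, c\n\/\/ \tl.InsertAt(3, \"d\") \/\/ i == l.len: delegates to Push -> a, b, c, d\n\/\/ \tl.InsertAt(0, \"z\") \/\/ i == 0: delegates to Unshift -> z, a, b, c, d\n\/\/ }\n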
\n\/\/ remove removes a given node from the list using the provided parent p\nfunc (l *LinkedList) remove(c *Node, p *Node) *Node {\n\tif c == nil || c.list != l || l.len == 0 {\n\t\treturn nil\n\t}\n\n\tif c == l.first {\n\t\tl.first = c.next\n\n\t\t\/\/ c is the last node\n\t\tif c == l.last {\n\t\t\tl.last = nil\n\t\t}\n\t} else {\n\t\tif p == nil {\n\t\t\tp = l.findParent(c)\n\t\t}\n\n\t\tp.next = c.next\n\n\t\tif c == l.last {\n\t\t\tl.last = p\n\t\t}\n\t}\n\n\tc.list = nil\n\tc.next = nil\n\n\tl.len--\n\n\treturn c\n}\n\n\/\/ Remove removes a given node from the list\nfunc (l *LinkedList) Remove(c *Node) *Node {\n\treturn l.remove(c, nil)\n}\n\n\/\/ RemoveAt removes a node from the list at the given index\nfunc (l *LinkedList) RemoveAt(i int) (*Node, error) {\n\tswitch {\n\tcase i < 0 || i >= l.len:\n\t\treturn nil, errors.New(\"index bounds out of range\")\n\tcase i == 0:\n\t\treturn l.remove(l.first, nil), nil\n\tdefault:\n\t\tp, _ := l.Get(i - 1)\n\n\t\treturn l.remove(p.next, p), nil\n\t}\n}\n\n\/\/ RemoveFirstOccurrence removes the first node with the given value from the list and returns it or nil\nfunc (l *LinkedList) RemoveFirstOccurrence(v interface{}) *Node {\n\tvar c, p *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i.Value == v {\n\t\t\tc = i\n\n\t\t\tbreak\n\t\t}\n\n\t\tp = i\n\t}\n\n\tif c != nil {\n\t\tl.remove(c, p)\n\t}\n\n\treturn c\n}\n\n\/\/ RemoveLastOccurrence removes the last node with the given value from the list and returns it or nil\nfunc (l *LinkedList) RemoveLastOccurrence(v interface{}) *Node {\n\tvar c, p, pp *Node\n\n\tfor i := l.First(); i != nil; i = i.Next() {\n\t\tif i.Value == v {\n\t\t\tc = i\n\t\t\tp = pp\n\t\t}\n\n\t\tpp = i\n\t}\n\n\tif c != nil {\n\t\tl.remove(c, p)\n\t}\n\n\treturn c\n}\n\n\/\/ Pop removes and returns the last node or nil\nfunc (l *LinkedList) Pop() *Node {\n\treturn l.Remove(l.last)\n}\n\n\/\/ Push creates a new node from a value, inserts it as the last node and returns it\nfunc (l *LinkedList) Push(v interface{}) *Node {\n\treturn l.InsertAfter(v, l.last)\n}\n\n\/\/ PushList adds the values of a list to the end of the list\nfunc (l *LinkedList) PushList(l2 *LinkedList) {\n\tfor i := l2.First(); i != nil; i = i.Next() {\n\t\tl.Push(i.Value)\n\t}\n}\n\n\/\/ Shift removes and returns the first node or nil\nfunc (l *LinkedList) Shift() *Node {\n\treturn l.Remove(l.first)\n}\n\n\/\/ Unshift creates a new node from a value, inserts it as the first node and returns it\nfunc (l *LinkedList) Unshift(v interface{}) *Node {\n\treturn l.InsertBefore(v, l.first)\n}\n\n\/\/ UnshiftList adds the values of a list to the front of the list\nfunc (l *LinkedList) UnshiftList(l2 *LinkedList) {\n\tfor i := l2.First(); i != nil; i = i.Next() {\n\t\tl.Unshift(i.Value)\n\t}\n}\n\n\/\/ Contains returns true if the value exists in the list\nfunc (l *LinkedList) Contains(v interface{}) bool {\n\t_, ok := l.IndexOf(v)\n\n\treturn ok\n}\n
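\n\/\/ Illustrative addition (not in the original file): the two Occurrence helpers\n\/\/ below differ only in which duplicate they drop.\n\/\/\n\/\/ func occurrenceDemo() {\n\/\/ \tl := New()\n\/\/ \tl.Push(\"x\")\n\/\/ \tl.Push(\"y\")\n\/\/ \tl.Push(\"x\")\n\/\/ \tl.RemoveFirstOccurrence(\"x\") \/\/ leaves y, x\n\/\/ \tl.RemoveLastOccurrence(\"x\") \/\/ leaves y\n\/\/ }\n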
\n\/\/ IndexOf returns the first index of an occurrence of the given value and true or -1 and false if the value does not exist\nfunc (l *LinkedList) IndexOf(v interface{}) (int, bool) {\n\ti := 0\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif n.Value == v {\n\t\t\treturn i, true\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn -1, false\n}\n\n\/\/ LastIndexOf returns the last index of an occurrence of the given value and true or -1 and false if the value does not exist\nfunc (l *LinkedList) LastIndexOf(v interface{}) (int, bool) {\n\ti := 0\n\tj := -1\n\n\tfor n := l.First(); n != nil; n = n.Next() {\n\t\tif n.Value == v {\n\t\t\tj = i\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn j, j != -1\n}\n\nfunc (l *LinkedList) MoveAfter(n, p *Node) {\n\tif n.list != l || p.list != l || n == p {\n\t\treturn\n\t}\n\n\tl.InsertAfter(l.Remove(n).Value, p)\n}\n\nfunc (l *LinkedList) MoveBefore(n, p *Node) {\n\tif n.list != l || p.list != l || n == p {\n\t\treturn\n\t}\n\n\tl.InsertBefore(l.Remove(n).Value, p)\n}\n\nfunc (l *LinkedList) MoveToBack(n *Node) {\n\tl.MoveAfter(n, l.last)\n}\n\nfunc (l *LinkedList) MoveToFront(n *Node) {\n\tl.MoveBefore(n, l.first)\n}\n<|endoftext|>"} {"text":"package main\n\nconst kernelSource = `\nstatic inline ulong rotr64( __const ulong w, __const unsigned c ) { return ( w >> c ) | ( w << ( 64 - c ) ); }\n\n__constant static const uchar blake2b_sigma[12][16] = {\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,\n\t{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,\n\t{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,\n\t{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,\n\t{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,\n\t{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,\n\t{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,\n\t{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,\n\t{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } };\n\n\/\/ Target is passed in via headerIn[32 - 29]\n__kernel void nonceGrind(__global ulong *headerIn, __global ulong *nonceOut) {\n\tulong target = headerIn[4];\n\tulong m[16] = {\theaderIn[0], headerIn[1],\n\t               headerIn[2], headerIn[3],\n\t               (ulong)get_global_id(0), headerIn[5],\n\t               headerIn[6], headerIn[7],\n\t               headerIn[8], headerIn[9], 0, 0, 0, 0, 0, 0 };\n\n\tulong v[16] = { 0x6a09e667f2bdc928, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t                0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,\n\t                0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t                0x510e527fade68281, 0x9b05688c2b3e6c1f, 0xe07c265404be4294, 0x5be0cd19137e2179 };\n\n\n\n#define G(r,i,a,b,c,d) \\\n\ta = a + b + m[blake2b_sigma[r][2*i]]; \\\n\td = rotr64(d ^ a, 32); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 24); \\\n\ta = a + b + m[blake2b_sigma[r][2*i+1]]; \\\n\td = rotr64(d ^ a, 16); \\\n\tc = c + d; \\\n\tb = rotr64(b ^ c, 63);\n\n#define ROUND(r) \\\n\tG(r,0,v[ 0],v[ 4],v[ 8],v[12]); \\\n\tG(r,1,v[ 1],v[ 5],v[ 9],v[13]); \\\n\tG(r,2,v[ 2],v[ 6],v[10],v[14]); \\\n\tG(r,3,v[ 3],v[ 7],v[11],v[15]); \\\n\tG(r,4,v[ 0],v[ 5],v[10],v[15]); \\\n\tG(r,5,v[ 1],v[ 6],v[11],v[12]); \\\n\tG(r,6,v[ 2],v[ 7],v[ 8],v[13]); \\\n\tG(r,7,v[ 3],v[ 4],v[ 9],v[14]);\n\n\tROUND( 0 );\n\tROUND( 1 );\n\tROUND( 2 );\n\tROUND( 3 );\n\tROUND( 4 );\n\tROUND( 5 );\n\tROUND( 6 );\n\tROUND( 7 );\n\tROUND( 8 );\n\tROUND( 9 );\n\tROUND( 10 );\n\tROUND( 11 );\n#undef G\n#undef ROUND\n\n\tif (as_ulong(as_uchar8(0x6a09e667f2bdc928 ^ v[0] ^ v[8]).s76543210) < target) {\n\t\t*nonceOut = 
m[4];\n\t\treturn;\n\t}\n}\n`\nuint2 mathpackage main\n\nconst kernelSource = `\n\ninline static uint2 ror64(const uint2 x, const uint y)\n{\n return (uint2)(((x).x>>y)^((x).y<<(32-y)),((x).y>>y)^((x).x<<(32-y)));\n}\n\ninline static uint2 ror64_2(const uint2 x, const uint y)\n{\n return (uint2)(((x).y>>(y-32))^((x).x<<(64-y)),((x).x>>(y-32))^((x).y<<(64-y)));\n}\n\n\n__constant static const uchar blake2b_sigma[12][16] = {\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,\n\t{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,\n\t{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,\n\t{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,\n\t{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,\n\t{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,\n\t{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,\n\t{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,\n\t{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,\n\t{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,\n\t{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } };\n\n\/\/ Target is passed in via headerIn[32 - 29]\n__kernel void nonceGrind(__global ulong *headerIn, __global ulong *nonceOut) {\n\tulong target = headerIn[4];\n\tulong m[16] = {\theaderIn[0], headerIn[1],\n\t headerIn[2], headerIn[3],\n\t (ulong)get_global_id(0), headerIn[5],\n\t headerIn[6], headerIn[7],\n\t headerIn[8], headerIn[9], 0, 0, 0, 0, 0, 0 };\n\n\tulong v[16] = { 0x6a09e667f2bdc928, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,\n\t 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,\n\t 0x510e527fade68281, 0x9b05688c2b3e6c1f, 0xe07c265404be4294, 0x5be0cd19137e2179 };\n\n\n\n#define G(r,i,a,b,c,d) \\\n\ta = a + b + m[ blake2b_sigma[r][2*i] ]; \\\n\t((uint2*)&d)[0] = ((uint2*)&d)[0].yx ^ ((uint2*)&a)[0].yx; \\\n\tc = c + d; \\\n\t((uint2*)&b)[0] = ror64( ((uint2*)&b)[0] ^ ((uint2*)&c)[0], 24U); \\\n\ta = a + b + m[ blake2b_sigma[r][2*i+1] ]; \\\n\t((uint2*)&d)[0] = ror64( ((uint2*)&d)[0] ^ ((uint2*)&a)[0], 16U); \\\n\tc = c + d; \\\n ((uint2*)&b)[0] = ror64_2( ((uint2*)&b)[0] ^ ((uint2*)&c)[0], 63U);\n\n\n#define ROUND(r) \\\n\tG(r,0,v[ 0],v[ 4],v[ 8],v[12]); \\\n\tG(r,1,v[ 1],v[ 5],v[ 9],v[13]); \\\n\tG(r,2,v[ 2],v[ 6],v[10],v[14]); \\\n\tG(r,3,v[ 3],v[ 7],v[11],v[15]); \\\n\tG(r,4,v[ 0],v[ 5],v[10],v[15]); \\\n\tG(r,5,v[ 1],v[ 6],v[11],v[12]); \\\n\tG(r,6,v[ 2],v[ 7],v[ 8],v[13]); \\\n\tG(r,7,v[ 3],v[ 4],v[ 9],v[14]);\n\n\tROUND( 0 );\n\tROUND( 1 );\n\tROUND( 2 );\n\tROUND( 3 );\n\tROUND( 4 );\n\tROUND( 5 );\n\tROUND( 6 );\n\tROUND( 7 );\n\tROUND( 8 );\n\tROUND( 9 );\n\tROUND( 10 );\n\tROUND( 11 );\n#undef G\n#undef ROUND\n\n\tif (as_ulong(as_uchar8(0x6a09e667f2bdc928 ^ v[0] ^ v[8]).s76543210) < target) {\n\t\t*nonceOut = m[4];\n\t\treturn;\n\t}\n}\n`\n<|endoftext|>"} {"text":"\/*\n Copyright 2020 Docker, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing 
permissions and\n limitations under the License.\n*\/\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/containerinstance\/mgmt\/2018-10-01\/containerinstance\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/to\"\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/docker\/api\/azure\/convert\"\n\t\"github.com\/docker\/api\/azure\/login\"\n\t\"github.com\/docker\/api\/backend\"\n\t\"github.com\/docker\/api\/compose\"\n\t\"github.com\/docker\/api\/containers\"\n\tapicontext \"github.com\/docker\/api\/context\"\n\t\"github.com\/docker\/api\/context\/cloud\"\n\t\"github.com\/docker\/api\/context\/store\"\n\t\"github.com\/docker\/api\/errdefs\"\n)\n\nconst (\n\tsingleContainerName = \"single--container--aci\"\n\tcomposeContainerSeparator = \"_\"\n)\n\n\/\/ ErrNoSuchContainer is returned when the mentioned container does not exist\nvar ErrNoSuchContainer = errors.New(\"no such container\")\n\nfunc init() {\n\tbackend.Register(\"aci\", \"aci\", service, getCloudService)\n}\n\nfunc service(ctx context.Context) (backend.Service, error) {\n\tcontextStore := store.ContextStore(ctx)\n\tcurrentContext := apicontext.CurrentContext(ctx)\n\tvar aciContext store.AciContext\n\n\tif err := contextStore.GetEndpoint(currentContext, &aciContext); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn getAciAPIService(aciContext), nil\n}\n\nfunc getCloudService() (cloud.Service, error) {\n\tservice, err := login.NewAzureLoginService()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &aciCloudService{\n\t\tloginService: service,\n\t}, nil\n}\n\nfunc getAciAPIService(aciCtx store.AciContext) *aciAPIService {\n\treturn &aciAPIService{\n\t\taciContainerService: &aciContainerService{\n\t\t\tctx: aciCtx,\n\t\t},\n\t\taciComposeService: &aciComposeService{\n\t\t\tctx: aciCtx,\n\t\t},\n\t}\n}\n\ntype aciAPIService struct {\n\t*aciContainerService\n\t*aciComposeService\n}\n\nfunc (a *aciAPIService) ContainerService() containers.Service {\n\treturn a.aciContainerService\n}\n\nfunc (a *aciAPIService) ComposeService() compose.Service {\n\treturn a.aciComposeService\n}\n\ntype aciContainerService struct {\n\tctx store.AciContext\n}\n\nfunc (cs *aciContainerService) List(ctx context.Context, _ bool) ([]containers.Container, error) {\n\tgroupsClient, err := getContainerGroupsClient(cs.ctx.SubscriptionID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar containerGroups []containerinstance.ContainerGroup\n\tresult, err := groupsClient.ListByResourceGroup(ctx, cs.ctx.ResourceGroup)\n\tif err != nil {\n\t\treturn []containers.Container{}, err\n\t}\n\n\tfor result.NotDone() {\n\t\tcontainerGroups = append(containerGroups, result.Values()...)\n\t\tif err := result.NextWithContext(ctx); err != nil {\n\t\t\treturn []containers.Container{}, err\n\t\t}\n\t}\n\n\tvar res []containers.Container\n\tfor _, containerGroup := range containerGroups {\n\t\tgroup, err := groupsClient.Get(ctx, cs.ctx.ResourceGroup, *containerGroup.Name)\n\t\tif err != nil {\n\t\t\treturn []containers.Container{}, err\n\t\t}\n\n\t\tfor _, container := range *group.Containers {\n\t\t\tvar containerID string\n\t\t\t\/\/ don't list sidecar container\n\t\t\tif *container.Name == convert.ComposeDNSSidecarName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif *container.Name == singleContainerName {\n\t\t\t\tcontainerID = 
*containerGroup.Name\n\t\t\t} else {\n\t\t\t\tcontainerID = *containerGroup.Name + composeContainerSeparator + *container.Name\n\t\t\t}\n\t\t\tstatus := \"Unknown\"\n\t\t\tif container.InstanceView != nil && container.InstanceView.CurrentState != nil {\n\t\t\t\tstatus = *container.InstanceView.CurrentState.State\n\t\t\t}\n\n\t\t\tres = append(res, containers.Container{\n\t\t\t\tID: containerID,\n\t\t\t\tImage: *container.Image,\n\t\t\t\tStatus: status,\n\t\t\t\tPorts: convert.ToPorts(group.IPAddress, *container.Ports),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc (cs *aciContainerService) Run(ctx context.Context, r containers.ContainerConfig) error {\n\tif strings.Contains(r.ID, composeContainerSeparator) {\n\t\treturn errors.New(fmt.Sprintf(\"invalid container name. ACI container name cannot include %q\", composeContainerSeparator))\n\t}\n\n\tvar ports []types.ServicePortConfig\n\tfor _, p := range r.Ports {\n\t\tports = append(ports, types.ServicePortConfig{\n\t\t\tTarget: p.ContainerPort,\n\t\t\tPublished: p.HostPort,\n\t\t})\n\t}\n\n\tprojectVolumes, serviceConfigVolumes, err := convert.GetRunVolumes(r.Volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject := types.Project{\n\t\tName: r.ID,\n\t\tServices: []types.ServiceConfig{\n\t\t\t{\n\t\t\t\tName: singleContainerName,\n\t\t\t\tImage: r.Image,\n\t\t\t\tPorts: ports,\n\t\t\t\tLabels: r.Labels,\n\t\t\t\tVolumes: serviceConfigVolumes,\n\t\t\t\tDeploy: &types.DeployConfig{\n\t\t\t\t\tResources: types.Resources{\n\t\t\t\t\t\tLimits: &types.Resource{\n\t\t\t\t\t\t\tNanoCPUs: fmt.Sprintf(\"%f\", r.CPULimit),\n\t\t\t\t\t\t\tMemoryBytes: types.UnitBytes(r.MemLimit.Value()),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tVolumes: projectVolumes,\n\t}\n\n\tlogrus.Debugf(\"Running container %q with name %q\\n\", r.Image, r.ID)\n\tgroupDefinition, err := convert.ToContainerGroup(cs.ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn createACIContainers(ctx, cs.ctx, groupDefinition)\n}\n\nfunc (cs *aciContainerService) Stop(ctx context.Context, containerName string, timeout *uint32) error {\n\treturn errdefs.ErrNotImplemented\n}\n\nfunc getGroupAndContainerName(containerID string) (groupName string, containerName string) {\n\ttokens := strings.Split(containerID, composeContainerSeparator)\n\tgroupName = tokens[0]\n\tif len(tokens) > 1 {\n\t\tcontainerName = tokens[len(tokens)-1]\n\t\tgroupName = containerID[:len(containerID)-(len(containerName)+1)]\n\t} else {\n\t\tcontainerName = singleContainerName\n\t}\n\treturn groupName, containerName\n}\n\nfunc (cs *aciContainerService) Exec(ctx context.Context, name string, command string, reader io.Reader, writer io.Writer) error {\n\tgroupName, containerAciName := getGroupAndContainerName(name)\n\tcontainerExecResponse, err := execACIContainer(ctx, cs.ctx, command, groupName, containerAciName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn exec(\n\t\tcontext.Background(),\n\t\t*containerExecResponse.WebSocketURI,\n\t\t*containerExecResponse.Password,\n\t\treader,\n\t\twriter,\n\t)\n}\n\nfunc (cs *aciContainerService) Logs(ctx context.Context, containerName string, req containers.LogsRequest) error {\n\tgroupName, containerAciName := getGroupAndContainerName(containerName)\n\tvar tail *int32\n\n\tif req.Follow {\n\t\treturn streamLogs(ctx, cs.ctx, groupName, containerAciName, req.Writer)\n\t}\n\n\tif req.Tail != \"all\" {\n\t\treqTail, err := strconv.Atoi(req.Tail)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ti32 := int32(reqTail)\n\t\ttail = 
&i32\n\t}\n\n\tlogs, err := getACIContainerLogs(ctx, cs.ctx, groupName, containerAciName, tail)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Fprint(req.Writer, logs)\n\treturn err\n}\n\nfunc (cs *aciContainerService) Delete(ctx context.Context, containerID string, _ bool) error {\n\tgroupName, containerName := getGroupAndContainerName(containerID)\n\tif groupName != containerID {\n\t\treturn errors.New(fmt.Sprintf(`cannot delete service \"%s\" from compose app \"%s\", you must delete the entire compose app with docker compose down`, containerName, groupName))\n\t}\n\tcg, err := deleteACIContainerGroup(ctx, cs.ctx, groupName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn ErrNoSuchContainer\n\t}\n\n\treturn err\n}\n\nfunc (cs *aciContainerService) Inspect(ctx context.Context, containerID string) (containers.Container, error) {\n\tgroupName, containerName := getGroupAndContainerName(containerID)\n\n\tcg, err := getACIContainerGroup(ctx, cs.ctx, groupName)\n\tif err != nil {\n\t\treturn containers.Container{}, err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn containers.Container{}, ErrNoSuchContainer\n\t}\n\n\tvar cc containerinstance.Container\n\tvar found = false\n\tfor _, c := range *cg.Containers {\n\t\tif to.String(c.Name) == containerName {\n\t\t\tcc = c\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn containers.Container{}, ErrNoSuchContainer\n\t}\n\n\treturn convert.ContainerGroupToContainer(containerID, cg, cc)\n}\n\ntype aciComposeService struct {\n\tctx store.AciContext\n}\n\nfunc (cs *aciComposeService) Up(ctx context.Context, opts cli.ProjectOptions) error {\n\tproject, err := cli.ProjectFromOptions(&opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Debugf(\"Up on project with name %q\\n\", project.Name)\n\tgroupDefinition, err := convert.ToContainerGroup(cs.ctx, *project)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createOrUpdateACIContainers(ctx, cs.ctx, groupDefinition)\n}\n\nfunc (cs *aciComposeService) Down(ctx context.Context, opts cli.ProjectOptions) error {\n\tvar project types.Project\n\n\tif opts.Name != \"\" {\n\t\tproject = types.Project{Name: opts.Name}\n\t} else {\n\t\tfullProject, err := cli.ProjectFromOptions(&opts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject = *fullProject\n\t}\n\tlogrus.Debugf(\"Down on project with name %q\\n\", project.Name)\n\n\tcg, err := deleteACIContainerGroup(ctx, cs.ctx, project.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn ErrNoSuchContainer\n\t}\n\n\treturn err\n}\n\ntype aciCloudService struct {\n\tloginService login.AzureLoginService\n}\n\nfunc (cs *aciCloudService) Login(ctx context.Context, params map[string]string) error {\n\treturn cs.loginService.Login(ctx, params[login.TenantIDLoginParam])\n}\n\nfunc (cs *aciCloudService) CreateContextData(ctx context.Context, params map[string]string) (interface{}, string, error) {\n\tcontextHelper := newContextCreateHelper()\n\treturn contextHelper.createContextData(ctx, params)\n}\n@gtardif @rumpl Use %q instead of \\\"%s\\\"\/*\n Copyright 2020 Docker, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed 
on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/containerinstance\/mgmt\/2018-10-01\/containerinstance\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/to\"\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/docker\/api\/azure\/convert\"\n\t\"github.com\/docker\/api\/azure\/login\"\n\t\"github.com\/docker\/api\/backend\"\n\t\"github.com\/docker\/api\/compose\"\n\t\"github.com\/docker\/api\/containers\"\n\tapicontext \"github.com\/docker\/api\/context\"\n\t\"github.com\/docker\/api\/context\/cloud\"\n\t\"github.com\/docker\/api\/context\/store\"\n\t\"github.com\/docker\/api\/errdefs\"\n)\n\nconst (\n\tsingleContainerName = \"single--container--aci\"\n\tcomposeContainerSeparator = \"_\"\n)\n\n\/\/ ErrNoSuchContainer is returned when the mentioned container does not exist\nvar ErrNoSuchContainer = errors.New(\"no such container\")\n\nfunc init() {\n\tbackend.Register(\"aci\", \"aci\", service, getCloudService)\n}\n\nfunc service(ctx context.Context) (backend.Service, error) {\n\tcontextStore := store.ContextStore(ctx)\n\tcurrentContext := apicontext.CurrentContext(ctx)\n\tvar aciContext store.AciContext\n\n\tif err := contextStore.GetEndpoint(currentContext, &aciContext); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn getAciAPIService(aciContext), nil\n}\n\nfunc getCloudService() (cloud.Service, error) {\n\tservice, err := login.NewAzureLoginService()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &aciCloudService{\n\t\tloginService: service,\n\t}, nil\n}\n\nfunc getAciAPIService(aciCtx store.AciContext) *aciAPIService {\n\treturn &aciAPIService{\n\t\taciContainerService: &aciContainerService{\n\t\t\tctx: aciCtx,\n\t\t},\n\t\taciComposeService: &aciComposeService{\n\t\t\tctx: aciCtx,\n\t\t},\n\t}\n}\n\ntype aciAPIService struct {\n\t*aciContainerService\n\t*aciComposeService\n}\n\nfunc (a *aciAPIService) ContainerService() containers.Service {\n\treturn a.aciContainerService\n}\n\nfunc (a *aciAPIService) ComposeService() compose.Service {\n\treturn a.aciComposeService\n}\n\ntype aciContainerService struct {\n\tctx store.AciContext\n}\n\nfunc (cs *aciContainerService) List(ctx context.Context, _ bool) ([]containers.Container, error) {\n\tgroupsClient, err := getContainerGroupsClient(cs.ctx.SubscriptionID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar containerGroups []containerinstance.ContainerGroup\n\tresult, err := groupsClient.ListByResourceGroup(ctx, cs.ctx.ResourceGroup)\n\tif err != nil {\n\t\treturn []containers.Container{}, err\n\t}\n\n\tfor result.NotDone() {\n\t\tcontainerGroups = append(containerGroups, result.Values()...)\n\t\tif err := result.NextWithContext(ctx); err != nil {\n\t\t\treturn []containers.Container{}, err\n\t\t}\n\t}\n\n\tvar res []containers.Container\n\tfor _, containerGroup := range containerGroups {\n\t\tgroup, err := groupsClient.Get(ctx, cs.ctx.ResourceGroup, *containerGroup.Name)\n\t\tif err != nil {\n\t\t\treturn []containers.Container{}, err\n\t\t}\n\n\t\tfor _, container := range *group.Containers {\n\t\t\tvar containerID string\n\t\t\t\/\/ don't list sidecar container\n\t\t\tif 
*container.Name == convert.ComposeDNSSidecarName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif *container.Name == singleContainerName {\n\t\t\t\tcontainerID = *containerGroup.Name\n\t\t\t} else {\n\t\t\t\tcontainerID = *containerGroup.Name + composeContainerSeparator + *container.Name\n\t\t\t}\n\t\t\tstatus := \"Unknown\"\n\t\t\tif container.InstanceView != nil && container.InstanceView.CurrentState != nil {\n\t\t\t\tstatus = *container.InstanceView.CurrentState.State\n\t\t\t}\n\n\t\t\tres = append(res, containers.Container{\n\t\t\t\tID: containerID,\n\t\t\t\tImage: *container.Image,\n\t\t\t\tStatus: status,\n\t\t\t\tPorts: convert.ToPorts(group.IPAddress, *container.Ports),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc (cs *aciContainerService) Run(ctx context.Context, r containers.ContainerConfig) error {\n\tif strings.Contains(r.ID, composeContainerSeparator) {\n\t\treturn errors.New(fmt.Sprintf(\"invalid container name. ACI container name cannot include %q\", composeContainerSeparator))\n\t}\n\n\tvar ports []types.ServicePortConfig\n\tfor _, p := range r.Ports {\n\t\tports = append(ports, types.ServicePortConfig{\n\t\t\tTarget: p.ContainerPort,\n\t\t\tPublished: p.HostPort,\n\t\t})\n\t}\n\n\tprojectVolumes, serviceConfigVolumes, err := convert.GetRunVolumes(r.Volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject := types.Project{\n\t\tName: r.ID,\n\t\tServices: []types.ServiceConfig{\n\t\t\t{\n\t\t\t\tName: singleContainerName,\n\t\t\t\tImage: r.Image,\n\t\t\t\tPorts: ports,\n\t\t\t\tLabels: r.Labels,\n\t\t\t\tVolumes: serviceConfigVolumes,\n\t\t\t\tDeploy: &types.DeployConfig{\n\t\t\t\t\tResources: types.Resources{\n\t\t\t\t\t\tLimits: &types.Resource{\n\t\t\t\t\t\t\tNanoCPUs: fmt.Sprintf(\"%f\", r.CPULimit),\n\t\t\t\t\t\t\tMemoryBytes: types.UnitBytes(r.MemLimit.Value()),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tVolumes: projectVolumes,\n\t}\n\n\tlogrus.Debugf(\"Running container %q with name %q\\n\", r.Image, r.ID)\n\tgroupDefinition, err := convert.ToContainerGroup(cs.ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn createACIContainers(ctx, cs.ctx, groupDefinition)\n}\n\nfunc (cs *aciContainerService) Stop(ctx context.Context, containerName string, timeout *uint32) error {\n\treturn errdefs.ErrNotImplemented\n}\n\nfunc getGroupAndContainerName(containerID string) (groupName string, containerName string) {\n\ttokens := strings.Split(containerID, composeContainerSeparator)\n\tgroupName = tokens[0]\n\tif len(tokens) > 1 {\n\t\tcontainerName = tokens[len(tokens)-1]\n\t\tgroupName = containerID[:len(containerID)-(len(containerName)+1)]\n\t} else {\n\t\tcontainerName = singleContainerName\n\t}\n\treturn groupName, containerName\n}\n\nfunc (cs *aciContainerService) Exec(ctx context.Context, name string, command string, reader io.Reader, writer io.Writer) error {\n\tgroupName, containerAciName := getGroupAndContainerName(name)\n\tcontainerExecResponse, err := execACIContainer(ctx, cs.ctx, command, groupName, containerAciName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn exec(\n\t\tcontext.Background(),\n\t\t*containerExecResponse.WebSocketURI,\n\t\t*containerExecResponse.Password,\n\t\treader,\n\t\twriter,\n\t)\n}\n\nfunc (cs *aciContainerService) Logs(ctx context.Context, containerName string, req containers.LogsRequest) error {\n\tgroupName, containerAciName := getGroupAndContainerName(containerName)\n\tvar tail *int32\n\n\tif req.Follow {\n\t\treturn streamLogs(ctx, cs.ctx, groupName, containerAciName, req.Writer)\n\t}\n\n\tif 
req.Tail != \"all\" {\n\t\treqTail, err := strconv.Atoi(req.Tail)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ti32 := int32(reqTail)\n\t\ttail = &i32\n\t}\n\n\tlogs, err := getACIContainerLogs(ctx, cs.ctx, groupName, containerAciName, tail)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Fprint(req.Writer, logs)\n\treturn err\n}\n\nfunc (cs *aciContainerService) Delete(ctx context.Context, containerID string, _ bool) error {\n\tgroupName, containerName := getGroupAndContainerName(containerID)\n\tif groupName != containerID {\n\t\treturn errors.New(fmt.Sprintf(\"cannot delete service %q from compose app %q, you must delete the entire compose app with docker compose down\", containerName, groupName))\n\t}\n\tcg, err := deleteACIContainerGroup(ctx, cs.ctx, groupName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn ErrNoSuchContainer\n\t}\n\n\treturn err\n}\n\nfunc (cs *aciContainerService) Inspect(ctx context.Context, containerID string) (containers.Container, error) {\n\tgroupName, containerName := getGroupAndContainerName(containerID)\n\n\tcg, err := getACIContainerGroup(ctx, cs.ctx, groupName)\n\tif err != nil {\n\t\treturn containers.Container{}, err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn containers.Container{}, ErrNoSuchContainer\n\t}\n\n\tvar cc containerinstance.Container\n\tvar found = false\n\tfor _, c := range *cg.Containers {\n\t\tif to.String(c.Name) == containerName {\n\t\t\tcc = c\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn containers.Container{}, ErrNoSuchContainer\n\t}\n\n\treturn convert.ContainerGroupToContainer(containerID, cg, cc)\n}\n\ntype aciComposeService struct {\n\tctx store.AciContext\n}\n\nfunc (cs *aciComposeService) Up(ctx context.Context, opts cli.ProjectOptions) error {\n\tproject, err := cli.ProjectFromOptions(&opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Debugf(\"Up on project with name %q\\n\", project.Name)\n\tgroupDefinition, err := convert.ToContainerGroup(cs.ctx, *project)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createOrUpdateACIContainers(ctx, cs.ctx, groupDefinition)\n}\n\nfunc (cs *aciComposeService) Down(ctx context.Context, opts cli.ProjectOptions) error {\n\tvar project types.Project\n\n\tif opts.Name != \"\" {\n\t\tproject = types.Project{Name: opts.Name}\n\t} else {\n\t\tfullProject, err := cli.ProjectFromOptions(&opts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject = *fullProject\n\t}\n\tlogrus.Debugf(\"Down on project with name %q\\n\", project.Name)\n\n\tcg, err := deleteACIContainerGroup(ctx, cs.ctx, project.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cg.StatusCode == http.StatusNoContent {\n\t\treturn ErrNoSuchContainer\n\t}\n\n\treturn err\n}\n\ntype aciCloudService struct {\n\tloginService login.AzureLoginService\n}\n\nfunc (cs *aciCloudService) Login(ctx context.Context, params map[string]string) error {\n\treturn cs.loginService.Login(ctx, params[login.TenantIDLoginParam])\n}\n\nfunc (cs *aciCloudService) CreateContextData(ctx context.Context, params map[string]string) (interface{}, string, error) {\n\tcontextHelper := newContextCreateHelper()\n\treturn contextHelper.createContextData(ctx, params)\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Message struct {\n\tText string `json:\"text\" firestore:\"text\"`\n\tAudience []string `json:\"audience\" 
firestore:\"audience\"`\n\tBingo bool `json:\"bingo\" firestore:\"bingo\"`\n}\n\nfunc (m *Message) SetText(t string, args ...interface{}) {\n\tm.Text = fmt.Sprintf(t, args...)\n}\n\nfunc (m *Message) SetAudience(a ...string) {\n\tm.Audience = a\n}\n\n\/\/ Game is the master structure for the game\ntype Game struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tMaster Master `json:\"master\" firestore:\"-\"`\n\tActive bool `json:\"active\"`\n}\n\n\/\/ NewBoard creates a new board for a user.\nfunc (g *Game) NewBoard(p Player) Board {\n\tb := Board{}\n\tb.Game = g.ID\n\tb.Player = p\n\tb.Load(g.Master.Phrases())\n\treturn b\n\n}\n\n\/\/ JSON Returns the given Board struct as a JSON string\nfunc (g Game) JSON() (string, error) {\n\n\tbytes, err := json.Marshal(g)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not marshal json for response: %s\", err)\n\t}\n\n\treturn string(bytes), nil\n}\n\n\/\/ Master is the collection of all of the people who have selected which\n\/\/ element in the game\ntype Master struct {\n\tRecords []Record `json:\"record\"`\n}\n\n\/\/ Load adds the master list of phrases to the game.\nfunc (m *Master) Load(p []Phrase) {\n\tfor _, v := range p {\n\t\tr := Record{}\n\t\tr.Phrase = v\n\t\tm.Records = append(m.Records, r)\n\t}\n}\n\n\/\/ Phrases returns the List of phrases to populate boards.\nfunc (m Master) Phrases() []Phrase {\n\tresult := []Phrase{}\n\tfor _, v := range m.Records {\n\t\tresult = append(result, v.Phrase)\n\t}\n\treturn result\n}\n\n\/\/ Select marks a phrase as selected by one or more players\nfunc (m *Master) Select(ph Phrase, pl Player) Record {\n\tr := Record{}\n\tfor i, v := range m.Records {\n\n\t\tif v.Phrase.ID == ph.ID {\n\t\t\tif v.Players.IsMember(pl) {\n\t\t\t\tfmt.Printf(\"Was member, removing. \\n\")\n\t\t\t\tnew := v.Players.Remove(pl)\n\t\t\t\tv.Players = new\n\n\t\t\t\tif len(new) == 0 {\n\t\t\t\t\tv.Phrase.Selected = false\n\t\t\t\t}\n\t\t\t\tm.Records[i] = v\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tfmt.Printf(\"Was not member, adding. 
\\n\")\n\t\t\tv.Phrase.Selected = true\n\t\t\tv.Players = append(v.Players, pl)\n\t\t\tm.Records[i] = v\n\t\t\treturn v\n\t\t}\n\t}\n\treturn r\n}\n\n\/\/ Record is a structure that keeps track of who has selected which Phrase\ntype Record struct {\n\tID string `json:\"id\"`\n\tPhrase Phrase `json:\"phrase\"`\n\tPlayers Players `json:\"players\"`\n}\n\n\/\/ Player is a human user who is playing the game.\ntype Player struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n\tAdmin bool `json:\"admin\"`\n}\n\n\/\/ Players is a slice of Player.\ntype Players []Player\n\n\/\/ IsMember checks to see if a player is in the collection already\nfunc (ps Players) IsMember(p Player) bool {\n\tfor _, v := range ps {\n\t\tif v.Email == p.Email {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Remove removes a particular player from the list.\nfunc (ps *Players) Remove(p Player) Players {\n\tout := Players{}\n\tfor _, v := range *ps {\n\t\tif v.Email != p.Email {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}\n\n\/\/ Add adds a particular player from the list.\nfunc (ps *Players) Add(p Player) {\n\tout := Players{}\n\tout = append(out, p)\n\tps = &out\n\treturn\n}\n\n\/\/ Board is an individual board that the players use to play bingo\ntype Board struct {\n\tID string `json:\"id\"`\n\tGame string `json:\"game\"`\n\tPlayer Player `json:\"player\"`\n\tBingoDeclared bool `json:\"bingodeclared\"`\n\tPhrases []Phrase `json:\"phrases\" firestore:\"-\"`\n}\n\n\/\/ Bingo determins if the correct sequence of items have been Selected to\n\/\/ make bingo on this board.\nfunc (b *Board) Bingo() bool {\n\tdiag1 := []string{\"B1\", \"I2\", \"N3\", \"G4\", \"O5\"}\n\tdiag2 := []string{\"B5\", \"I4\", \"N3\", \"G2\", \"O1\"}\n\tcounts := make(map[string]int)\n\n\tfor _, v := range b.Phrases {\n\t\tif v.Selected {\n\t\t\tcounts[v.Column]++\n\t\t\tcounts[v.Row]++\n\t\t}\n\n\t\tfor _, sub := range diag1 {\n\t\t\tif sub == v.Position() {\n\t\t\t\tcounts[\"diag1\"]++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor _, sub := range diag2 {\n\t\t\tif sub == v.Position() {\n\t\t\t\tcounts[\"diag2\"]++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, v := range counts {\n\t\tif v == 5 {\n\t\t\tfmt.Printf(\"Bingo Declared\\n\")\n\t\t\tb.BingoDeclared = true\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Select records if a phrase on the board has been selected.\nfunc (b *Board) Select(ph Phrase) Phrase {\n\tfor i, v := range b.Phrases {\n\t\tif v.ID == ph.ID {\n\t\t\tif v.Selected {\n\t\t\t\tv.Selected = false\n\t\t\t\tb.Phrases[i] = v\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tv.Selected = true\n\t\t\tb.Phrases[i] = v\n\t\t\treturn v\n\t\t}\n\t}\n\treturn ph\n}\n\n\/\/ Load adds the phrases to the board and randomly orders them.\nfunc (b *Board) Load(p []Phrase) {\n\trand.Seed(randseedfunc())\n\trand.Shuffle(len(p), func(i, j int) { p[i], p[j] = p[j], p[i] })\n\n\tfor i, v := range p {\n\t\tv.Selected = false\n\t\tv.Column, v.Row = b.CalcColumnsRows(i + 1)\n\t\tv.DisplayOrder = i\n\t\tp[i] = v\n\t}\n\tb.Phrases = p\n}\n\nfunc (b *Board) CalcColumnsRows(i int) (string, string) {\n\tcolumn := \"\"\n\trow := \"\"\n\n\tswitch i % 5 {\n\tcase 1:\n\t\tcolumn = \"B\"\n\tcase 2:\n\t\tcolumn = \"I\"\n\tcase 3:\n\t\tcolumn = \"N\"\n\tcase 4:\n\t\tcolumn = \"G\"\n\tdefault:\n\t\tcolumn = \"O\"\n\t}\n\n\trow = strconv.Itoa(int(math.Round(float64((i - 1) \/ 5))))\n\n\treturn column, row\n}\n\n\/\/ JSON Returns the given Board struct as a JSON string\nfunc (b Board) JSON() (string, error) {\n\n\tbytes, 
err := json.Marshal(b)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not marshal json for response: %s\", err)\n\t}\n\n\treturn string(bytes), nil\n}\n\nfunc randomseed() int64 {\n\treturn time.Now().UnixNano()\n}\n\n\/\/ Phrase represents a statement, event or other such thing that we are on the\n\/\/ lookout for in this game of bingo.\ntype Phrase struct {\n\tID string `json:\"id\"`\n\tText string `json:\"text\"`\n\tSelected bool `json:\"selected\"`\n\tRow string `json:\"row\"`\n\tColumn string `json:\"column\"`\n\tDisplayOrder int `json:\"display_order\"`\n}\n\n\/\/ Position returns the combined Row and Column of the Phrase\nfunc (p Phrase) Position() string {\n\treturn p.Column + p.Row\n}\nEveryboard gets free by default now.package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Message struct {\n\tText string `json:\"text\" firestore:\"text\"`\n\tAudience []string `json:\"audience\" firestore:\"audience\"`\n\tBingo bool `json:\"bingo\" firestore:\"bingo\"`\n}\n\nfunc (m *Message) SetText(t string, args ...interface{}) {\n\tm.Text = fmt.Sprintf(t, args...)\n}\n\nfunc (m *Message) SetAudience(a ...string) {\n\tm.Audience = a\n}\n\n\/\/ Game is the master structure for the game\ntype Game struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tMaster Master `json:\"master\" firestore:\"-\"`\n\tActive bool `json:\"active\"`\n}\n\n\/\/ NewBoard creates a new board for a user.\nfunc (g *Game) NewBoard(p Player) Board {\n\tb := Board{}\n\tb.Game = g.ID\n\tb.Player = p\n\tb.Load(g.Master.Phrases())\n\treturn b\n\n}\n\n\/\/ JSON Returns the given Board struct as a JSON string\nfunc (g Game) JSON() (string, error) {\n\n\tbytes, err := json.Marshal(g)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not marshal json for response: %s\", err)\n\t}\n\n\treturn string(bytes), nil\n}\n\n\/\/ Master is the collection of all of the people who have selected which\n\/\/ element in the game\ntype Master struct {\n\tRecords []Record `json:\"record\"`\n}\n\n\/\/ Load adds the master list of phrases to the game.\nfunc (m *Master) Load(p []Phrase) {\n\tfor _, v := range p {\n\t\tr := Record{}\n\t\tr.Phrase = v\n\t\tm.Records = append(m.Records, r)\n\t}\n}\n\n\/\/ Phrases returns the List of phrases to populate boards.\nfunc (m Master) Phrases() []Phrase {\n\tresult := []Phrase{}\n\tfor _, v := range m.Records {\n\t\tresult = append(result, v.Phrase)\n\t}\n\treturn result\n}\n\n\/\/ Select marks a phrase as selected by one or more players\nfunc (m *Master) Select(ph Phrase, pl Player) Record {\n\tr := Record{}\n\tfor i, v := range m.Records {\n\n\t\tif v.Phrase.ID == ph.ID {\n\t\t\tif v.Players.IsMember(pl) {\n\t\t\t\tfmt.Printf(\"Was member, removing. \\n\")\n\t\t\t\tnew := v.Players.Remove(pl)\n\t\t\t\tv.Players = new\n\n\t\t\t\tif len(new) == 0 {\n\t\t\t\t\tv.Phrase.Selected = false\n\t\t\t\t}\n\t\t\t\tm.Records[i] = v\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tfmt.Printf(\"Was not member, adding. 
\\n\")\n\t\t\tv.Phrase.Selected = true\n\t\t\tv.Players = append(v.Players, pl)\n\t\t\tm.Records[i] = v\n\t\t\treturn v\n\t\t}\n\t}\n\treturn r\n}\n\n\/\/ Record is a structure that keeps track of who has selected which Phrase\ntype Record struct {\n\tID string `json:\"id\"`\n\tPhrase Phrase `json:\"phrase\"`\n\tPlayers Players `json:\"players\"`\n}\n\n\/\/ Player is a human user who is playing the game.\ntype Player struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n\tAdmin bool `json:\"admin\"`\n}\n\n\/\/ Players is a slice of Player.\ntype Players []Player\n\n\/\/ IsMember checks to see if a player is in the collection already\nfunc (ps Players) IsMember(p Player) bool {\n\tfor _, v := range ps {\n\t\tif v.Email == p.Email {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Remove removes a particular player from the list.\nfunc (ps *Players) Remove(p Player) Players {\n\tout := Players{}\n\tfor _, v := range *ps {\n\t\tif v.Email != p.Email {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}\n\n\/\/ Add adds a particular player from the list.\nfunc (ps *Players) Add(p Player) {\n\tout := Players{}\n\tout = append(out, p)\n\tps = &out\n\treturn\n}\n\n\/\/ Board is an individual board that the players use to play bingo\ntype Board struct {\n\tID string `json:\"id\"`\n\tGame string `json:\"game\"`\n\tPlayer Player `json:\"player\"`\n\tBingoDeclared bool `json:\"bingodeclared\"`\n\tPhrases []Phrase `json:\"phrases\" firestore:\"-\"`\n}\n\n\/\/ Bingo determins if the correct sequence of items have been Selected to\n\/\/ make bingo on this board.\nfunc (b *Board) Bingo() bool {\n\tdiag1 := []string{\"B1\", \"I2\", \"N3\", \"G4\", \"O5\"}\n\tdiag2 := []string{\"B5\", \"I4\", \"N3\", \"G2\", \"O1\"}\n\tcounts := make(map[string]int)\n\n\tfor _, v := range b.Phrases {\n\t\tif v.Selected {\n\t\t\tcounts[v.Column]++\n\t\t\tcounts[v.Row]++\n\t\t}\n\n\t\tfor _, sub := range diag1 {\n\t\t\tif sub == v.Position() {\n\t\t\t\tcounts[\"diag1\"]++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor _, sub := range diag2 {\n\t\t\tif sub == v.Position() {\n\t\t\t\tcounts[\"diag2\"]++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, v := range counts {\n\t\tif v == 5 {\n\t\t\tfmt.Printf(\"Bingo Declared\\n\")\n\t\t\tb.BingoDeclared = true\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Select records if a phrase on the board has been selected.\nfunc (b *Board) Select(ph Phrase) Phrase {\n\tfor i, v := range b.Phrases {\n\t\tif v.ID == ph.ID {\n\t\t\tif v.Selected {\n\t\t\t\tv.Selected = false\n\t\t\t\tb.Phrases[i] = v\n\t\t\t\treturn v\n\t\t\t}\n\t\t\tv.Selected = true\n\t\t\tb.Phrases[i] = v\n\t\t\treturn v\n\t\t}\n\t}\n\treturn ph\n}\n\n\/\/ Load adds the phrases to the board and randomly orders them.\nfunc (b *Board) Load(p []Phrase) {\n\trand.Seed(randseedfunc())\n\trand.Shuffle(len(p), func(i, j int) { p[i], p[j] = p[j], p[i] })\n\n\tfree := 0\n\tcenter := 12\n\n\tfor i, v := range p {\n\n\t\tv.Selected = false\n\t\tv.Column, v.Row = b.CalcColumnsRows(i + 1)\n\t\tv.DisplayOrder = i\n\n\t\tif v.Text == \"FREE\" {\n\t\t\tfree = i\n\t\t\tv.Selected = true\n\t\t}\n\t\tp[i] = v\n\n\t}\n\n\tp[free], p[center] = p[center], p[free]\n\n\tb.Phrases = p\n}\n\nfunc (b *Board) CalcColumnsRows(i int) (string, string) {\n\tcolumn := \"\"\n\trow := \"\"\n\n\tswitch i % 5 {\n\tcase 1:\n\t\tcolumn = \"B\"\n\tcase 2:\n\t\tcolumn = \"I\"\n\tcase 3:\n\t\tcolumn = \"N\"\n\tcase 4:\n\t\tcolumn = \"G\"\n\tdefault:\n\t\tcolumn = \"O\"\n\t}\n\n\trow = 
strconv.Itoa(int(math.Round(float64((i - 1) \/ 5))))\n\n\treturn column, row\n}\n\n\/\/ JSON Returns the given Board struct as a JSON string\nfunc (b Board) JSON() (string, error) {\n\n\tbytes, err := json.Marshal(b)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not marshal json for response: %s\", err)\n\t}\n\n\treturn string(bytes), nil\n}\n\nfunc randomseed() int64 {\n\treturn time.Now().UnixNano()\n}\n\n\/\/ Phrase represents a statement, event or other such thing that we are on the\n\/\/ lookout for in this game of bingo.\ntype Phrase struct {\n\tID string `json:\"id\"`\n\tText string `json:\"text\"`\n\tSelected bool `json:\"selected\"`\n\tRow string `json:\"row\"`\n\tColumn string `json:\"column\"`\n\tDisplayOrder int `json:\"display_order\"`\n}\n\n\/\/ Position returns the combined Row and Column of the Phrase\nfunc (p Phrase) Position() string {\n\treturn p.Column + p.Row\n}\n<|endoftext|>"} {"text":"package check_volume\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/appscode\/go\/flags\"\n\t\"github.com\/appscode\/searchlight\/pkg\/client\/k8s\"\n\t\"github.com\/appscode\/searchlight\/util\"\n\t\"github.com\/spf13\/cobra\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n)\n\nconst (\n\tawsElasticBlockStorePluginName = \"kubernetes.io~aws-ebs\"\n\tazureDataDiskPluginName = \"kubernetes.io~azure-disk\"\n\tazureFilePluginName = \"kubernetes.io~azure-file\"\n\tcephfsPluginName = \"kubernetes.io~cephfs\"\n\tcinderVolumePluginName = \"kubernetes.io~cinder\"\n\tconfigMapPluginName = \"kubernetes.io~configmap\"\n\tdownwardAPIPluginName = \"kubernetes.io~downward-api\"\n\temptyDirPluginName = \"kubernetes.io~empty-dir\"\n\tfcPluginName = \"kubernetes.io~fc\"\n\tflockerPluginName = \"kubernetes.io~flocker\"\n\tgcePersistentDiskPluginName = \"kubernetes.io~gce-pd\"\n\tgitRepoPluginName = \"kubernetes.io~git-repo\"\n\tglusterfsPluginName = \"kubernetes.io~glusterfs\"\n\thostPathPluginName = \"kubernetes.io~host-path\"\n\tiscsiPluginName = \"kubernetes.io~iscsi\"\n\tnfsPluginName = \"kubernetes.io~nfs\"\n\tquobytePluginName = \"kubernetes.io~quobyte\"\n\trbdPluginName = \"kubernetes.io~rbd\"\n\tsecretPluginName = \"kubernetes.io~secret\"\n\tvsphereVolumePluginName = \"kubernetes.io~vsphere-volume\"\n)\n\nfunc getVolumePluginName(volumeSource *kapi.VolumeSource) string {\n\tif volumeSource.AWSElasticBlockStore != nil {\n\t\treturn awsElasticBlockStorePluginName\n\t} else if volumeSource.AzureDisk != nil {\n\t\treturn azureDataDiskPluginName\n\t} else if volumeSource.AzureFile != nil {\n\t\treturn azureFilePluginName\n\t} else if volumeSource.CephFS != nil {\n\t\treturn cephfsPluginName\n\t} else if volumeSource.Cinder != nil {\n\t\treturn cinderVolumePluginName\n\t} else if volumeSource.ConfigMap != nil {\n\t\treturn configMapPluginName\n\t} else if volumeSource.DownwardAPI != nil {\n\t\treturn downwardAPIPluginName\n\t} else if volumeSource.EmptyDir != nil {\n\t\treturn emptyDirPluginName\n\t} else if volumeSource.FC != nil {\n\t\treturn fcPluginName\n\t} else if volumeSource.Flocker != nil {\n\t\treturn flockerPluginName\n\t} else if volumeSource.GCEPersistentDisk != nil {\n\t\treturn gcePersistentDiskPluginName\n\t} else if volumeSource.GitRepo != nil {\n\t\treturn gitRepoPluginName\n\t} else if volumeSource.Glusterfs != nil {\n\t\treturn glusterfsPluginName\n\t} else if volumeSource.HostPath != nil {\n\t\treturn hostPathPluginName\n\t} else if volumeSource.ISCSI != nil 
{\n\t\treturn iscsiPluginName\n\t} else if volumeSource.NFS != nil {\n\t\treturn nfsPluginName\n\t} else if volumeSource.Quobyte != nil {\n\t\treturn quobytePluginName\n\t} else if volumeSource.RBD != nil {\n\t\treturn rbdPluginName\n\t} else if volumeSource.Secret != nil {\n\t\treturn secretPluginName\n\t} else if volumeSource.VsphereVolume != nil {\n\t\treturn vsphereVolumePluginName\n\t}\n\treturn \"\"\n}\n\nfunc getPersistentVolumePluginName(volumeSource *kapi.PersistentVolumeSource) string {\n\tif volumeSource.AWSElasticBlockStore != nil {\n\t\treturn awsElasticBlockStorePluginName\n\t} else if volumeSource.AzureDisk != nil {\n\t\treturn azureDataDiskPluginName\n\t} else if volumeSource.AzureFile != nil {\n\t\treturn azureFilePluginName\n\t} else if volumeSource.CephFS != nil {\n\t\treturn cephfsPluginName\n\t} else if volumeSource.Cinder != nil {\n\t\treturn cinderVolumePluginName\n\t} else if volumeSource.FC != nil {\n\t\treturn fcPluginName\n\t} else if volumeSource.Flocker != nil {\n\t\treturn flockerPluginName\n\t} else if volumeSource.GCEPersistentDisk != nil {\n\t\treturn gcePersistentDiskPluginName\n\t} else if volumeSource.Glusterfs != nil {\n\t\treturn glusterfsPluginName\n\t} else if volumeSource.HostPath != nil {\n\t\treturn hostPathPluginName\n\t} else if volumeSource.ISCSI != nil {\n\t\treturn iscsiPluginName\n\t} else if volumeSource.NFS != nil {\n\t\treturn nfsPluginName\n\t} else if volumeSource.Quobyte != nil {\n\t\treturn quobytePluginName\n\t} else if volumeSource.RBD != nil {\n\t\treturn rbdPluginName\n\t} else if volumeSource.VsphereVolume != nil {\n\t\treturn vsphereVolumePluginName\n\t}\n\treturn \"\"\n}\n\nconst (\n\thostFactPort = 56977\n)\n\ntype request struct {\n\thost string\n\tname string\n\twarning float64\n\tcritical float64\n\tnode_stat bool\n\tsecret string\n}\n\ntype usageStat struct {\n\tPath string `json:\"path\"`\n\tFstype string `json:\"fstype\"`\n\tTotal uint64 `json:\"total\"`\n\tFree uint64 `json:\"free\"`\n\tUsed uint64 `json:\"used\"`\n\tUsedPercent float64 `json:\"usedPercent\"`\n\tInodesTotal uint64 `json:\"inodesTotal\"`\n\tInodesUsed uint64 `json:\"inodesUsed\"`\n\tInodesFree uint64 `json:\"inodesFree\"`\n\tInodesUsedPercent float64 `json:\"inodesUsedPercent\"`\n}\n\ntype authInfo struct {\n\tca string\n\tkey string\n\tcrt string\n\tauthToken string\n\tusername string\n\tpassword string\n}\n\nconst (\n\tca = \"ca.crt\"\n\tkey = \"hostfacts.key\"\n\tcrt = \"hostfacts.crt\"\n\tauthToken = \"auth_token\"\n\tusername = \"username\"\n\tpassword = \"password\"\n)\n\nfunc getHostfactsSecretData(kubeClient *k8s.KubeClient, secretName string) *authInfo {\n\tif secretName == \"\" {\n\t\treturn nil\n\t}\n\n\tparts := strings.Split(secretName, \".\")\n\tname := parts[0]\n\tnamespace := \"default\"\n\tif len(parts) > 1 {\n\t\tnamespace = parts[1]\n\t}\n\n\tsecret, err := kubeClient.Client.Core().Secrets(namespace).Get(name)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tauthData := &authInfo{\n\t\tca: string(secret.Data[ca]),\n\t\tkey: string(secret.Data[key]),\n\t\tcrt: string(secret.Data[crt]),\n\t\tauthToken: string(secret.Data[authToken]),\n\t\tusername: string(secret.Data[username]),\n\t\tpassword: string(secret.Data[password]),\n\t}\n\n\treturn authData\n}\n\nfunc getUsage(authInfo *authInfo, hostIP, path string) (*usageStat, error) {\n\tprotocol := \"http\"\n\tif authInfo != nil {\n\t\tprotocol = \"https\"\n\t}\n\n\turlStr := fmt.Sprintf(\"%v:\/\/%v:%v\/du?p=%v\", protocol, hostIP, hostFactPort, path)\n\treq, err := 
http.NewRequest(http.MethodGet, urlStr, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmTLSConfig := &tls.Config{}\n\n\tif authInfo != nil {\n\t\tif authInfo.username != \"\" && authInfo.password != \"\" {\n\t\t\treq.SetBasicAuth(authInfo.username, authInfo.password)\n\t\t} else if authInfo.authToken != \"\" {\n\t\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", authInfo.authToken))\n\t\t}\n\n\t\tif authInfo.ca != \"\" {\n\t\t\tcerts := x509.NewCertPool()\n\t\t\tcerts.AppendCertsFromPEM([]byte(authInfo.ca))\n\t\t\tmTLSConfig.RootCAs = certs\n\t\t\tif authInfo.crt != \"\" && authInfo.key != \"\" {\n\t\t\t\tcert, err := tls.X509KeyPair([]byte(authInfo.crt), []byte(authInfo.key))\n\t\t\t\tif err == nil {\n\t\t\t\t\tmTLSConfig.Certificates = []tls.Certificate{cert}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tmTLSConfig.InsecureSkipVerify = true\n\t\t}\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: mTLSConfig,\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespData, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tusages := make([]*usageStat, 1)\n\tif err = json.Unmarshal(respData, &usages); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn usages[0], nil\n}\n\nfunc checkResult(field string, warning, critical, result float64) (util.IcingaState, interface{}) {\n\tif result >= critical {\n\t\treturn util.Critical, fmt.Sprintf(\"%v used more than %v%%\", field, critical)\n\t}\n\tif result >= warning {\n\t\treturn util.Warning, fmt.Sprintf(\"%v used more than %v%%\", field, warning)\n\t}\n\treturn util.Ok, \"(Disk & Inodes)\"\n}\n\nfunc checkDiskStat(kubeClient *k8s.KubeClient, req *request, nodeIP, path string) (util.IcingaState, interface{}) {\n\tauthInfo := getHostfactsSecretData(kubeClient, req.secret)\n\n\tusage, err := getUsage(authInfo, nodeIP, path)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\twarning := req.warning\n\tcritical := req.critical\n\tstate, message := checkResult(\"Disk\", warning, critical, usage.UsedPercent)\n\tif state != util.Ok {\n\t\treturn state, message\n\t}\n\tstate, message = checkResult(\"Inodes\", warning, critical, usage.InodesUsedPercent)\n\treturn state, message\n}\n\nfunc checkNodeDiskStat(req *request) (util.IcingaState, interface{}) {\n\thost := req.host\n\tparts := strings.Split(host, \"@\")\n\tif len(parts) != 2 {\n\t\treturn util.Unknown, \"Invalid icinga host.name\"\n\t}\n\n\tkubeClient, err := k8s.NewClient()\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tnode_name := parts[0]\n\tnode, err := kubeClient.Client.Core().Nodes().Get(node_name)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tif node == nil {\n\t\treturn util.Unknown, \"Node not found\"\n\t}\n\n\thostIP := \"\"\n\tfor _, address := range node.Status.Addresses {\n\t\tif address.Type == kapi.NodeInternalIP {\n\t\t\thostIP = address.Address\n\t\t}\n\t}\n\n\tif hostIP == \"\" {\n\t\treturn util.Unknown, \"Node InternalIP not found\"\n\t}\n\treturn checkDiskStat(kubeClient, req, hostIP, \"\/\")\n}\n\nfunc checkPodVolumeStat(req *request) (util.IcingaState, interface{}) {\n\thost := req.host\n\tname := req.name\n\tparts := strings.Split(host, \"@\")\n\tif len(parts) != 2 {\n\t\treturn util.Unknown, \"Invalid icinga host.name\"\n\t}\n\n\tkubeClient, err := k8s.NewClient()\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tpod_name := parts[0]\n\tnamespace := parts[1]\n\tpod, err := 
kubeClient.Client.Core().Pods(namespace).Get(pod_name)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tvar volumeSourcePluginName = \"\"\n\tvar volumeSourceName = \"\"\n\tfor _, volume := range pod.Spec.Volumes {\n\t\tif volume.Name == name {\n\t\t\tif volume.PersistentVolumeClaim != nil {\n\t\t\t\tclaim, err := kubeClient.Client.Core().\n\t\t\t\t\tPersistentVolumeClaims(namespace).Get(volume.PersistentVolumeClaim.ClaimName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn util.Unknown, err\n\n\t\t\t\t}\n\t\t\t\tvolume, err := kubeClient.Client.Core().PersistentVolumes().Get(claim.Spec.VolumeName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn util.Unknown, err\n\t\t\t\t}\n\t\t\t\tvolumeSourcePluginName = getPersistentVolumePluginName(&volume.Spec.PersistentVolumeSource)\n\t\t\t\tvolumeSourceName = volume.Name\n\n\t\t\t} else {\n\t\t\t\tvolumeSourcePluginName = getVolumePluginName(&volume.VolumeSource)\n\t\t\t\tvolumeSourceName = volume.Name\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif volumeSourcePluginName == \"\" {\n\t\treturn util.Unknown, errors.New(\"Invalid volume source\")\n\t}\n\n\tpath := fmt.Sprintf(\"\/var\/lib\/kubelet\/pods\/%v\/volumes\/%v\/%v\", pod.UID, volumeSourcePluginName, volumeSourceName)\n\treturn checkDiskStat(kubeClient, req, pod.Status.HostIP, path)\n}\n\nfunc NewCmd() *cobra.Command {\n\tvar req request\n\n\tc := &cobra.Command{\n\t\tUse: \"check_volume\",\n\t\tShort: \"Check kubernetes volume\",\n\t\tExample: \"\",\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tflags.EnsureRequiredFlags(cmd, \"host\")\n\t\t\tif req.node_stat {\n\t\t\t\tcheckNodeDiskStat(&req)\n\t\t\t} else {\n\t\t\t\tflags.EnsureRequiredFlags(cmd, \"name\")\n\t\t\t\tcheckPodVolumeStat(&req)\n\t\t\t}\n\t\t},\n\t}\n\n\tc.Flags().BoolVar(&req.node_stat, \"node_stat\", false, \"Checking Node disk size\")\n\tc.Flags().StringVarP(&req.secret, \"secret\", \"s\", \"\", `Kubernetes secret name`)\n\tc.Flags().StringVarP(&req.host, \"host\", \"H\", \"\", \"Icinga host name\")\n\tc.Flags().StringVarP(&req.name, \"name\", \"N\", \"\", \"Volume name\")\n\tc.Flags().Float64VarP(&req.warning, \"warning\", \"w\", 75.0, \"Warning level value (usage percentage)\")\n\tc.Flags().Float64VarP(&req.critical, \"critical\", \"c\", 90.0, \"Critical level value (usage percentage)\")\n\treturn c\n}\nUsed \"appscode\/go\/net\/httpclient\" as Client (#23)package check_volume\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/appscode\/go\/flags\"\n\t\"github.com\/appscode\/go\/net\/httpclient\"\n\t\"github.com\/appscode\/searchlight\/pkg\/client\/k8s\"\n\t\"github.com\/appscode\/searchlight\/util\"\n\t\"github.com\/spf13\/cobra\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n)\n\nconst (\n\tawsElasticBlockStorePluginName = \"kubernetes.io~aws-ebs\"\n\tazureDataDiskPluginName = \"kubernetes.io~azure-disk\"\n\tazureFilePluginName = \"kubernetes.io~azure-file\"\n\tcephfsPluginName = \"kubernetes.io~cephfs\"\n\tcinderVolumePluginName = \"kubernetes.io~cinder\"\n\tconfigMapPluginName = \"kubernetes.io~configmap\"\n\tdownwardAPIPluginName = \"kubernetes.io~downward-api\"\n\temptyDirPluginName = \"kubernetes.io~empty-dir\"\n\tfcPluginName = \"kubernetes.io~fc\"\n\tflockerPluginName = \"kubernetes.io~flocker\"\n\tgcePersistentDiskPluginName = \"kubernetes.io~gce-pd\"\n\tgitRepoPluginName = \"kubernetes.io~git-repo\"\n\tglusterfsPluginName = \"kubernetes.io~glusterfs\"\n\thostPathPluginName = \"kubernetes.io~host-path\"\n\tiscsiPluginName = \"kubernetes.io~iscsi\"\n\tnfsPluginName = 
\"kubernetes.io~nfs\"\n\tquobytePluginName = \"kubernetes.io~quobyte\"\n\trbdPluginName = \"kubernetes.io~rbd\"\n\tsecretPluginName = \"kubernetes.io~secret\"\n\tvsphereVolumePluginName = \"kubernetes.io~vsphere-volume\"\n)\n\nfunc getVolumePluginName(volumeSource *kapi.VolumeSource) string {\n\tif volumeSource.AWSElasticBlockStore != nil {\n\t\treturn awsElasticBlockStorePluginName\n\t} else if volumeSource.AzureDisk != nil {\n\t\treturn azureDataDiskPluginName\n\t} else if volumeSource.AzureFile != nil {\n\t\treturn azureFilePluginName\n\t} else if volumeSource.CephFS != nil {\n\t\treturn cephfsPluginName\n\t} else if volumeSource.Cinder != nil {\n\t\treturn cinderVolumePluginName\n\t} else if volumeSource.ConfigMap != nil {\n\t\treturn configMapPluginName\n\t} else if volumeSource.DownwardAPI != nil {\n\t\treturn downwardAPIPluginName\n\t} else if volumeSource.EmptyDir != nil {\n\t\treturn emptyDirPluginName\n\t} else if volumeSource.FC != nil {\n\t\treturn fcPluginName\n\t} else if volumeSource.Flocker != nil {\n\t\treturn flockerPluginName\n\t} else if volumeSource.GCEPersistentDisk != nil {\n\t\treturn gcePersistentDiskPluginName\n\t} else if volumeSource.GitRepo != nil {\n\t\treturn gitRepoPluginName\n\t} else if volumeSource.Glusterfs != nil {\n\t\treturn glusterfsPluginName\n\t} else if volumeSource.HostPath != nil {\n\t\treturn hostPathPluginName\n\t} else if volumeSource.ISCSI != nil {\n\t\treturn iscsiPluginName\n\t} else if volumeSource.NFS != nil {\n\t\treturn nfsPluginName\n\t} else if volumeSource.Quobyte != nil {\n\t\treturn quobytePluginName\n\t} else if volumeSource.RBD != nil {\n\t\treturn rbdPluginName\n\t} else if volumeSource.Secret != nil {\n\t\treturn secretPluginName\n\t} else if volumeSource.VsphereVolume != nil {\n\t\treturn vsphereVolumePluginName\n\t}\n\treturn \"\"\n}\n\nfunc getPersistentVolumePluginName(volumeSource *kapi.PersistentVolumeSource) string {\n\tif volumeSource.AWSElasticBlockStore != nil {\n\t\treturn awsElasticBlockStorePluginName\n\t} else if volumeSource.AzureDisk != nil {\n\t\treturn azureDataDiskPluginName\n\t} else if volumeSource.AzureFile != nil {\n\t\treturn azureFilePluginName\n\t} else if volumeSource.CephFS != nil {\n\t\treturn cephfsPluginName\n\t} else if volumeSource.Cinder != nil {\n\t\treturn cinderVolumePluginName\n\t} else if volumeSource.FC != nil {\n\t\treturn fcPluginName\n\t} else if volumeSource.Flocker != nil {\n\t\treturn flockerPluginName\n\t} else if volumeSource.GCEPersistentDisk != nil {\n\t\treturn gcePersistentDiskPluginName\n\t} else if volumeSource.Glusterfs != nil {\n\t\treturn glusterfsPluginName\n\t} else if volumeSource.HostPath != nil {\n\t\treturn hostPathPluginName\n\t} else if volumeSource.ISCSI != nil {\n\t\treturn iscsiPluginName\n\t} else if volumeSource.NFS != nil {\n\t\treturn nfsPluginName\n\t} else if volumeSource.Quobyte != nil {\n\t\treturn quobytePluginName\n\t} else if volumeSource.RBD != nil {\n\t\treturn rbdPluginName\n\t} else if volumeSource.VsphereVolume != nil {\n\t\treturn vsphereVolumePluginName\n\t}\n\treturn \"\"\n}\n\nconst (\n\thostFactPort = 56977\n)\n\ntype request struct {\n\thost string\n\tname string\n\twarning float64\n\tcritical float64\n\tnode_stat bool\n\tsecret string\n}\n\ntype usageStat struct {\n\tPath string `json:\"path\"`\n\tFstype string `json:\"fstype\"`\n\tTotal uint64 `json:\"total\"`\n\tFree uint64 `json:\"free\"`\n\tUsed uint64 `json:\"used\"`\n\tUsedPercent float64 `json:\"usedPercent\"`\n\tInodesTotal uint64 `json:\"inodesTotal\"`\n\tInodesUsed uint64 
`json:\"inodesUsed\"`\n\tInodesFree uint64 `json:\"inodesFree\"`\n\tInodesUsedPercent float64 `json:\"inodesUsedPercent\"`\n}\n\ntype authInfo struct {\n\tca []byte\n\tkey []byte\n\tcrt []byte\n\tauthToken string\n\tusername string\n\tpassword string\n}\n\nconst (\n\tca = \"ca.crt\"\n\tkey = \"hostfacts.key\"\n\tcrt = \"hostfacts.crt\"\n\tauthToken = \"auth_token\"\n\tusername = \"username\"\n\tpassword = \"password\"\n)\n\nfunc getHostfactsSecretData(kubeClient *k8s.KubeClient, secretName string) *authInfo {\n\tif secretName == \"\" {\n\t\treturn nil\n\t}\n\n\tparts := strings.Split(secretName, \".\")\n\tname := parts[0]\n\tnamespace := \"default\"\n\tif len(parts) > 1 {\n\t\tnamespace = parts[1]\n\t}\n\n\tsecret, err := kubeClient.Client.Core().Secrets(namespace).Get(name)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tauthData := &authInfo{\n\t\tca: secret.Data[ca],\n\t\tkey: secret.Data[key],\n\t\tcrt: secret.Data[crt],\n\t\tauthToken: string(secret.Data[authToken]),\n\t\tusername: string(secret.Data[username]),\n\t\tpassword: string(secret.Data[password]),\n\t}\n\n\treturn authData\n}\n\nfunc getUsage(authInfo *authInfo, hostIP, path string) (*usageStat, error) {\n\tscheme := \"http\"\n\thttpClient := httpclient.Default()\n\tif authInfo != nil && authInfo.ca != nil {\n\t\tscheme = \"https\"\n\t\thttpClient.WithBasicAuth(authInfo.username, authInfo.password).\n\t\t\tWithBearerToken(authInfo.authToken).\n\t\t WithTLSConfig(authInfo.ca, authInfo.crt, authInfo.key)\n\t}\n\n\turlStr := fmt.Sprintf(\"%v:\/\/%v:%v\/du?p=%v\", scheme, hostIP, hostFactPort, path)\n\tusages := make([]*usageStat, 1)\n\t_, err := httpClient.Call(http.MethodGet, urlStr, nil, &usages, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn usages[0], nil\n}\n\nfunc checkResult(field string, warning, critical, result float64) (util.IcingaState, interface{}) {\n\tif result >= critical {\n\t\treturn util.Critical, fmt.Sprintf(\"%v used more than %v%%\", field, critical)\n\t}\n\tif result >= warning {\n\t\treturn util.Warning, fmt.Sprintf(\"%v used more than %v%%\", field, warning)\n\t}\n\treturn util.Ok, \"(Disk & Inodes)\"\n}\n\nfunc checkDiskStat(kubeClient *k8s.KubeClient, req *request, nodeIP, path string) (util.IcingaState, interface{}) {\n\tauthInfo := getHostfactsSecretData(kubeClient, req.secret)\n\n\tusage, err := getUsage(authInfo, nodeIP, path)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\twarning := req.warning\n\tcritical := req.critical\n\tstate, message := checkResult(\"Disk\", warning, critical, usage.UsedPercent)\n\tif state != util.Ok {\n\t\treturn state, message\n\t}\n\tstate, message = checkResult(\"Inodes\", warning, critical, usage.InodesUsedPercent)\n\treturn state, message\n}\n\nfunc checkNodeDiskStat(req *request) (util.IcingaState, interface{}) {\n\thost := req.host\n\tparts := strings.Split(host, \"@\")\n\tif len(parts) != 2 {\n\t\treturn util.Unknown, \"Invalid icinga host.name\"\n\t}\n\n\tkubeClient, err := k8s.NewClient()\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tnode_name := parts[0]\n\tnode, err := kubeClient.Client.Core().Nodes().Get(node_name)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tif node == nil {\n\t\treturn util.Unknown, \"Node not found\"\n\t}\n\n\thostIP := \"\"\n\tfor _, address := range node.Status.Addresses {\n\t\tif address.Type == kapi.NodeInternalIP {\n\t\t\thostIP = address.Address\n\t\t}\n\t}\n\n\tif hostIP == \"\" {\n\t\treturn util.Unknown, \"Node InternalIP not found\"\n\t}\n\treturn checkDiskStat(kubeClient, req, 
hostIP, \"\/\")\n}\n\nfunc checkPodVolumeStat(req *request) (util.IcingaState, interface{}) {\n\thost := req.host\n\tname := req.name\n\tparts := strings.Split(host, \"@\")\n\tif len(parts) != 2 {\n\t\treturn util.Unknown, \"Invalid icinga host.name\"\n\t}\n\n\tkubeClient, err := k8s.NewClient()\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tpod_name := parts[0]\n\tnamespace := parts[1]\n\tpod, err := kubeClient.Client.Core().Pods(namespace).Get(pod_name)\n\tif err != nil {\n\t\treturn util.Unknown, err\n\t}\n\n\tvar volumeSourcePluginName = \"\"\n\tvar volumeSourceName = \"\"\n\tfor _, volume := range pod.Spec.Volumes {\n\t\tif volume.Name == name {\n\t\t\tif volume.PersistentVolumeClaim != nil {\n\t\t\t\tclaim, err := kubeClient.Client.Core().\n\t\t\t\t\tPersistentVolumeClaims(namespace).Get(volume.PersistentVolumeClaim.ClaimName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn util.Unknown, err\n\n\t\t\t\t}\n\t\t\t\tvolume, err := kubeClient.Client.Core().PersistentVolumes().Get(claim.Spec.VolumeName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn util.Unknown, err\n\t\t\t\t}\n\t\t\t\tvolumeSourcePluginName = getPersistentVolumePluginName(&volume.Spec.PersistentVolumeSource)\n\t\t\t\tvolumeSourceName = volume.Name\n\n\t\t\t} else {\n\t\t\t\tvolumeSourcePluginName = getVolumePluginName(&volume.VolumeSource)\n\t\t\t\tvolumeSourceName = volume.Name\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif volumeSourcePluginName == \"\" {\n\t\treturn util.Unknown, errors.New(\"Invalid volume source\")\n\t}\n\n\tpath := fmt.Sprintf(\"\/var\/lib\/kubelet\/pods\/%v\/volumes\/%v\/%v\", pod.UID, volumeSourcePluginName, volumeSourceName)\n\treturn checkDiskStat(kubeClient, req, pod.Status.HostIP, path)\n}\n\nfunc NewCmd() *cobra.Command {\n\tvar req request\n\n\tc := &cobra.Command{\n\t\tUse: \"check_volume\",\n\t\tShort: \"Check kubernetes volume\",\n\t\tExample: \"\",\n\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tflags.EnsureRequiredFlags(cmd, \"host\")\n\t\t\tif req.node_stat {\n\t\t\t\tcheckNodeDiskStat(&req)\n\t\t\t} else {\n\t\t\t\tflags.EnsureRequiredFlags(cmd, \"name\")\n\t\t\t\tcheckPodVolumeStat(&req)\n\t\t\t}\n\t\t},\n\t}\n\n\tc.Flags().BoolVar(&req.node_stat, \"node_stat\", false, \"Checking Node disk size\")\n\tc.Flags().StringVarP(&req.secret, \"secret\", \"s\", \"\", `Kubernetes secret name`)\n\tc.Flags().StringVarP(&req.host, \"host\", \"H\", \"\", \"Icinga host name\")\n\tc.Flags().StringVarP(&req.name, \"name\", \"N\", \"\", \"Volume name\")\n\tc.Flags().Float64VarP(&req.warning, \"warning\", \"w\", 75.0, \"Warning level value (usage percentage)\")\n\tc.Flags().Float64VarP(&req.critical, \"critical\", \"c\", 90.0, \"Critical level value (usage percentage)\")\n\treturn c\n}\n<|endoftext|>"} {"text":"package main\r\n\r\n\/*\r\nconference.go -- server-side Go App Engine API;\r\n uses Google Cloud Endpoints\r\n\r\n*\/\r\n\r\n\r\nimport (\r\n\t\"log\"\r\n\tapplog \"google.golang.org\/appengine\/log\"\r\n\t\"github.com\/GoogleCloudPlatform\/go-endpoints\/endpoints\"\r\n\t\"net\/http\"\r\n\t\"google.golang.org\/appengine\"\r\n)\r\n\r\ntype ConferenceApi struct {\r\n}\r\n\r\nfunc copyProfileToForm(r *http.Request, prof *Profile) (*ProfileForm, error) {\r\n\t\/\/Copy relevant fields from Profile to ProfileForm.\r\n\tpf := &ProfileForm{\r\n\t\t\tDisplayName: prof.DisplayName,\r\n\t\t\tMainEmail: prof.MainEmail,\r\n\t\t\tTeeShirtSize: StringEnumToTeeShirtSize(prof.TeeShirtSize),\r\n\t}\r\n\tappCtx := appengine.NewContext(r)\r\n\tapplog.Debugf(appCtx, \"Did run 
copyProfileToForm()\")\r\n\treturn pf, nil\r\n}\r\n\r\nfunc getProfileFromUser(r *http.Request) (*Profile, error) {\r\n\t\/\/Return user Profile from datastore, creating new one if non-existent.\r\n\t\/\/TODO\r\n\t\/\/make sure user is authed\r\n\tc := endpoints.NewContext(r)\r\n\tuser, err := endpoints.CurrentUser(c, []string{endpoints.EmailScope},\r\n\t\t[]string{WEB_CLIENT_ID, endpoints.APIExplorerClientID}, []string{WEB_CLIENT_ID, endpoints.APIExplorerClientID})\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tif user == nil {\r\n\t\treturn nil, endpoints.UnauthorizedError\r\n\t}\r\n\tvar profile *Profile\r\n\tif profile == nil {\r\n\t\tprofile = &Profile{\r\n\t\t\tDisplayName: user.String(),\r\n\t\t\tMainEmail: user.Email,\r\n\t\t\tTeeShirtSize: TeeShirtSizeToStringEnum(NOT_SPECIFIED),\r\n\t\t}\r\n\t\t\/\/TODO\r\n\t}\r\n\treturn profile, nil\r\n}\r\n\r\nfunc doProfile(r *http.Request, saveRequest *ProfileMiniForm) (*ProfileForm, error) {\r\n\t\/\/Get user Profile and return to user, possibly updating it first.\r\n\t\/\/get user Profile\r\n\tprof, err := getProfileFromUser(r)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\t\r\n\t\/\/if saveProfile(), process user-modifyable fields\r\n\tif saveRequest != nil {\r\n\t\tprof.TeeShirtSize = TeeShirtSizeToStringEnum(saveRequest.TeeShirtSize)\r\n\t\tprof.DisplayName = saveRequest.DisplayName\r\n\t}\r\n\t\r\n\t\/\/return ProfileForm\r\n\treturn copyProfileToForm(r, prof)\r\n}\r\n\r\nfunc (h *ConferenceApi) GetProfile(r *http.Request) (*ProfileForm, error) {\r\n\t\/\/Return user profile.\r\n\treturn doProfile(r, nil)\r\n}\r\n\r\nfunc (h *ConferenceApi) SaveProfile(r *http.Request, pf *ProfileMiniForm) (*ProfileForm, error) {\r\n\t\/\/Update & return user profile.\r\n\treturn doProfile(r, pf)\r\n}\r\n\r\nfunc init() {\r\n\t\/\/Conference API v0.1\r\n\tconference := &ConferenceApi{}\r\n\t\/\/registers API\r\n\tapi, err := endpoints.RegisterService(conference, \"conference\", \"v1\", \"Conference API\", true)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Register service: %v\", err)\r\n\t}\r\n\t\r\n\tregister := func(orig, name, method, path, desc string) {\r\n\t\tm := api.MethodByName(orig)\r\n\t\tif m == nil {\r\n\t\t\tlog.Fatalf(\"Missing method %s\", orig)\r\n\t\t}\r\n\t\ti := m.Info()\r\n\t\ti.Name, i.HTTPMethod, i.Path, i.Desc = name, method, path, desc\r\n\t\ti.Scopes = []string{endpoints.EmailScope}\r\n\t\ti.ClientIds = []string{WEB_CLIENT_ID, endpoints.APIExplorerClientID}\r\n\t}\r\n\r\n\tregister(\"GetProfile\", \"getProfile\", \"GET\", \"profile\", \"Get profile\")\r\n\tregister(\"SaveProfile\", \"saveProfile\", \"POST\", \"profile\", \"Save profile\")\r\n\tendpoints.HandleHTTP()\r\n}\r\nchanged how user-modified fields processed in lines 64-69 - empty fields allow defaults or previous values to persist in lab 3package main\r\n\r\n\/*\r\nconference.go -- server-side Go App Engine API;\r\n uses Google Cloud Endpoints\r\n\r\n*\/\r\n\r\n\r\nimport (\r\n\t\"log\"\r\n\tapplog \"google.golang.org\/appengine\/log\"\r\n\t\"github.com\/GoogleCloudPlatform\/go-endpoints\/endpoints\"\r\n\t\"net\/http\"\r\n\t\"google.golang.org\/appengine\"\r\n)\r\n\r\ntype ConferenceApi struct {\r\n}\r\n\r\nfunc copyProfileToForm(r *http.Request, prof *Profile) (*ProfileForm, error) {\r\n\t\/\/Copy relevant fields from Profile to ProfileForm.\r\n\tpf := &ProfileForm{\r\n\t\t\tDisplayName: prof.DisplayName,\r\n\t\t\tMainEmail: prof.MainEmail,\r\n\t\t\tTeeShirtSize: StringEnumToTeeShirtSize(prof.TeeShirtSize),\r\n\t}\r\n\tappCtx := 
appengine.NewContext(r)\r\n\tapplog.Debugf(appCtx, \"Did run copyProfileToForm()\")\r\n\treturn pf, nil\r\n}\r\n\r\nfunc getProfileFromUser(r *http.Request) (*Profile, error) {\r\n\t\/\/Return user Profile from datastore, creating new one if non-existent.\r\n\t\/\/TODO\r\n\t\/\/make sure user is authed\r\n\tc := endpoints.NewContext(r)\r\n\tuser, err := endpoints.CurrentUser(c, []string{endpoints.EmailScope},\r\n\t\t[]string{WEB_CLIENT_ID, endpoints.APIExplorerClientID}, []string{WEB_CLIENT_ID, endpoints.APIExplorerClientID})\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tif user == nil {\r\n\t\treturn nil, endpoints.UnauthorizedError\r\n\t}\r\n\tvar profile *Profile\r\n\tif profile == nil {\r\n\t\tprofile = &Profile{\r\n\t\t\tDisplayName: user.String(),\r\n\t\t\tMainEmail: user.Email,\r\n\t\t\tTeeShirtSize: TeeShirtSizeToStringEnum(NOT_SPECIFIED),\r\n\t\t}\r\n\t\t\/\/TODO\r\n\t}\r\n\treturn profile, nil\r\n}\r\n\r\nfunc doProfile(r *http.Request, saveRequest *ProfileMiniForm) (*ProfileForm, error) {\r\n\t\/\/Get user Profile and return to user, possibly updating it first.\r\n\t\/\/get user Profile\r\n\tprof, err := getProfileFromUser(r)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\t\r\n\t\/\/if saveProfile(), process user-modifyable fields\r\n\tif saveRequest != nil {\r\n\t\tif saveRequest.DisplayName != \"\" {\r\n\t\t\tprof.DisplayName = saveRequest.DisplayName\r\n\t\t}\r\n\t\tif TeeShirtSizeToStringEnum(saveRequest.TeeShirtSize) != \"\" {\r\n\t\t\tprof.TeeShirtSize = TeeShirtSizeToStringEnum(saveRequest.TeeShirtSize)\r\n\t\t}\r\n\t}\r\n\t\r\n\t\/\/return ProfileForm\r\n\treturn copyProfileToForm(r, prof)\r\n}\r\n\r\nfunc (h *ConferenceApi) GetProfile(r *http.Request) (*ProfileForm, error) {\r\n\t\/\/Return user profile.\r\n\treturn doProfile(r, nil)\r\n}\r\n\r\nfunc (h *ConferenceApi) SaveProfile(r *http.Request, pf *ProfileMiniForm) (*ProfileForm, error) {\r\n\t\/\/Update & return user profile.\r\n\treturn doProfile(r, pf)\r\n}\r\n\r\nfunc init() {\r\n\t\/\/Conference API v0.1\r\n\tconference := &ConferenceApi{}\r\n\t\/\/registers API\r\n\tapi, err := endpoints.RegisterService(conference, \"conference\", \"v1\", \"Conference API\", true)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Register service: %v\", err)\r\n\t}\r\n\t\r\n\tregister := func(orig, name, method, path, desc string) {\r\n\t\tm := api.MethodByName(orig)\r\n\t\tif m == nil {\r\n\t\t\tlog.Fatalf(\"Missing method %s\", orig)\r\n\t\t}\r\n\t\ti := m.Info()\r\n\t\ti.Name, i.HTTPMethod, i.Path, i.Desc = name, method, path, desc\r\n\t\ti.Scopes = []string{endpoints.EmailScope}\r\n\t\ti.ClientIds = []string{WEB_CLIENT_ID, endpoints.APIExplorerClientID}\r\n\t}\r\n\r\n\tregister(\"GetProfile\", \"getProfile\", \"GET\", \"profile\", \"Get profile\")\r\n\tregister(\"SaveProfile\", \"saveProfile\", \"POST\", \"profile\", \"Save profile\")\r\n\tendpoints.HandleHTTP()\r\n}\r\n<|endoftext|>"} {"text":"package battery\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc Info() (int, int, bool, error) {\n\tf, err := os.Open(\"\/sys\/class\/power_supply\/BAT0\/uevent\")\n\tif err != nil {\n\t\treturn 0, 0, false, err\n\t}\n\tdefer f.Close()\n\tscanner := bufio.NewScanner(f)\n\n\tvar full, now, powerNow float64\n\tvar present bool\n\tfor scanner.Scan() {\n\t\ttokens := strings.SplitN(scanner.Text(), \"=\", 2)\n\t\tif len(tokens) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch tokens[0] {\n\t\tcase \"POWER_SUPPLY_ENERGY_FULL_DESIGN\":\n\t\t\tfull, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase 
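The change the commit message describes — skip empty form fields so defaults or previous values persist — shown in isolation, with simplified stand-ins for the Profile types above.

package main

import "fmt"

// Profile and ProfileMiniForm are simplified stand-ins for the
// conference.go types above.
type Profile struct {
	DisplayName  string
	TeeShirtSize string
}

type ProfileMiniForm struct {
	DisplayName  string
	TeeShirtSize string
}

// applySave copies only non-empty fields, so an empty form field leaves
// the stored default or previous value untouched -- the behaviour the
// commit message above describes.
func applySave(prof *Profile, form *ProfileMiniForm) {
	if form.DisplayName != "" {
		prof.DisplayName = form.DisplayName
	}
	if form.TeeShirtSize != "" {
		prof.TeeShirtSize = form.TeeShirtSize
	}
}

func main() {
	prof := &Profile{DisplayName: "old name", TeeShirtSize: "M"}
	applySave(prof, &ProfileMiniForm{TeeShirtSize: "L"}) // DisplayName left as-is
	fmt.Println(prof.DisplayName, prof.TeeShirtSize)     // old name L
}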
\"POWER_SUPPLY_CHARGE_FULL\":\n\t\t\tfull, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_ENERGY_NOW\":\n\t\t\tnow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_CHARGE_NOW\":\n\t\t\tnow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_STATUS\":\n\t\t\tpresent = tokens[1] == \"Charging\"\n\t\tcase \"POWER_SUPPLY_POWER_NOW\":\n\t\t\tpowerNow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\t}\n\t}\n\tvar percent, elapsed int\n\tif full > 0 {\n\t\tpercent = int(now \/ full * 100)\n\t}\n\tif powerNow > 0 {\n\t\telapsed = int(now \/ powerNow * 60)\n\t}\n\treturn percent, elapsed, present, nil\n}\nEnable to detect batteries rather than BAT0package battery\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc Info() (percent int, elapsed int, present bool, err error) {\n\tvar uevents []string\n\tuevents, err = filepath.Glob(\"\/sys\/class\/power_supply\/BAT*\/uevent\")\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(uevents) == 0 {\n\t\treturn\n\t}\n\tvar f *os.File\n\tfor _, u := range uevents {\n\t\tf, err = os.Open(u)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\tscanner := bufio.NewScanner(f)\n\n\tvar full, now, powerNow float64\n\tfor scanner.Scan() {\n\t\ttokens := strings.SplitN(scanner.Text(), \"=\", 2)\n\t\tif len(tokens) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch tokens[0] {\n\t\tcase \"POWER_SUPPLY_ENERGY_FULL_DESIGN\":\n\t\t\tfull, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_CHARGE_FULL\":\n\t\t\tfull, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_ENERGY_NOW\":\n\t\t\tnow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_CHARGE_NOW\":\n\t\t\tnow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\tcase \"POWER_SUPPLY_STATUS\":\n\t\t\tpresent = tokens[1] == \"Charging\"\n\t\tcase \"POWER_SUPPLY_POWER_NOW\":\n\t\t\tpowerNow, _ = strconv.ParseFloat(tokens[1], 64)\n\t\t}\n\t}\n\tif full > 0 {\n\t\tpercent = int(now \/ full * 100)\n\t}\n\tif powerNow > 0 {\n\t\telapsed = int(now \/ powerNow * 60)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"package bbox\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/nsf\/termbox-go\"\n)\n\n\/\/ mapping from keyboard box\n\/\/ 2 x 21 = [volume down]\n\/\/ 2 x 24 = [mute]\n\/\/ 3 x 19 = `\n\/\/ 3 x 20 = 1\n\/\/ 3 x 21 = q\n\/\/ 3 x 22 = KeyTab\n\/\/ 3 x 23 = a\n\/\/ 3 x 24 = z\n\/\/ 4 x 19 = KeyF1\n\/\/ 4 x 20 = 2\n\/\/ 4 x 21 = w\n\/\/ 4 x 23 = S\n\/\/ 4 x 24 = §\n\/\/ 4 x 25 = x\n\/\/ 5 x 19 = KeyF2\n\/\/ 5 x 20 = 3\n\/\/ 5 x 21 = e\n\/\/ 5 x 22 = d\n\/\/ 5 x 23 = c\n\/\/ 5 x 24 = KeyF4\n\/\/ 6 x 19 = 5\n\/\/ 6 x 20 = 4\n\/\/ 6 x 21 = r\n\/\/ 6 x 22 = t\n\/\/ 6 x 23 = f\n\/\/ 6 x 24 = g\n\/\/ 6 x 25 = v\n\/\/ 6 x 26 = b\n\/\/ 7 x 19 = 6\n\/\/ 7 x 20 = 7\n\/\/ 7 x 21 = u\n\/\/ 7 x 22 = y\n\/\/ 7 x 23 = j\n\/\/ 7 x 24 = h\n\/\/ 7 x 25 = m\n\/\/ 7 x 26 = n\n\/\/ 8 x 19 = =\n\/\/ 8 x 20 = 8\n\/\/ 8 x 21 = i\n\/\/ 8 x 22 = ]\n\/\/ 8 x 23 = K\n\/\/ 8 x 24 = KeyF6\n\/\/ 8 x 25 = ,\n\/\/ 9 x 19 = KeyF8\n\/\/ 9 x 20 = 9\n\/\/ 9 x 21 = o\n\/\/ 9 x 23 = l\n\/\/ 9 x 25 = .\n\/\/ 10 x 19 = -\n\/\/ 10 x 20 = 0\n\/\/ 10 x 21 = p\n\/\/ 10 x 22 = [\n\/\/ 10 x 23 = ;\n\/\/ 10 x 24 = '\n\/\/ 10 x 25 = \\\n\/\/ 10 x 26 = \/\n\/\/ 11 x 19 = KeyF9\n\/\/ 11 x 20 = KeyF10\n\/\/ 11 x 22 = KeyBackspace2\n\/\/ 11 x 23 = \\ ***\n\/\/ 11 x 24 = KeyF5\n\/\/ 11 x 25 = KeyEnter\n\/\/ 11 x 26 = KeySpace\n\/\/ 12 x 20 = KeyF12\n\/\/ 12 x 21 = 8 ***\n\/\/ 12 x 22 = 5 ***\n\/\/ 12 x 23 = 2 ***\n\/\/ 12 x 24 = 0 
***\n\/\/ 12 x 25 = \/ ***\n\/\/ 12 x 26 = KeyArrowRight\n\/\/ 13 x 19 = KeyDelete\n\/\/ 13 x 20 = [fn f11]\n\/\/ 13 x 21 = 7 ***\n\/\/ 13 x 22 = 4 ***\n\/\/ 13 x 23 = 1 ***\n\/\/ 13 x 26 = KeyArrowDown\n\/\/ 14 x 19 = KeyPgup\n\/\/ 14 x 20 = KeyPgdn\n\/\/ 14 x 21 = 9 ***\n\/\/ 14 x 22 = 6 ***\n\/\/ 14 x 23 = 3 ***\n\/\/ 14 x 24 = . ***\n\/\/ 14 x 25 = *\n\/\/ 14 x 26 = - ***\n\/\/ 15 x 19 = KeyHome\n\/\/ 15 x 20 = KeyEnd\n\/\/ 15 x 21 = +\n\/\/ 15 x 23 = KeyEnter ***\n\/\/ 15 x 24 = KeyArrowUp\n\/\/ 15 x 25 = [brightness up]\n\/\/ 15 x 26 = KeyArrowLeft\n\/\/ 16 x 21 = [brightness down]\n\/\/ 17 x 24 = [launch itunes?]\n\/\/ 18 x 22 = [volume up]\n\nvar keymaps = map[string][]int{\n\t\"1\": []int{0, 0},\n\t\"2\": []int{0, 1},\n\t\"3\": []int{0, 2},\n\t\"4\": []int{0, 3},\n\t\"5\": []int{0, 4},\n\t\"6\": []int{0, 5},\n\t\"7\": []int{0, 6},\n\t\"8\": []int{0, 7},\n\t\"!\": []int{0, 8},\n\t\"@\": []int{0, 9},\n\t\"#\": []int{0, 10},\n\t\"$\": []int{0, 11},\n\t\"%\": []int{0, 12},\n\t\"^\": []int{0, 13},\n\t\"&\": []int{0, 14},\n\t\"*\": []int{0, 15},\n\n\t\"w\": []int{1, 0},\n\t\"e\": []int{1, 1},\n\t\"r\": []int{1, 2},\n\t\"t\": []int{1, 3},\n\t\"y\": []int{1, 4},\n\t\"u\": []int{1, 5},\n\t\"i\": []int{1, 6},\n\t\"o\": []int{1, 7},\n\t\"W\": []int{1, 8},\n\t\"E\": []int{1, 9},\n\t\"R\": []int{1, 10},\n\t\"T\": []int{1, 11},\n\t\"Y\": []int{1, 12},\n\t\"U\": []int{1, 13},\n\t\"I\": []int{1, 14},\n\t\"O\": []int{1, 15},\n\n\t\"a\": []int{2, 0},\n\t\"s\": []int{2, 1},\n\t\"d\": []int{2, 2},\n\t\"f\": []int{2, 3},\n\t\"g\": []int{2, 4},\n\t\"h\": []int{2, 5},\n\t\"j\": []int{2, 6},\n\t\"k\": []int{2, 7},\n\t\"A\": []int{2, 8},\n\t\"S\": []int{2, 9},\n\t\"D\": []int{2, 10},\n\t\"F\": []int{2, 11},\n\t\"G\": []int{2, 12},\n\t\"H\": []int{2, 13},\n\t\"J\": []int{2, 14},\n\t\"K\": []int{2, 15},\n\n\t\"z\": []int{3, 0},\n\t\"x\": []int{3, 1},\n\t\"c\": []int{3, 2},\n\t\"v\": []int{3, 3},\n\t\"b\": []int{3, 4},\n\t\"n\": []int{3, 5},\n\t\"m\": []int{3, 6},\n\t\",\": []int{3, 7},\n\t\"Z\": []int{3, 8},\n\t\"X\": []int{3, 9},\n\t\"C\": []int{3, 10},\n\t\"V\": []int{3, 11},\n\t\"B\": []int{3, 12},\n\t\"N\": []int{3, 13},\n\t\"M\": []int{3, 14},\n\t\"<\": []int{3, 15},\n}\n\nvar keymaps_rpi = map[string][]int{\n\t\"1\": []int{0, 0}, \/\/ 3 x 20\n\t\"q\": []int{0, 1}, \/\/ 3 x 21\n\t\"a\": []int{0, 2}, \/\/ 3 x 23\n\t\"z\": []int{0, 4}, \/\/ 3 x 24\n\t\"2\": []int{0, 5}, \/\/ 4 x 20\n\t\"w\": []int{0, 6}, \/\/ 4 x 21\n\t\"S\": []int{0, 7}, \/\/ 4 x 23\n\t\"§\": []int{0, 8}, \/\/ 4 x 24\n\t\"x\": []int{0, 9}, \/\/ 4 x 25\n\t\"3\": []int{0, 10}, \/\/ 5 x 20\n\t\"e\": []int{0, 11}, \/\/ 5 x 21\n\t\"d\": []int{0, 12}, \/\/ 5 x 22\n\t\"c\": []int{0, 13}, \/\/ 5 x 23\n\t\"5\": []int{0, 14}, \/\/ 6 x 19\n\t\"4\": []int{0, 15}, \/\/ 6 x 20\n\n\t\"r\": []int{1, 0}, \/\/ 6 x 21\n\t\"t\": []int{1, 1}, \/\/ 6 x 22\n\t\"f\": []int{1, 2}, \/\/ 6 x 23\n\t\"g\": []int{1, 3}, \/\/ 6 x 24\n\t\"v\": []int{1, 4}, \/\/ 6 x 25\n\t\"b\": []int{1, 5}, \/\/ 6 x 26\n\t\"6\": []int{1, 6}, \/\/ 7 x 19\n\t\"7\": []int{1, 7}, \/\/ 7 x 20\n\t\"u\": []int{1, 8}, \/\/ 7 x 21\n\t\"y\": []int{1, 9}, \/\/ 7 x 22\n\t\"j\": []int{1, 10}, \/\/ 7 x 23\n\t\"h\": []int{1, 11}, \/\/ 7 x 24\n\t\"m\": []int{1, 12}, \/\/ 7 x 25\n\t\"n\": []int{1, 13}, \/\/ 7 x 26\n\t\"=\": []int{1, 14}, \/\/ 8 x 19\n\t\"8\": []int{1, 15}, \/\/ 8 x 20\n\n\t\"i\": []int{2, 0}, \/\/ 8 x 21\n\t\"]\": []int{2, 1}, \/\/ 8 x 22\n\t\"K\": []int{2, 2}, \/\/ 8 x 23\n\t\",\": []int{2, 3}, \/\/ 8 x 25\n\t\"9\": []int{2, 4}, \/\/ 9 x 20\n\t\"o\": []int{2, 5}, \/\/ 9 x 
21\n\t\"l\": []int{2, 6}, \/\/ 9 x 23\n\t\".\": []int{2, 7}, \/\/ 9 x 23\n\t\"-\": []int{2, 8}, \/\/ 10 x 19\n\t\"0\": []int{2, 9}, \/\/ 10 x 20\n\t\"p\": []int{2, 10}, \/\/ 10 x 21\n\t\"[\": []int{2, 11}, \/\/ 10 x 22\n\t\";\": []int{2, 12}, \/\/ 10 x 23\n\t\"'\": []int{2, 13}, \/\/ 10 x 24\n\t\"\\\\\": []int{2, 14}, \/\/ 10 x 25\n\t\"\/\": []int{2, 15}, \/\/ 10 x 26\n\n\t\/\/ \".\": []int{3, 8}, \/\/ 14 x 24\n\n\t\"*\": []int{3, 9}, \/\/ 14 x 25\n\n\t\/\/ \"-\": []int{3, 10}, \/\/ 14 x 26\n\n\t\"+\": []int{3, 13}, \/\/ 15 x 21\n}\n\nvar keymaps_rpi_keys = map[termbox.Key][]int{\n\ttermbox.KeyTab: []int{0, 3}, \/\/ 3 x 22\n\n\ttermbox.KeyBackspace: []int{3, 0}, \/\/ 11 x 22\n\ttermbox.KeyEnter: []int{3, 1}, \/\/ 11 x 25\n\ttermbox.KeySpace: []int{3, 2}, \/\/ 11 x 26\n\ttermbox.KeyArrowRight: []int{3, 3}, \/\/ 12 x 26\n\ttermbox.KeyDelete: []int{3, 4}, \/\/ 13 x 19\n\ttermbox.KeyArrowDown: []int{3, 5}, \/\/ 13 x 26\n\ttermbox.KeyPgup: []int{3, 6}, \/\/ 14 x 19\n\ttermbox.KeyPgdn: []int{3, 7}, \/\/ 14 x 20\n\ttermbox.KeyHome: []int{3, 11}, \/\/ 15 x 19\n\ttermbox.KeyEnd: []int{3, 12}, \/\/ 15 x 20\n\ttermbox.KeyArrowUp: []int{3, 14}, \/\/ 15 x 24\n\ttermbox.KeyF2: []int{3, 15}, \/\/ 15 x 25\n}\n\n\/\/ normal operation:\n\/\/ beats -> emit -> msgs\n\/\/ shtudown operation:\n\/\/ q -> close(emit) -> close(msgs) -> termbox.Close()\ntype Keyboard struct {\n\tbeats Beats\n\temit chan Beats\n\tmsgs []chan<- Beats\n}\n\nfunc tbprint(x, y int, fg, bg termbox.Attribute, msg string) {\n\tfor _, c := range msg {\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx++\n\t}\n}\n\nfunc InitKeyboard(msgs []chan<- Beats) *Keyboard {\n\t\/\/ termbox.Close() called when Render.Run() exits\n\terr := termbox.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttermbox.SetInputMode(termbox.InputAlt)\n\n\treturn &Keyboard{\n\t\tbeats: Beats{},\n\t\temit: make(chan Beats),\n\t\tmsgs: msgs,\n\t}\n}\n\nfunc (kb *Keyboard) Run() {\n\tvar current string\n\tvar curev termbox.Event\n\n\tdefer close(kb.emit)\n\n\tdata := make([]byte, 0, 64)\n\n\t\/\/ starter beat\n\tgo kb.Emitter()\n\tkb.beats[1][0] = true\n\tkb.beats[1][8] = true\n\tkb.Emit()\n\n\tfor {\n\t\tif cap(data)-len(data) < 32 {\n\t\t\tnewdata := make([]byte, len(data), len(data)+32)\n\t\t\tcopy(newdata, data)\n\t\t\tdata = newdata\n\t\t}\n\t\tbeg := len(data)\n\t\td := data[beg : beg+32]\n\t\tswitch ev := termbox.PollRawEvent(d); ev.Type {\n\t\tcase termbox.EventRaw:\n\t\t\tdata = data[:beg+ev.N]\n\t\t\tcurrent = fmt.Sprintf(\"%s\", data)\n\t\t\tif current == \"`\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkey := keymaps[current]\n\t\t\tif key != nil {\n\t\t\t\tkb.beats[key[0]][key[1]] = !kb.beats[key[0]][key[1]]\n\t\t\t\tkb.Emit()\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\t\/\/ TODO: move kb.beats code to here\n\t\t\t\tev := termbox.ParseEvent(data)\n\t\t\t\tif ev.N == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcurev = ev\n\t\t\t\tcopy(data, data[curev.N:])\n\t\t\t\tdata = data[:len(data)-curev.N]\n\n\t\t\t\ttbprint(0, BEATS+1, termbox.ColorDefault, termbox.ColorDefault,\n\t\t\t\t\tfmt.Sprintf(\"EventKey: k: %5d, c: %c\", ev.Key, ev.Ch))\n\t\t\t\ttermbox.Flush()\n\t\t\t}\n\t\tcase termbox.EventError:\n\t\t\tpanic(ev.Err)\n\t\t}\n\t}\n}\n\nfunc (kb *Keyboard) Emit() {\n\tbeats := kb.beats\n\tkb.emit <- beats\n}\n\nfunc (kb *Keyboard) Emitter() {\n\tfor {\n\t\tselect {\n\t\tcase beats, more := <-kb.emit:\n\t\t\tif more {\n\t\t\t\tfor _, msg := range kb.msgs {\n\t\t\t\t\tmsg <- beats\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor _, msg := range kb.msgs 
{\n\t\t\t\t\tclose(msg)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\nclean up mappingspackage bbox\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/nsf\/termbox-go\"\n)\n\nvar keymaps = map[string][]int{\n\t\"1\": []int{0, 0},\n\t\"2\": []int{0, 1},\n\t\"3\": []int{0, 2},\n\t\"4\": []int{0, 3},\n\t\"5\": []int{0, 4},\n\t\"6\": []int{0, 5},\n\t\"7\": []int{0, 6},\n\t\"8\": []int{0, 7},\n\t\"!\": []int{0, 8},\n\t\"@\": []int{0, 9},\n\t\"#\": []int{0, 10},\n\t\"$\": []int{0, 11},\n\t\"%\": []int{0, 12},\n\t\"^\": []int{0, 13},\n\t\"&\": []int{0, 14},\n\t\"*\": []int{0, 15},\n\n\t\"w\": []int{1, 0},\n\t\"e\": []int{1, 1},\n\t\"r\": []int{1, 2},\n\t\"t\": []int{1, 3},\n\t\"y\": []int{1, 4},\n\t\"u\": []int{1, 5},\n\t\"i\": []int{1, 6},\n\t\"o\": []int{1, 7},\n\t\"W\": []int{1, 8},\n\t\"E\": []int{1, 9},\n\t\"R\": []int{1, 10},\n\t\"T\": []int{1, 11},\n\t\"Y\": []int{1, 12},\n\t\"U\": []int{1, 13},\n\t\"I\": []int{1, 14},\n\t\"O\": []int{1, 15},\n\n\t\"a\": []int{2, 0},\n\t\"s\": []int{2, 1},\n\t\"d\": []int{2, 2},\n\t\"f\": []int{2, 3},\n\t\"g\": []int{2, 4},\n\t\"h\": []int{2, 5},\n\t\"j\": []int{2, 6},\n\t\"k\": []int{2, 7},\n\t\"A\": []int{2, 8},\n\t\"S\": []int{2, 9},\n\t\"D\": []int{2, 10},\n\t\"F\": []int{2, 11},\n\t\"G\": []int{2, 12},\n\t\"H\": []int{2, 13},\n\t\"J\": []int{2, 14},\n\t\"K\": []int{2, 15},\n\n\t\"z\": []int{3, 0},\n\t\"x\": []int{3, 1},\n\t\"c\": []int{3, 2},\n\t\"v\": []int{3, 3},\n\t\"b\": []int{3, 4},\n\t\"n\": []int{3, 5},\n\t\"m\": []int{3, 6},\n\t\",\": []int{3, 7},\n\t\"Z\": []int{3, 8},\n\t\"X\": []int{3, 9},\n\t\"C\": []int{3, 10},\n\t\"V\": []int{3, 11},\n\t\"B\": []int{3, 12},\n\t\"N\": []int{3, 13},\n\t\"M\": []int{3, 14},\n\t\"<\": []int{3, 15},\n}\n\ntype Key struct {\n\tCh rune \/\/ a unicode character\n\tKey termbox.Key \/\/ one of Key* constants, invalid if 'Ch' is not 0\n}\n\n\/\/ mapping from keyboard box\nvar keymaps_rpi = map[Key][]int{\n\t\/\/ 2 x 21 = [volume down]\n\t\/\/ 2 x 24 = [mute]\n\t\/\/ 3 x 19 = ` (quit)\n\n\t{'1', 0}: []int{0, 0}, \/\/ 3 x 20\n\t{'q', 0}: []int{0, 1}, \/\/ 3 x 21\n\t{0, termbox.KeyTab}: []int{0, 2}, \/\/ 3 x 22\n\t{'a', 0}: []int{0, 3}, \/\/ 3 x 23\n\t{'z', 0}: []int{0, 4}, \/\/ 3 x 24\n\t{0, termbox.KeyF1}: []int{0, 5}, \/\/ 4 x 19\n\t{'2', 0}: []int{0, 6}, \/\/ 4 x 20\n\t{'w', 0}: []int{0, 7}, \/\/ 4 x 21\n\t{'S', 0}: []int{0, 8}, \/\/ 4 x 23\n\t\/\/ 4 x 24 = §\n\t{'x', 0}: []int{0, 9}, \/\/ 4 x 25\n\t{0, termbox.KeyF2}: []int{0, 10}, \/\/ 5 x 19\n\t{'3', 0}: []int{0, 11}, \/\/ 5 x 20\n\t{'e', 0}: []int{0, 12}, \/\/ 5 x 21\n\t{'d', 0}: []int{0, 13}, \/\/ 5 x 22\n\t{'c', 0}: []int{0, 14}, \/\/ 5 x 23\n\t{0, termbox.KeyF4}: []int{0, 15}, \/\/ 5 x 24\n\n\t{'5', 0}: []int{1, 0}, \/\/ 6 x 19\n\t{'4', 0}: []int{1, 1}, \/\/ 6 x 20\n\t{'r', 0}: []int{1, 2}, \/\/ 6 x 21\n\t{'t', 0}: []int{1, 3}, \/\/ 6 x 22\n\t{'f', 0}: []int{1, 4}, \/\/ 6 x 23\n\t{'g', 0}: []int{1, 5}, \/\/ 6 x 24\n\t{'v', 0}: []int{1, 6}, \/\/ 6 x 25\n\t{'b', 0}: []int{1, 7}, \/\/ 6 x 26\n\t{'6', 0}: []int{1, 8}, \/\/ 7 x 19\n\t{'7', 0}: []int{1, 9}, \/\/ 7 x 20\n\t{'u', 0}: []int{1, 10}, \/\/ 7 x 21\n\t{'y', 0}: []int{1, 11}, \/\/ 7 x 22\n\t{'j', 0}: []int{1, 12}, \/\/ 7 x 23\n\t{'h', 0}: []int{1, 13}, \/\/ 7 x 24\n\t{'m', 0}: []int{1, 14}, \/\/ 7 x 25\n\t{'n', 0}: []int{1, 15}, \/\/ 7 x 26\n\n\t{'=', 0}: []int{2, 0}, \/\/ 8 x 19\n\t{'8', 0}: []int{2, 1}, \/\/ 8 x 20\n\t{'i', 0}: []int{2, 2}, \/\/ 8 x 21\n\t{']', 0}: []int{2, 3}, \/\/ 8 x 22\n\t{'K', 0}: []int{2, 4}, \/\/ 8 x 23\n\t{0, termbox.KeyF6}: []int{2, 5}, \/\/ 8 x 24\n\t{',', 0}: []int{2, 6}, \/\/ 8 
x 25\n\t{0, termbox.KeyF8}: []int{2, 7}, \/\/ 9 x 19\n\t{'9', 0}: []int{2, 8}, \/\/ 9 x 20\n\t{'o', 0}: []int{2, 9}, \/\/ 9 x 21\n\t{'l', 0}: []int{2, 10}, \/\/ 9 x 23\n\t{'.', 0}: []int{2, 11}, \/\/ 9 x 25\n\t{'-', 0}: []int{2, 12}, \/\/ 10 x 19\n\t{'0', 0}: []int{2, 13}, \/\/ 10 x 20\n\t{'p', 0}: []int{2, 14}, \/\/ 10 x 21\n\t{'[', 0}: []int{2, 15}, \/\/ 10 x 22\n\n\t{';', 0}: []int{3, 0}, \/\/ 10 x 23\n\t{'\\'', 0}: []int{3, 1}, \/\/ 10 x 24\n\t{'\\\\', 0}: []int{3, 2}, \/\/ 10 x 25\n\t{'\/', 0}: []int{3, 3}, \/\/ 10 x 26\n\t{0, termbox.KeyF9}: []int{3, 4}, \/\/ 11 x 19\n\t{0, termbox.KeyF10}: []int{3, 5}, \/\/ 11 x 20\n\t{0, termbox.KeyBackspace2}: []int{3, 6}, \/\/ 11 x 22\n\t\/\/ 11 x 23 = \\ ***\n\t{0, termbox.KeyF5}: []int{3, 7}, \/\/ 11 x 24\n\t{0, termbox.KeyEnter}: []int{3, 8}, \/\/ 11 x 25\n\t{0, termbox.KeySpace}: []int{3, 9}, \/\/ 11 x 26\n\t{0, termbox.KeyF12}: []int{3, 10}, \/\/ 12 x 20\n\t\/\/ 12 x 21 = 8 ***\n\t\/\/ 12 x 22 = 5 ***\n\t\/\/ 12 x 23 = 2 ***\n\t\/\/ 12 x 24 = 0 ***\n\t\/\/ 12 x 25 = \/ ***\n\t{0, termbox.KeyArrowRight}: []int{3, 11}, \/\/ 12 x 26\n\t{0, termbox.KeyDelete}: []int{3, 12}, \/\/ 13 x 19\n\t\/\/ 13 x 20 = [fn f11]\n\t\/\/ 13 x 21 = 7 ***\n\t\/\/ 13 x 22 = 4 ***\n\t\/\/ 13 x 23 = 1 ***\n\t{0, termbox.KeyArrowDown}: []int{3, 13}, \/\/ 13 x 26\n\t{0, termbox.KeyPgup}: []int{3, 14}, \/\/ 14 x 19\n\t{0, termbox.KeyPgdn}: []int{3, 15}, \/\/ 14 x 20\n\t\/\/ 14 x 21 = 9 ***\n\t\/\/ 14 x 22 = 6 ***\n\t\/\/ 14 x 23 = 3 ***\n\t\/\/ 14 x 24 = . ***\n\t\/\/ 14 x 25 = *\n\t\/\/ 14 x 26 = - ***\n\t\/\/ 15 x 19 = KeyHome\n\t\/\/ 15 x 20 = KeyEnd\n\t\/\/ 15 x 21 = +\n\t\/\/ 15 x 23 = KeyEnter ***\n\t\/\/ 15 x 24 = KeyArrowUp\n\t\/\/ 15 x 25 = [brightness up]\n\t\/\/ 15 x 26 = KeyArrowLeft\n\t\/\/ 16 x 21 = [brightness down]\n\t\/\/ 17 x 24 = [launch itunes?]\n\t\/\/ 18 x 22 = [volume up]\n}\n\n\/\/ normal operation:\n\/\/ beats -> emit -> msgs\n\/\/ shtudown operation:\n\/\/ q -> close(emit) -> close(msgs) -> termbox.Close()\ntype Keyboard struct {\n\tbeats Beats\n\temit chan Beats\n\tmsgs []chan<- Beats\n}\n\nfunc tbprint(x, y int, fg, bg termbox.Attribute, msg string) {\n\tfor _, c := range msg {\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx++\n\t}\n}\n\nfunc InitKeyboard(msgs []chan<- Beats) *Keyboard {\n\t\/\/ termbox.Close() called when Render.Run() exits\n\terr := termbox.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttermbox.SetInputMode(termbox.InputAlt)\n\n\treturn &Keyboard{\n\t\tbeats: Beats{},\n\t\temit: make(chan Beats),\n\t\tmsgs: msgs,\n\t}\n}\n\nfunc (kb *Keyboard) Run() {\n\tvar current string\n\tvar curev termbox.Event\n\n\tdefer close(kb.emit)\n\n\tdata := make([]byte, 0, 64)\n\n\t\/\/ starter beat\n\tgo kb.Emitter()\n\tkb.beats[1][0] = true\n\tkb.beats[1][8] = true\n\tkb.Emit()\n\n\tfor {\n\t\tif cap(data)-len(data) < 32 {\n\t\t\tnewdata := make([]byte, len(data), len(data)+32)\n\t\t\tcopy(newdata, data)\n\t\t\tdata = newdata\n\t\t}\n\t\tbeg := len(data)\n\t\td := data[beg : beg+32]\n\t\tswitch ev := termbox.PollRawEvent(d); ev.Type {\n\t\tcase termbox.EventRaw:\n\t\t\tdata = data[:beg+ev.N]\n\t\t\tcurrent = fmt.Sprintf(\"%s\", data)\n\t\t\tif current == \"`\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkey := keymaps[current]\n\t\t\tif key != nil {\n\t\t\t\tkb.beats[key[0]][key[1]] = !kb.beats[key[0]][key[1]]\n\t\t\t\tkb.Emit()\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\t\/\/ TODO: move kb.beats code to here\n\t\t\t\tev := termbox.ParseEvent(data)\n\t\t\t\tif ev.N == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcurev = ev\n\t\t\t\tcopy(data, 
data[curev.N:])\n\t\t\t\tdata = data[:len(data)-curev.N]\n\n\t\t\t\ttbprint(0, BEATS+1, termbox.ColorDefault, termbox.ColorDefault,\n\t\t\t\t\tfmt.Sprintf(\"EventKey: k: %5d, c: %c\", ev.Key, ev.Ch))\n\t\t\t\ttermbox.Flush()\n\t\t\t}\n\t\tcase termbox.EventError:\n\t\t\tpanic(ev.Err)\n\t\t}\n\t}\n}\n\nfunc (kb *Keyboard) Emit() {\n\tbeats := kb.beats\n\tkb.emit <- beats\n}\n\nfunc (kb *Keyboard) Emitter() {\n\tfor {\n\t\tselect {\n\t\tcase beats, more := <-kb.emit:\n\t\t\tif more {\n\t\t\t\tfor _, msg := range kb.msgs {\n\t\t\t\t\tmsg <- beats\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor _, msg := range kb.msgs {\n\t\t\t\t\tclose(msg)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing\n\/\/ algorithm. See http:\/\/www.usenix.org\/event\/usenix99\/provos\/provos.pdf\npackage bcrypt\n\n\/\/ The code is a port of Provos and Mazières's C implementation.\nimport (\n\t\"code.google.com\/p\/go.crypto\/blowfish\"\n\t\"crypto\/rand\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n)\n\nconst (\n\tMinCost int = 4 \/\/ the minimum allowable cost as passed in to GenerateFromPassword\n\tMaxCost int = 31 \/\/ the maximum allowable cost as passed in to GenerateFromPassword\n\tDefaultCost int = 10 \/\/ the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword\n)\n\n\/\/ The error returned from CompareHashAndPassword when a password and hash do\n\/\/ not match.\nvar ErrMismatchedHashAndPassword = errors.New(\"crypto\/bcrypt: hashedPassword is not the hash of the given password\")\n\n\/\/ The error returned from CompareHashAndPassword when a hash is too short to\n\/\/ be a bcrypt hash.\nvar ErrHashTooShort = errors.New(\"crypto\/bcrypt: hashedSecret too short to be a bcrypted password\")\n\n\/\/ The error returned from CompareHashAndPassword when a hash was created with\n\/\/ a bcrypt algorithm newer than this implementation.\ntype HashVersionTooNewError byte\n\nfunc (hv HashVersionTooNewError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'\", byte(hv), majorVersion)\n}\n\n\/\/ The error returned from CompareHashAndPassword when a hash starts with something other than '$'\ntype InvalidHashPrefixError byte\n\nfunc (ih InvalidHashPrefixError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'\", byte(ih))\n}\n\ntype InvalidCostError int\n\nfunc (ic InvalidCostError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: cost %d is outside allowed range (%d,%d)\", int(ic), int(MinCost), int(MaxCost))\n}\n\nconst (\n\tmajorVersion = '2'\n\tminorVersion = 'a'\n\tmaxSaltSize = 16\n\tmaxCryptedHashSize = 23\n\tencodedSaltSize = 22\n\tencodedHashSize = 31\n\tminHashSize = 59\n)\n\n\/\/ magicCipherData is an IV for the 64 Blowfish encryption calls in\n\/\/ bcrypt(). 
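Why the cleaned-up bbox mapping keys its table on a struct: rune characters and special keys share one lookup instead of two parallel maps. A sketch with stand-in types so it runs without termbox; TermKey, coords, and lookup are illustrative names, not from the source.

package main

import "fmt"

// TermKey stands in for termbox.Key.
type TermKey uint16

const KeyTab TermKey = 9

// Key mirrors the composite map key introduced above: Ch for unicode
// characters, Key valid only when Ch is 0.
type Key struct {
	Ch  rune
	Key TermKey
}

var coords = map[Key][]int{
	{'1', 0}:    {0, 0},
	{0, KeyTab}: {0, 2},
}

// lookup resolves either kind of key through the one table, following the
// convention that special keys are stored with Ch == 0.
func lookup(ch rune, k TermKey) ([]int, bool) {
	if ch != 0 {
		k = 0
	}
	c, ok := coords[Key{ch, k}]
	return c, ok
}

func main() {
	fmt.Println(lookup('1', 0))    // [0 0] true
	fmt.Println(lookup(0, KeyTab)) // [0 2] true
}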
It's the string \"OrpheanBeholderScryDoubt\" in big-endian bytes.\nvar magicCipherData = []byte{\n\t0x4f, 0x72, 0x70, 0x68,\n\t0x65, 0x61, 0x6e, 0x42,\n\t0x65, 0x68, 0x6f, 0x6c,\n\t0x64, 0x65, 0x72, 0x53,\n\t0x63, 0x72, 0x79, 0x44,\n\t0x6f, 0x75, 0x62, 0x74,\n}\n\ntype hashed struct {\n\thash []byte\n\tsalt []byte\n\tcost int \/\/ allowed range is MinCost to MaxCost\n\tmajor byte\n\tminor byte\n}\n\n\/\/ GenerateFromPassword returns the bcrypt hash of the password at the given\n\/\/ cost. If the cost given is less than MinCost, the cost will be set to\n\/\/ DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,\n\/\/ to compare the returned hashed password with its cleartext version.\nfunc GenerateFromPassword(password []byte, cost int) ([]byte, error) {\n\tp, err := newFromPassword(password, cost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.Hash(), nil\n}\n\n\/\/ CompareHashAndPassword compares a bcrypt hashed password with its possible\n\/\/ plaintext equivalent. Returns nil on success, or an error on failure.\nfunc CompareHashAndPassword(hashedPassword, password []byte) error {\n\tp, err := newFromHash(hashedPassword)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\totherHash, err := bcrypt(password, p.cost, p.salt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\totherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}\n\tif subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {\n\t\treturn nil\n\t}\n\n\treturn ErrMismatchedHashAndPassword\n}\n\n\/\/ Cost returns the hashing cost used to create the given hashed\n\/\/ password. When, in the future, the hashing cost of a password system needs\n\/\/ to be increased in order to adjust for greater computational power, this\n\/\/ function allows one to establish which passwords need to be updated.\nfunc Cost(hashedPassword []byte) (int, error) {\n\tp, err := newFromHash(hashedPassword)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.cost, nil\n}\n\nfunc newFromPassword(password []byte, cost int) (*hashed, error) {\n\tif cost < MinCost {\n\t\tcost = DefaultCost\n\t}\n\tp := new(hashed)\n\tp.major = majorVersion\n\tp.minor = minorVersion\n\n\terr := checkCost(cost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.cost = cost\n\n\tunencodedSalt := make([]byte, maxSaltSize)\n\t_, err = io.ReadFull(rand.Reader, unencodedSalt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.salt = base64Encode(unencodedSalt)\n\thash, err := bcrypt(password, p.cost, p.salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.hash = hash\n\treturn p, err\n}\n\nfunc newFromHash(hashedSecret []byte) (*hashed, error) {\n\tif len(hashedSecret) < minHashSize {\n\t\treturn nil, ErrHashTooShort\n\t}\n\tp := new(hashed)\n\tn, err := p.decodeVersion(hashedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thashedSecret = hashedSecret[n:]\n\tn, err = p.decodeCost(hashedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thashedSecret = hashedSecret[n:]\n\n\t\/\/ The \"+2\" is here because we'll have to append at most 2 '=' to the salt\n\t\/\/ when base64 decoding it in expensiveBlowfishSetup().\n\tp.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)\n\tcopy(p.salt, hashedSecret[:encodedSaltSize])\n\n\thashedSecret = hashedSecret[encodedSaltSize:]\n\tp.hash = make([]byte, len(hashedSecret))\n\tcopy(p.hash, hashedSecret)\n\n\treturn p, nil\n}\n\nfunc bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {\n\tcipherData := make([]byte, len(magicCipherData))\n\tcopy(cipherData, magicCipherData)\n\n\tc, err := 
expensiveBlowfishSetup(password, uint32(cost), salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < 24; i += 8 {\n\t\tfor j := 0; j < 64; j++ {\n\t\t\tc.Encrypt(cipherData[i:i+8], cipherData[i:i+8])\n\t\t}\n\t}\n\n\t\/\/ Bug compatibility with C bcrypt implementations. We only encode 23 of\n\t\/\/ the 24 bytes encrypted.\n\thsh := base64Encode(cipherData[:maxCryptedHashSize])\n\treturn hsh, nil\n}\n\nfunc expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {\n\n\tcsalt, err := base64Decode(salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Bug compatibility with C bcrypt implementations. They use the trailing\n\t\/\/ NULL in the key string during expansion.\n\tckey := append(key, 0)\n\n\tc, err := blowfish.NewSaltedCipher(ckey, csalt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trounds := 1 << cost\n\tfor i := 0; i < rounds; i++ {\n\t\tblowfish.ExpandKey(ckey, c)\n\t\tblowfish.ExpandKey(csalt, c)\n\t}\n\n\treturn c, nil\n}\n\nfunc (p *hashed) Hash() []byte {\n\tarr := make([]byte, 60)\n\tarr[0] = '$'\n\tarr[1] = p.major\n\tn := 2\n\tif p.minor != 0 {\n\t\tarr[2] = p.minor\n\t\tn = 3\n\t}\n\tarr[n] = '$'\n\tn += 1\n\tcopy(arr[n:], []byte(fmt.Sprintf(\"%02d\", p.cost)))\n\tn += 2\n\tarr[n] = '$'\n\tn += 1\n\tcopy(arr[n:], p.salt)\n\tn += encodedSaltSize\n\tcopy(arr[n:], p.hash)\n\tn += encodedHashSize\n\treturn arr[:n]\n}\n\nfunc (p *hashed) decodeVersion(sbytes []byte) (int, error) {\n\tif sbytes[0] != '$' {\n\t\treturn -1, InvalidHashPrefixError(sbytes[0])\n\t}\n\tif sbytes[1] > majorVersion {\n\t\treturn -1, HashVersionTooNewError(sbytes[1])\n\t}\n\tp.major = sbytes[1]\n\tn := 3\n\tif sbytes[2] != '$' {\n\t\tp.minor = sbytes[2]\n\t\tn++\n\t}\n\treturn n, nil\n}\n\n\/\/ sbytes should begin where decodeVersion left off.\nfunc (p *hashed) decodeCost(sbytes []byte) (int, error) {\n\tcost, err := strconv.Atoi(string(sbytes[0:2]))\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\terr = checkCost(cost)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tp.cost = cost\n\treturn 3, nil\n}\n\nfunc (p *hashed) String() string {\n\treturn fmt.Sprintf(\"&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}\", string(p.hash), p.salt, p.cost, p.major, p.minor)\n}\n\nfunc checkCost(cost int) error {\n\tif cost < MinCost || cost > MaxCost {\n\t\treturn InvalidCostError(cost)\n\t}\n\treturn nil\n}\ngo.crypto\/bcrypt: fix interger overflow for cost == 31 Fixes issue 4803.\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing\n\/\/ algorithm. 
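A minimal reproduction of the overflow the commit message describes: with a 32-bit int, 1 << 31 goes negative, so the `i < rounds` loop condition is false from the start and key expansion never runs. int32 is used here so the effect shows on any platform.

package main

import "fmt"

func main() {
	cost := uint32(31)

	// Pre-fix behaviour on a 32-bit platform: the round counter wraps.
	rounds32 := int32(1) << cost
	fmt.Println(rounds32)     // -2147483648
	fmt.Println(rounds32 > 0) // false -> `for i := 0; i < rounds` body never runs

	// Post-fix behaviour: a 64-bit counter holds 2^31 rounds as intended.
	var rounds64 uint64 = 1 << cost
	fmt.Println(rounds64) // 2147483648
}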
See http:\/\/www.usenix.org\/event\/usenix99\/provos\/provos.pdf\npackage bcrypt\n\n\/\/ The code is a port of Provos and Mazières's C implementation.\nimport (\n\t\"code.google.com\/p\/go.crypto\/blowfish\"\n\t\"crypto\/rand\"\n\t\"crypto\/subtle\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n)\n\nconst (\n\tMinCost int = 4 \/\/ the minimum allowable cost as passed in to GenerateFromPassword\n\tMaxCost int = 31 \/\/ the maximum allowable cost as passed in to GenerateFromPassword\n\tDefaultCost int = 10 \/\/ the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword\n)\n\n\/\/ The error returned from CompareHashAndPassword when a password and hash do\n\/\/ not match.\nvar ErrMismatchedHashAndPassword = errors.New(\"crypto\/bcrypt: hashedPassword is not the hash of the given password\")\n\n\/\/ The error returned from CompareHashAndPassword when a hash is too short to\n\/\/ be a bcrypt hash.\nvar ErrHashTooShort = errors.New(\"crypto\/bcrypt: hashedSecret too short to be a bcrypted password\")\n\n\/\/ The error returned from CompareHashAndPassword when a hash was created with\n\/\/ a bcrypt algorithm newer than this implementation.\ntype HashVersionTooNewError byte\n\nfunc (hv HashVersionTooNewError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'\", byte(hv), majorVersion)\n}\n\n\/\/ The error returned from CompareHashAndPassword when a hash starts with something other than '$'\ntype InvalidHashPrefixError byte\n\nfunc (ih InvalidHashPrefixError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'\", byte(ih))\n}\n\ntype InvalidCostError int\n\nfunc (ic InvalidCostError) Error() string {\n\treturn fmt.Sprintf(\"crypto\/bcrypt: cost %d is outside allowed range (%d,%d)\", int(ic), int(MinCost), int(MaxCost))\n}\n\nconst (\n\tmajorVersion = '2'\n\tminorVersion = 'a'\n\tmaxSaltSize = 16\n\tmaxCryptedHashSize = 23\n\tencodedSaltSize = 22\n\tencodedHashSize = 31\n\tminHashSize = 59\n)\n\n\/\/ magicCipherData is an IV for the 64 Blowfish encryption calls in\n\/\/ bcrypt(). It's the string \"OrpheanBeholderScryDoubt\" in big-endian bytes.\nvar magicCipherData = []byte{\n\t0x4f, 0x72, 0x70, 0x68,\n\t0x65, 0x61, 0x6e, 0x42,\n\t0x65, 0x68, 0x6f, 0x6c,\n\t0x64, 0x65, 0x72, 0x53,\n\t0x63, 0x72, 0x79, 0x44,\n\t0x6f, 0x75, 0x62, 0x74,\n}\n\ntype hashed struct {\n\thash []byte\n\tsalt []byte\n\tcost int \/\/ allowed range is MinCost to MaxCost\n\tmajor byte\n\tminor byte\n}\n\n\/\/ GenerateFromPassword returns the bcrypt hash of the password at the given\n\/\/ cost. If the cost given is less than MinCost, the cost will be set to\n\/\/ DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,\n\/\/ to compare the returned hashed password with its cleartext version.\nfunc GenerateFromPassword(password []byte, cost int) ([]byte, error) {\n\tp, err := newFromPassword(password, cost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.Hash(), nil\n}\n\n\/\/ CompareHashAndPassword compares a bcrypt hashed password with its possible\n\/\/ plaintext equivalent. 
Returns nil on success, or an error on failure.\nfunc CompareHashAndPassword(hashedPassword, password []byte) error {\n\tp, err := newFromHash(hashedPassword)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\totherHash, err := bcrypt(password, p.cost, p.salt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\totherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}\n\tif subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {\n\t\treturn nil\n\t}\n\n\treturn ErrMismatchedHashAndPassword\n}\n\n\/\/ Cost returns the hashing cost used to create the given hashed\n\/\/ password. When, in the future, the hashing cost of a password system needs\n\/\/ to be increased in order to adjust for greater computational power, this\n\/\/ function allows one to establish which passwords need to be updated.\nfunc Cost(hashedPassword []byte) (int, error) {\n\tp, err := newFromHash(hashedPassword)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.cost, nil\n}\n\nfunc newFromPassword(password []byte, cost int) (*hashed, error) {\n\tif cost < MinCost {\n\t\tcost = DefaultCost\n\t}\n\tp := new(hashed)\n\tp.major = majorVersion\n\tp.minor = minorVersion\n\n\terr := checkCost(cost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.cost = cost\n\n\tunencodedSalt := make([]byte, maxSaltSize)\n\t_, err = io.ReadFull(rand.Reader, unencodedSalt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.salt = base64Encode(unencodedSalt)\n\thash, err := bcrypt(password, p.cost, p.salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.hash = hash\n\treturn p, err\n}\n\nfunc newFromHash(hashedSecret []byte) (*hashed, error) {\n\tif len(hashedSecret) < minHashSize {\n\t\treturn nil, ErrHashTooShort\n\t}\n\tp := new(hashed)\n\tn, err := p.decodeVersion(hashedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thashedSecret = hashedSecret[n:]\n\tn, err = p.decodeCost(hashedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thashedSecret = hashedSecret[n:]\n\n\t\/\/ The \"+2\" is here because we'll have to append at most 2 '=' to the salt\n\t\/\/ when base64 decoding it in expensiveBlowfishSetup().\n\tp.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)\n\tcopy(p.salt, hashedSecret[:encodedSaltSize])\n\n\thashedSecret = hashedSecret[encodedSaltSize:]\n\tp.hash = make([]byte, len(hashedSecret))\n\tcopy(p.hash, hashedSecret)\n\n\treturn p, nil\n}\n\nfunc bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {\n\tcipherData := make([]byte, len(magicCipherData))\n\tcopy(cipherData, magicCipherData)\n\n\tc, err := expensiveBlowfishSetup(password, uint32(cost), salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < 24; i += 8 {\n\t\tfor j := 0; j < 64; j++ {\n\t\t\tc.Encrypt(cipherData[i:i+8], cipherData[i:i+8])\n\t\t}\n\t}\n\n\t\/\/ Bug compatibility with C bcrypt implementations. We only encode 23 of\n\t\/\/ the 24 bytes encrypted.\n\thsh := base64Encode(cipherData[:maxCryptedHashSize])\n\treturn hsh, nil\n}\n\nfunc expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {\n\n\tcsalt, err := base64Decode(salt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Bug compatibility with C bcrypt implementations. 
They use the trailing\n\t\/\/ NULL in the key string during expansion.\n\tckey := append(key, 0)\n\n\tc, err := blowfish.NewSaltedCipher(ckey, csalt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar i, rounds uint64\n\trounds = 1 << cost\n\tfor i = 0; i < rounds; i++ {\n\t\tblowfish.ExpandKey(ckey, c)\n\t\tblowfish.ExpandKey(csalt, c)\n\t}\n\n\treturn c, nil\n}\n\nfunc (p *hashed) Hash() []byte {\n\tarr := make([]byte, 60)\n\tarr[0] = '$'\n\tarr[1] = p.major\n\tn := 2\n\tif p.minor != 0 {\n\t\tarr[2] = p.minor\n\t\tn = 3\n\t}\n\tarr[n] = '$'\n\tn += 1\n\tcopy(arr[n:], []byte(fmt.Sprintf(\"%02d\", p.cost)))\n\tn += 2\n\tarr[n] = '$'\n\tn += 1\n\tcopy(arr[n:], p.salt)\n\tn += encodedSaltSize\n\tcopy(arr[n:], p.hash)\n\tn += encodedHashSize\n\treturn arr[:n]\n}\n\nfunc (p *hashed) decodeVersion(sbytes []byte) (int, error) {\n\tif sbytes[0] != '$' {\n\t\treturn -1, InvalidHashPrefixError(sbytes[0])\n\t}\n\tif sbytes[1] > majorVersion {\n\t\treturn -1, HashVersionTooNewError(sbytes[1])\n\t}\n\tp.major = sbytes[1]\n\tn := 3\n\tif sbytes[2] != '$' {\n\t\tp.minor = sbytes[2]\n\t\tn++\n\t}\n\treturn n, nil\n}\n\n\/\/ sbytes should begin where decodeVersion left off.\nfunc (p *hashed) decodeCost(sbytes []byte) (int, error) {\n\tcost, err := strconv.Atoi(string(sbytes[0:2]))\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\terr = checkCost(cost)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tp.cost = cost\n\treturn 3, nil\n}\n\nfunc (p *hashed) String() string {\n\treturn fmt.Sprintf(\"&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}\", string(p.hash), p.salt, p.cost, p.major, p.minor)\n}\n\nfunc checkCost(cost int) error {\n\tif cost < MinCost || cost > MaxCost {\n\t\treturn InvalidCostError(cost)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package cluster\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/scrypt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/shared\/validate\"\n)\n\n\/\/ Config holds cluster-wide configuration values.\ntype Config struct {\n\ttx *db.ClusterTx \/\/ DB transaction the values in this config are bound to.\n\tm config.Map \/\/ Low-level map holding the config values.\n}\n\n\/\/ ConfigLoad loads a new Config object with the current cluster configuration\n\/\/ values fetched from the database.\nfunc ConfigLoad(tx *db.ClusterTx) (*Config, error) {\n\t\/\/ Load current raw values from the database, any error is fatal.\n\tvalues, err := tx.Config()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot fetch node config from database: %v\", err)\n\t}\n\n\tm, err := config.SafeLoad(ConfigSchema, values)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load node config: %v\", err)\n\t}\n\n\treturn &Config{tx: tx, m: m}, nil\n}\n\n\/\/ HTTPSAllowedHeaders returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedHeaders() string {\n\treturn c.m.GetString(\"core.https_allowed_headers\")\n}\n\n\/\/ HTTPSAllowedMethods returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedMethods() string {\n\treturn c.m.GetString(\"core.https_allowed_methods\")\n}\n\n\/\/ HTTPSAllowedOrigin returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedOrigin() string {\n\treturn c.m.GetString(\"core.https_allowed_origin\")\n}\n\n\/\/ HTTPSAllowedCredentials returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedCredentials() bool {\n\treturn 
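Typical use of the three exported entry points defined above. The import path follows the code.google.com/p/go.crypto layout this file's own imports use for blowfish.

package main

import (
	"fmt"

	"code.google.com/p/go.crypto/bcrypt"
)

func main() {
	// Hash at the default cost; the random salt is generated internally.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// Constant-time comparison: a nil error means the password matches.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("mismatch:", err)
	} else {
		fmt.Println("match")
	}

	// Cost recovers the work factor, so old hashes can be found and
	// re-hashed when the system-wide cost is raised.
	cost, _ := bcrypt.Cost(hash)
	fmt.Println("cost:", cost)
}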
c.m.GetBool(\"core.https_allowed_credentials\")\n}\n\n\/\/ TrustPassword returns the LXD trust password for authenticating clients.\nfunc (c *Config) TrustPassword() string {\n\treturn c.m.GetString(\"core.trust_password\")\n}\n\n\/\/ TrustCACertificates returns whether client certificates are checked\n\/\/ against a CA.\nfunc (c *Config) TrustCACertificates() bool {\n\treturn c.m.GetBool(\"core.trust_ca_certificates\")\n}\n\n\/\/ CandidServer returns all the Candid settings needed to connect to a server.\nfunc (c *Config) CandidServer() (string, string, int64, string) {\n\treturn c.m.GetString(\"candid.api.url\"),\n\t\tc.m.GetString(\"candid.api.key\"),\n\t\tc.m.GetInt64(\"candid.expiry\"),\n\t\tc.m.GetString(\"candid.domains\")\n}\n\n\/\/ RBACServer returns all the Candid settings needed to connect to a server.\nfunc (c *Config) RBACServer() (string, string, int64, string, string, string, string) {\n\treturn c.m.GetString(\"rbac.api.url\"),\n\t\tc.m.GetString(\"rbac.api.key\"),\n\t\tc.m.GetInt64(\"rbac.expiry\"),\n\t\tc.m.GetString(\"rbac.agent.url\"),\n\t\tc.m.GetString(\"rbac.agent.username\"),\n\t\tc.m.GetString(\"rbac.agent.private_key\"),\n\t\tc.m.GetString(\"rbac.agent.public_key\")\n}\n\n\/\/ ProxyHTTPS returns the configured HTTPS proxy, if any.\nfunc (c *Config) ProxyHTTPS() string {\n\treturn c.m.GetString(\"core.proxy_https\")\n}\n\n\/\/ ProxyHTTP returns the configured HTTP proxy, if any.\nfunc (c *Config) ProxyHTTP() string {\n\treturn c.m.GetString(\"core.proxy_http\")\n}\n\n\/\/ ProxyIgnoreHosts returns the configured ignore-hosts proxy setting, if any.\nfunc (c *Config) ProxyIgnoreHosts() string {\n\treturn c.m.GetString(\"core.proxy_ignore_hosts\")\n}\n\n\/\/ HTTPSTrustedProxy returns the configured HTTPS trusted proxy setting, if any.\nfunc (c *Config) HTTPSTrustedProxy() string {\n\treturn c.m.GetString(\"core.https_trusted_proxy\")\n}\n\n\/\/ MAASController the configured MAAS url and key, if any.\nfunc (c *Config) MAASController() (string, string) {\n\turl := c.m.GetString(\"maas.api.url\")\n\tkey := c.m.GetString(\"maas.api.key\")\n\treturn url, key\n}\n\n\/\/ OfflineThreshold returns the configured heartbeat threshold, i.e. the\n\/\/ number of seconds before after which an unresponsive node is considered\n\/\/ offline..\nfunc (c *Config) OfflineThreshold() time.Duration {\n\tn := c.m.GetInt64(\"cluster.offline_threshold\")\n\treturn time.Duration(n) * time.Second\n}\n\n\/\/ ImagesMinimalReplica returns the numbers of nodes for cluster images replication\nfunc (c *Config) ImagesMinimalReplica() int64 {\n\treturn c.m.GetInt64(\"cluster.images_minimal_replica\")\n}\n\n\/\/ MaxVoters returns the maximum number of members in a cluster that will be\n\/\/ assigned the voter role.\nfunc (c *Config) MaxVoters() int64 {\n\treturn c.m.GetInt64(\"cluster.max_voters\")\n}\n\n\/\/ MaxStandBy returns the maximum number of standby members in a cluster that\n\/\/ will be assigned the stand-by role.\nfunc (c *Config) MaxStandBy() int64 {\n\treturn c.m.GetInt64(\"cluster.max_standby\")\n}\n\n\/\/ ShutdownTimeout returns the number of minutes to wait for running operation to complete\n\/\/ before LXD server shut down\nfunc (c *Config) ShutdownTimeout() time.Duration {\n\tn := c.m.GetInt64(\"core.shutdown_timeout\")\n\treturn time.Duration(n) * time.Minute\n}\n\n\/\/ Dump current configuration keys and their values. 
Keys with values matching\n\/\/ their defaults are omitted.\nfunc (c *Config) Dump() map[string]interface{} {\n\treturn c.m.Dump()\n}\n\n\/\/ Replace the current configuration with the given values.\n\/\/\n\/\/ Return what has actually changed.\nfunc (c *Config) Replace(values map[string]interface{}) (map[string]string, error) {\n\treturn c.update(values)\n}\n\n\/\/ Patch changes only the configuration keys in the given map.\n\/\/\n\/\/ Return what has actually changed.\nfunc (c *Config) Patch(patch map[string]interface{}) (map[string]string, error) {\n\tvalues := c.Dump() \/\/ Use current values as defaults\n\tfor name, value := range patch {\n\t\tvalues[name] = value\n\t}\n\treturn c.update(values)\n}\n\nfunc (c *Config) update(values map[string]interface{}) (map[string]string, error) {\n\tchanged, err := c.m.Change(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.tx.UpdateConfig(changed)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot persist configuration changes: %v\")\n\t}\n\n\treturn changed, nil\n}\n\n\/\/ ConfigGetString is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetString(cluster *db.Cluster, key string) (string, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn config.m.GetString(key), nil\n}\n\n\/\/ ConfigGetBool is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular boolean key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetBool(cluster *db.Cluster, key string) (bool, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn config.m.GetBool(key), nil\n}\n\n\/\/ ConfigGetInt64 is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetInt64(cluster *db.Cluster, key string) (int64, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn config.m.GetInt64(key), nil\n}\n\nfunc configGet(cluster *db.Cluster) (*Config, error) {\n\tvar config *Config\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tconfig, err = ConfigLoad(tx)\n\t\treturn err\n\t})\n\treturn config, err\n}\n\n\/\/ ConfigSchema defines available server configuration keys.\nvar ConfigSchema = config.Schema{\n\t\"backups.compression_algorithm\": {Default: \"gzip\", Validator: validate.IsCompressionAlgorithm},\n\t\"cluster.offline_threshold\": {Type: config.Int64, Default: offlineThresholdDefault(), Validator: offlineThresholdValidator},\n\t\"cluster.images_minimal_replica\": {Type: config.Int64, Default: \"3\", Validator: imageMinimalReplicaValidator},\n\t\"cluster.max_voters\": {Type: config.Int64, Default: \"3\", Validator: maxVotersValidator},\n\t\"cluster.max_standby\": {Type: config.Int64, Default: \"2\", Validator: maxStandByValidator},\n\t\"core.https_allowed_headers\": {},\n\t\"core.https_allowed_methods\": {},\n\t\"core.https_allowed_origin\": {},\n\t\"core.https_allowed_credentials\": {Type: config.Bool},\n\t\"core.https_trusted_proxy\": 
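The merge step inside the Patch/update pair above, isolated with plain maps standing in for config.Map: current values seed the map, then patched keys overwrite, so unspecified keys keep their existing values.

package main

import "fmt"

// patchOver mirrors what Patch does with Dump(): start from the current
// values, then let the sparse patch win key by key.
func patchOver(current, patch map[string]interface{}) map[string]interface{} {
	values := make(map[string]interface{}, len(current))
	for k, v := range current {
		values[k] = v // current values act as defaults
	}
	for k, v := range patch {
		values[k] = v // patched keys overwrite; untouched keys persist
	}
	return values
}

func main() {
	current := map[string]interface{}{"cluster.max_voters": "3"}
	patch := map[string]interface{}{"core.shutdown_timeout": "10"}
	fmt.Println(patchOver(current, patch))
}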
{},\n\t\"core.proxy_http\": {},\n\t\"core.proxy_https\": {},\n\t\"core.proxy_ignore_hosts\": {},\n\t\"core.shutdown_timeout\": {Type: config.Int64, Default: \"5\"},\n\t\"core.trust_password\": {Hidden: true, Setter: passwordSetter},\n\t\"core.trust_ca_certificates\": {Type: config.Bool},\n\t\"candid.api.key\": {},\n\t\"candid.api.url\": {},\n\t\"candid.domains\": {},\n\t\"candid.expiry\": {Type: config.Int64, Default: \"3600\"},\n\t\"images.auto_update_cached\": {Type: config.Bool, Default: \"true\"},\n\t\"images.auto_update_interval\": {Type: config.Int64, Default: \"6\"},\n\t\"images.compression_algorithm\": {Default: \"gzip\", Validator: validate.IsCompressionAlgorithm},\n\t\"images.default_architecture\": {Validator: validate.IsArchitecture},\n\t\"images.remote_cache_expiry\": {Type: config.Int64, Default: \"10\"},\n\t\"maas.api.key\": {},\n\t\"maas.api.url\": {},\n\t\"rbac.agent.url\": {},\n\t\"rbac.agent.username\": {},\n\t\"rbac.agent.private_key\": {},\n\t\"rbac.agent.public_key\": {},\n\t\"rbac.api.expiry\": {Type: config.Int64, Default: \"3600\"},\n\t\"rbac.api.key\": {},\n\t\"rbac.api.url\": {},\n\t\"rbac.expiry\": {Type: config.Int64, Default: \"3600\"},\n\n\t\/\/ Keys deprecated since the implementation of the storage api.\n\t\"storage.lvm_fstype\": {Setter: deprecatedStorage, Default: \"ext4\"},\n\t\"storage.lvm_mount_options\": {Setter: deprecatedStorage, Default: \"discard\"},\n\t\"storage.lvm_thinpool_name\": {Setter: deprecatedStorage, Default: \"LXDThinPool\"},\n\t\"storage.lvm_vg_name\": {Setter: deprecatedStorage},\n\t\"storage.lvm_volume_size\": {Setter: deprecatedStorage, Default: \"10GiB\"},\n\t\"storage.zfs_pool_name\": {Setter: deprecatedStorage},\n\t\"storage.zfs_remove_snapshots\": {Setter: deprecatedStorage, Type: config.Bool},\n\t\"storage.zfs_use_refquota\": {Setter: deprecatedStorage, Type: config.Bool},\n\n\t\/\/ OVN networking global keys.\n\t\"network.ovn.integration_bridge\": {Default: \"br-int\"},\n\t\"network.ovn.northbound_connection\": {Default: \"unix:\/var\/run\/ovn\/ovnnb_db.sock\"},\n}\n\nfunc offlineThresholdDefault() string {\n\treturn strconv.Itoa(db.DefaultOfflineThreshold)\n}\n\nfunc offlineThresholdValidator(value string) error {\n\tminThreshold := 10\n\n\t\/\/ Ensure that the given value is greater than the heartbeat interval,\n\t\/\/ which is the lower bound granularity of the offline check.\n\tthreshold, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Offline threshold is not a number\")\n\t}\n\n\tif threshold <= minThreshold {\n\t\treturn fmt.Errorf(\"Value must be greater than '%d'\", minThreshold)\n\t}\n\n\treturn nil\n}\n\nfunc imageMinimalReplicaValidator(value string) error {\n\tcount, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Minimal image replica count is not a number\")\n\t}\n\n\tif count < 1 && count != -1 {\n\t\treturn fmt.Errorf(\"Invalid value for image replica count\")\n\t}\n\n\treturn nil\n}\n\nfunc maxVotersValidator(value string) error {\n\tn, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Value is not a number\")\n\t}\n\n\tif n < 3 || n%2 != 1 {\n\t\treturn fmt.Errorf(\"Value must be an odd number equal to or higher than 3\")\n\t}\n\n\treturn nil\n}\n\nfunc maxStandByValidator(value string) error {\n\tn, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Value is not a number\")\n\t}\n\n\tif n < 0 || n > 5 {\n\t\treturn fmt.Errorf(\"Value must be between 0 and 5\")\n\t}\n\n\treturn nil\n}\n\nfunc passwordSetter(value string) 
(string, error) {\n\t\/\/ Nothing to do on unset\n\tif value == \"\" {\n\t\treturn value, nil\n\t}\n\n\t\/\/ Hash the password\n\tbuf := make([]byte, 32)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf = append(buf, hash...)\n\tvalue = hex.EncodeToString(buf)\n\n\treturn value, nil\n}\n\nfunc deprecatedStorage(value string) (string, error) {\n\tif value == \"\" {\n\t\treturn \"\", nil\n\t}\n\treturn \"\", fmt.Errorf(\"deprecated: use storage pool configuration\")\n}\nlxd\/cluster\/config: Wraps images.default_architecture with validate.Optional due to IsOneOf change in IsArchitecturepackage cluster\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/crypto\/scrypt\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/shared\/validate\"\n)\n\n\/\/ Config holds cluster-wide configuration values.\ntype Config struct {\n\ttx *db.ClusterTx \/\/ DB transaction the values in this config are bound to.\n\tm config.Map \/\/ Low-level map holding the config values.\n}\n\n\/\/ ConfigLoad loads a new Config object with the current cluster configuration\n\/\/ values fetched from the database.\nfunc ConfigLoad(tx *db.ClusterTx) (*Config, error) {\n\t\/\/ Load current raw values from the database, any error is fatal.\n\tvalues, err := tx.Config()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot fetch node config from database: %v\", err)\n\t}\n\n\tm, err := config.SafeLoad(ConfigSchema, values)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load node config: %v\", err)\n\t}\n\n\treturn &Config{tx: tx, m: m}, nil\n}\n\n\/\/ HTTPSAllowedHeaders returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedHeaders() string {\n\treturn c.m.GetString(\"core.https_allowed_headers\")\n}\n\n\/\/ HTTPSAllowedMethods returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedMethods() string {\n\treturn c.m.GetString(\"core.https_allowed_methods\")\n}\n\n\/\/ HTTPSAllowedOrigin returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedOrigin() string {\n\treturn c.m.GetString(\"core.https_allowed_origin\")\n}\n\n\/\/ HTTPSAllowedCredentials returns the relevant CORS setting.\nfunc (c *Config) HTTPSAllowedCredentials() bool {\n\treturn c.m.GetBool(\"core.https_allowed_credentials\")\n}\n\n\/\/ TrustPassword returns the LXD trust password for authenticating clients.\nfunc (c *Config) TrustPassword() string {\n\treturn c.m.GetString(\"core.trust_password\")\n}\n\n\/\/ TrustCACertificates returns whether client certificates are checked\n\/\/ against a CA.\nfunc (c *Config) TrustCACertificates() bool {\n\treturn c.m.GetBool(\"core.trust_ca_certificates\")\n}\n\n\/\/ CandidServer returns all the Candid settings needed to connect to a server.\nfunc (c *Config) CandidServer() (string, string, int64, string) {\n\treturn c.m.GetString(\"candid.api.url\"),\n\t\tc.m.GetString(\"candid.api.key\"),\n\t\tc.m.GetInt64(\"candid.expiry\"),\n\t\tc.m.GetString(\"candid.domains\")\n}\n\n\/\/ RBACServer returns all the Candid settings needed to connect to a server.\nfunc (c *Config) RBACServer() (string, string, int64, string, string, string, string) {\n\treturn 
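passwordSetter above stores hex(32-byte salt || 64-byte scrypt key). A hedged sketch of the matching verification step under that format assumption: checkTrustPassword is a hypothetical name, and LXD's real check lives in another file; only the storage format produced here is mirrored.

package main

import (
	"crypto/subtle"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

// checkTrustPassword re-derives the scrypt key from a candidate password
// with the same parameters passwordSetter uses (N=1<<14, r=8, p=1, 64-byte
// key) and compares in constant time. (Hypothetical helper.)
func checkTrustPassword(stored, candidate string) (bool, error) {
	buf, err := hex.DecodeString(stored)
	if err != nil || len(buf) != 96 {
		return false, fmt.Errorf("malformed stored password")
	}
	salt, want := buf[:32], buf[32:]

	got, err := scrypt.Key([]byte(candidate), salt, 1<<14, 8, 1, 64)
	if err != nil {
		return false, err
	}
	return subtle.ConstantTimeCompare(got, want) == 1, nil
}

func main() {
	ok, err := checkTrustPassword("deadbeef", "guess")
	fmt.Println(ok, err) // false malformed stored password
}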
c.m.GetString(\"rbac.api.url\"),\n\t\tc.m.GetString(\"rbac.api.key\"),\n\t\tc.m.GetInt64(\"rbac.expiry\"),\n\t\tc.m.GetString(\"rbac.agent.url\"),\n\t\tc.m.GetString(\"rbac.agent.username\"),\n\t\tc.m.GetString(\"rbac.agent.private_key\"),\n\t\tc.m.GetString(\"rbac.agent.public_key\")\n}\n\n\/\/ ProxyHTTPS returns the configured HTTPS proxy, if any.\nfunc (c *Config) ProxyHTTPS() string {\n\treturn c.m.GetString(\"core.proxy_https\")\n}\n\n\/\/ ProxyHTTP returns the configured HTTP proxy, if any.\nfunc (c *Config) ProxyHTTP() string {\n\treturn c.m.GetString(\"core.proxy_http\")\n}\n\n\/\/ ProxyIgnoreHosts returns the configured ignore-hosts proxy setting, if any.\nfunc (c *Config) ProxyIgnoreHosts() string {\n\treturn c.m.GetString(\"core.proxy_ignore_hosts\")\n}\n\n\/\/ HTTPSTrustedProxy returns the configured HTTPS trusted proxy setting, if any.\nfunc (c *Config) HTTPSTrustedProxy() string {\n\treturn c.m.GetString(\"core.https_trusted_proxy\")\n}\n\n\/\/ MAASController the configured MAAS url and key, if any.\nfunc (c *Config) MAASController() (string, string) {\n\turl := c.m.GetString(\"maas.api.url\")\n\tkey := c.m.GetString(\"maas.api.key\")\n\treturn url, key\n}\n\n\/\/ OfflineThreshold returns the configured heartbeat threshold, i.e. the\n\/\/ number of seconds before after which an unresponsive node is considered\n\/\/ offline..\nfunc (c *Config) OfflineThreshold() time.Duration {\n\tn := c.m.GetInt64(\"cluster.offline_threshold\")\n\treturn time.Duration(n) * time.Second\n}\n\n\/\/ ImagesMinimalReplica returns the numbers of nodes for cluster images replication\nfunc (c *Config) ImagesMinimalReplica() int64 {\n\treturn c.m.GetInt64(\"cluster.images_minimal_replica\")\n}\n\n\/\/ MaxVoters returns the maximum number of members in a cluster that will be\n\/\/ assigned the voter role.\nfunc (c *Config) MaxVoters() int64 {\n\treturn c.m.GetInt64(\"cluster.max_voters\")\n}\n\n\/\/ MaxStandBy returns the maximum number of standby members in a cluster that\n\/\/ will be assigned the stand-by role.\nfunc (c *Config) MaxStandBy() int64 {\n\treturn c.m.GetInt64(\"cluster.max_standby\")\n}\n\n\/\/ ShutdownTimeout returns the number of minutes to wait for running operation to complete\n\/\/ before LXD server shut down\nfunc (c *Config) ShutdownTimeout() time.Duration {\n\tn := c.m.GetInt64(\"core.shutdown_timeout\")\n\treturn time.Duration(n) * time.Minute\n}\n\n\/\/ Dump current configuration keys and their values. 
Keys with values matching\n\/\/ their defaults are omitted.\nfunc (c *Config) Dump() map[string]interface{} {\n\treturn c.m.Dump()\n}\n\n\/\/ Replace the current configuration with the given values.\n\/\/\n\/\/ Return what has actually changed.\nfunc (c *Config) Replace(values map[string]interface{}) (map[string]string, error) {\n\treturn c.update(values)\n}\n\n\/\/ Patch changes only the configuration keys in the given map.\n\/\/\n\/\/ Return what has actually changed.\nfunc (c *Config) Patch(patch map[string]interface{}) (map[string]string, error) {\n\tvalues := c.Dump() \/\/ Use current values as defaults\n\tfor name, value := range patch {\n\t\tvalues[name] = value\n\t}\n\treturn c.update(values)\n}\n\nfunc (c *Config) update(values map[string]interface{}) (map[string]string, error) {\n\tchanged, err := c.m.Change(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.tx.UpdateConfig(changed)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot persist configuration changes\")\n\t}\n\n\treturn changed, nil\n}\n\n\/\/ ConfigGetString is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetString(cluster *db.Cluster, key string) (string, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn config.m.GetString(key), nil\n}\n\n\/\/ ConfigGetBool is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular boolean key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetBool(cluster *db.Cluster, key string) (bool, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn config.m.GetBool(key), nil\n}\n\n\/\/ ConfigGetInt64 is a convenience for loading the cluster configuration and\n\/\/ returning the value of a particular integer key.\n\/\/\n\/\/ It's a deprecated API meant to be used by call sites that are not\n\/\/ interacting with the database in a transactional way.\nfunc ConfigGetInt64(cluster *db.Cluster, key string) (int64, error) {\n\tconfig, err := configGet(cluster)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn config.m.GetInt64(key), nil\n}\n\nfunc configGet(cluster *db.Cluster) (*Config, error) {\n\tvar config *Config\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\tconfig, err = ConfigLoad(tx)\n\t\treturn err\n\t})\n\treturn config, err\n}\n\n\/\/ ConfigSchema defines available server configuration keys.\nvar ConfigSchema = config.Schema{\n\t\"backups.compression_algorithm\":  {Default: \"gzip\", Validator: validate.IsCompressionAlgorithm},\n\t\"cluster.offline_threshold\":      {Type: config.Int64, Default: offlineThresholdDefault(), Validator: offlineThresholdValidator},\n\t\"cluster.images_minimal_replica\": {Type: config.Int64, Default: \"3\", Validator: imageMinimalReplicaValidator},\n\t\"cluster.max_voters\":             {Type: config.Int64, Default: \"3\", Validator: maxVotersValidator},\n\t\"cluster.max_standby\":            {Type: config.Int64, Default: \"2\", Validator: maxStandByValidator},\n\t\"core.https_allowed_headers\":     {},\n\t\"core.https_allowed_methods\":     {},\n\t\"core.https_allowed_origin\":      {},\n\t\"core.https_allowed_credentials\": {Type: config.Bool},\n\t\"core.https_trusted_proxy\": 
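// Aside (not part of the original file): Replace expects the full desired
// configuration (keys left out fall back to their defaults), while Patch
// overlays only the given keys onto the current values. A self-contained
// sketch of the merge Patch performs before calling update:
func examplePatchMerge() map[string]interface{} {
	current := map[string]interface{}{
		"cluster.offline_threshold": "30",
		"core.shutdown_timeout":     "5",
	}
	patch := map[string]interface{}{
		"core.shutdown_timeout": "10",
	}

	merged := make(map[string]interface{}, len(current))
	for name, value := range current {
		merged[name] = value
	}
	for name, value := range patch {
		merged[name] = value // patched keys win; untouched keys survive
	}
	// merged: offline_threshold stays "30", shutdown_timeout becomes "10".
	return merged
}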
{},\n\t\"core.proxy_http\": {},\n\t\"core.proxy_https\": {},\n\t\"core.proxy_ignore_hosts\": {},\n\t\"core.shutdown_timeout\": {Type: config.Int64, Default: \"5\"},\n\t\"core.trust_password\": {Hidden: true, Setter: passwordSetter},\n\t\"core.trust_ca_certificates\": {Type: config.Bool},\n\t\"candid.api.key\": {},\n\t\"candid.api.url\": {},\n\t\"candid.domains\": {},\n\t\"candid.expiry\": {Type: config.Int64, Default: \"3600\"},\n\t\"images.auto_update_cached\": {Type: config.Bool, Default: \"true\"},\n\t\"images.auto_update_interval\": {Type: config.Int64, Default: \"6\"},\n\t\"images.compression_algorithm\": {Default: \"gzip\", Validator: validate.IsCompressionAlgorithm},\n\t\"images.default_architecture\": {Validator: validate.Optional(validate.IsArchitecture)},\n\t\"images.remote_cache_expiry\": {Type: config.Int64, Default: \"10\"},\n\t\"maas.api.key\": {},\n\t\"maas.api.url\": {},\n\t\"rbac.agent.url\": {},\n\t\"rbac.agent.username\": {},\n\t\"rbac.agent.private_key\": {},\n\t\"rbac.agent.public_key\": {},\n\t\"rbac.api.expiry\": {Type: config.Int64, Default: \"3600\"},\n\t\"rbac.api.key\": {},\n\t\"rbac.api.url\": {},\n\t\"rbac.expiry\": {Type: config.Int64, Default: \"3600\"},\n\n\t\/\/ Keys deprecated since the implementation of the storage api.\n\t\"storage.lvm_fstype\": {Setter: deprecatedStorage, Default: \"ext4\"},\n\t\"storage.lvm_mount_options\": {Setter: deprecatedStorage, Default: \"discard\"},\n\t\"storage.lvm_thinpool_name\": {Setter: deprecatedStorage, Default: \"LXDThinPool\"},\n\t\"storage.lvm_vg_name\": {Setter: deprecatedStorage},\n\t\"storage.lvm_volume_size\": {Setter: deprecatedStorage, Default: \"10GiB\"},\n\t\"storage.zfs_pool_name\": {Setter: deprecatedStorage},\n\t\"storage.zfs_remove_snapshots\": {Setter: deprecatedStorage, Type: config.Bool},\n\t\"storage.zfs_use_refquota\": {Setter: deprecatedStorage, Type: config.Bool},\n\n\t\/\/ OVN networking global keys.\n\t\"network.ovn.integration_bridge\": {Default: \"br-int\"},\n\t\"network.ovn.northbound_connection\": {Default: \"unix:\/var\/run\/ovn\/ovnnb_db.sock\"},\n}\n\nfunc offlineThresholdDefault() string {\n\treturn strconv.Itoa(db.DefaultOfflineThreshold)\n}\n\nfunc offlineThresholdValidator(value string) error {\n\tminThreshold := 10\n\n\t\/\/ Ensure that the given value is greater than the heartbeat interval,\n\t\/\/ which is the lower bound granularity of the offline check.\n\tthreshold, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Offline threshold is not a number\")\n\t}\n\n\tif threshold <= minThreshold {\n\t\treturn fmt.Errorf(\"Value must be greater than '%d'\", minThreshold)\n\t}\n\n\treturn nil\n}\n\nfunc imageMinimalReplicaValidator(value string) error {\n\tcount, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Minimal image replica count is not a number\")\n\t}\n\n\tif count < 1 && count != -1 {\n\t\treturn fmt.Errorf(\"Invalid value for image replica count\")\n\t}\n\n\treturn nil\n}\n\nfunc maxVotersValidator(value string) error {\n\tn, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Value is not a number\")\n\t}\n\n\tif n < 3 || n%2 != 1 {\n\t\treturn fmt.Errorf(\"Value must be an odd number equal to or higher than 3\")\n\t}\n\n\treturn nil\n}\n\nfunc maxStandByValidator(value string) error {\n\tn, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Value is not a number\")\n\t}\n\n\tif n < 0 || n > 5 {\n\t\treturn fmt.Errorf(\"Value must be between 0 and 5\")\n\t}\n\n\treturn nil\n}\n\nfunc 
passwordSetter(value string) (string, error) {\n\t\/\/ Nothing to do on unset\n\tif value == \"\" {\n\t\treturn value, nil\n\t}\n\n\t\/\/ Hash the password\n\tbuf := make([]byte, 32)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf = append(buf, hash...)\n\tvalue = hex.EncodeToString(buf)\n\n\treturn value, nil\n}\n\nfunc deprecatedStorage(value string) (string, error) {\n\tif value == \"\" {\n\t\treturn \"\", nil\n\t}\n\treturn \"\", fmt.Errorf(\"deprecated: use storage pool configuration\")\n}\n<|endoftext|>"} {"text":"package cluster\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/endpoints\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ eventHubMinHosts is the minimum number of members that must have the event-hub role to trigger switching into\n\/\/ event-hub mode (where cluster members will only connect to event-hub members rather than all members when\n\/\/ operating in the normal full-mesh mode).\nconst eventHubMinHosts = 2\n\n\/\/ EventMode indicates the event distribution mode.\ntype EventMode string\n\n\/\/ EventModeFullMesh is when every cluster member connects to every other cluster member to pull events.\nconst EventModeFullMesh EventMode = \"full-mesh\"\n\n\/\/ EventModeHubServer is when the cluster is operating in event-hub mode and this server is designated as a hub\n\/\/ server, meaning that it will only connect to the other event-hub members and not other members.\nconst EventModeHubServer EventMode = \"hub-server\"\n\n\/\/ EventModeHubClient is when the cluster is operating in event-hub mode and this member is designated as a hub\n\/\/ client, meaning that it is expected to connect to the event-hub members.\nconst EventModeHubClient EventMode = \"hub-client\"\n\nvar listeners = map[string]*lxd.EventListener{}\nvar listenersNotify = map[chan struct{}][]string{}\nvar listenersLock sync.Mutex\nvar listenersUpdateLock sync.Mutex\n\n\/\/ ServerEventMode returns the event distribution mode that this local server is operating in.\nfunc ServerEventMode() EventMode {\n\tlistenersLock.Lock()\n\tdefer listenersLock.Unlock()\n\n\treturn eventMode\n}\n\n\/\/ RoleInSlice returns whether or not the rule is within the roles list.\nfunc RoleInSlice(role db.ClusterRole, roles []db.ClusterRole) bool {\n\tfor _, r := range roles {\n\t\tif r == role {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ EventListenerWait waits for there to be listener connected to the specified address, or one of the event hubs\n\/\/ if operating in event hub mode.\nfunc EventListenerWait(ctx context.Context, address string) error {\n\t\/\/ Check if there is already a listener.\n\tlistenersLock.Lock()\n\tlistener, found := listeners[address]\n\tif found && listener.IsActive() {\n\t\tlistenersLock.Unlock()\n\t\treturn nil\n\t}\n\n\tlistenAddresses := []string{address}\n\n\t\/\/ If not setup a notification for when the desired address or any of the event hubs connect.\n\tconnected := make(chan struct{})\n\tlistenersNotify[connected] = listenAddresses\n\tlistenersLock.Unlock()\n\n\tdefer func() {\n\t\tlistenersLock.Lock()\n\t\tdelete(listenersNotify, 
connected)\n\t\tlistenersLock.Unlock()\n\t}()\n\n\t\/\/ Wait for the connected channel to be closed (indicating a new listener has been connected), and return.\n\tselect {\n\tcase <-connected:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ EventsUpdateListeners refreshes the cluster event listener connections.\nfunc EventsUpdateListeners(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, members map[int64]APIHeartbeatMember, f func(int64, api.Event)) {\n\tlistenersUpdateLock.Lock()\n\tdefer listenersUpdateLock.Unlock()\n\n\t\/\/ If no heartbeat members provided, populate from global database.\n\tif members == nil {\n\t\tvar dbMembers []db.NodeInfo\n\t\tvar offlineThreshold time.Duration\n\n\t\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\t\tvar err error\n\n\t\t\tdbMembers, err = tx.GetNodes()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tofflineThreshold, err = tx.GetNodeOfflineThreshold()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Failed to get current cluster members\", log.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\n\t\tmembers = make(map[int64]APIHeartbeatMember, len(dbMembers))\n\t\tfor _, dbMember := range dbMembers {\n\t\t\tmembers[dbMember.ID] = APIHeartbeatMember{\n\t\t\t\tID: dbMember.ID,\n\t\t\t\tName: dbMember.Name,\n\t\t\t\tAddress: dbMember.Address,\n\t\t\t\tLastHeartbeat: dbMember.Heartbeat,\n\t\t\t\tOnline: !dbMember.IsOffline(offlineThreshold),\n\t\t\t\tRoles: dbMember.Roles,\n\t\t\t}\n\t\t}\n\t}\n\n\tnetworkAddress := endpoints.NetworkAddress()\n\n\tkeepListeners := make(map[string]struct{})\n\twg := sync.WaitGroup{}\n\tfor _, member := range members {\n\t\t\/\/ Don't bother trying to connect to ourselves or offline members.\n\t\tif member.Address == networkAddress || !member.Online {\n\t\t\tcontinue\n\t\t}\n\n\t\tlistenersLock.Lock()\n\t\tlistener, ok := listeners[member.Address]\n\n\t\t\/\/ If the member already has a listener associated to it, check that the listener is still active.\n\t\t\/\/ If it is, just move on to next member, but if not then we'll try to connect again.\n\t\tif ok {\n\t\t\tif listener.IsActive() {\n\t\t\t\tkeepListeners[member.Address] = struct{}{} \/\/ Add to current listeners list.\n\t\t\t\tlistenersLock.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Disconnect and delete listener, but don't delete any listenersNotify entry as there\n\t\t\t\/\/ might be something waiting for a future connection.\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, member.Address)\n\t\t\tlogger.Info(\"Removed inactive member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": member.Address})\n\t\t}\n\t\tlistenersLock.Unlock()\n\n\t\tkeepListeners[member.Address] = struct{}{} \/\/ Add to current listeners list.\n\n\t\t\/\/ Connect to remote concurrently and add to active listeners if successful.\n\t\twg.Add(1)\n\t\tgo func(m APIHeartbeatMember) {\n\t\t\tdefer wg.Done()\n\t\t\tlistener, err := eventsConnect(m.Address, endpoints.NetworkCert(), serverCert())\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"Failed adding member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": m.Address, \"err\": err})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlistener.AddHandler(nil, func(event api.Event) { f(m.ID, event) })\n\n\t\t\tlistenersLock.Lock()\n\t\t\tlisteners[m.Address] = listener\n\n\t\t\t\/\/ Indicate to any notifiers waiting for this member's address that it 
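// Aside (not part of the original file): EventListenerWait blocks until a
// listener for the given address connects or the context ends, so callers
// are expected to bound the wait themselves. A hypothetical call site:
func exampleWaitForMember(address string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Returns nil once a listener is connected, or ctx.Err() on timeout.
	return EventListenerWait(ctx, address)
}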
is connected.\n\t\t\tfor connected, notifyAddresses := range listenersNotify {\n\t\t\t\tif shared.StringInSlice(m.Address, notifyAddresses) {\n\t\t\t\t\tclose(connected)\n\t\t\t\t\tdelete(listenersNotify, connected)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogger.Info(\"Added member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": m.Address})\n\t\t\tlistenersLock.Unlock()\n\t\t}(member)\n\t}\n\n\twg.Wait()\n\n\t\/\/ Disconnect and delete any out of date listeners and their notifiers.\n\tlistenersLock.Lock()\n\tfor address, listener := range listeners {\n\t\tif _, found := keepListeners[address]; !found {\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, address)\n\t\t\tlogger.Info(\"Removed old member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": address})\n\t\t}\n\t}\n\tlistenersLock.Unlock()\n}\n\n\/\/ Establish a client connection to get events from the given node.\nfunc eventsConnect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo) (*lxd.EventListener, error) {\n\tclient, err := Connect(address, networkCert, serverCert, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\trevert.Add(func() {\n\t\tclient.Disconnect()\n\t})\n\n\tlistener, err := client.GetEventsAllProjects()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Success()\n\treturn listener, nil\n}\nlxd\/cluster\/events: Updates EventsUpdateListeners to only connect to event-hub serverspackage cluster\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/endpoints\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\n\/\/ eventHubMinHosts is the minimum number of members that must have the event-hub role to trigger switching into\n\/\/ event-hub mode (where cluster members will only connect to event-hub members rather than all members when\n\/\/ operating in the normal full-mesh mode).\nconst eventHubMinHosts = 2\n\n\/\/ EventMode indicates the event distribution mode.\ntype EventMode string\n\n\/\/ EventModeFullMesh is when every cluster member connects to every other cluster member to pull events.\nconst EventModeFullMesh EventMode = \"full-mesh\"\n\n\/\/ EventModeHubServer is when the cluster is operating in event-hub mode and this server is designated as a hub\n\/\/ server, meaning that it will only connect to the other event-hub members and not other members.\nconst EventModeHubServer EventMode = \"hub-server\"\n\n\/\/ EventModeHubClient is when the cluster is operating in event-hub mode and this member is designated as a hub\n\/\/ client, meaning that it is expected to connect to the event-hub members.\nconst EventModeHubClient EventMode = \"hub-client\"\n\nvar eventMode EventMode = EventModeFullMesh\nvar eventHubAddresses []string\nvar listeners = map[string]*lxd.EventListener{}\nvar listenersNotify = map[chan struct{}][]string{}\nvar listenersLock sync.Mutex\nvar listenersUpdateLock sync.Mutex\n\n\/\/ ServerEventMode returns the event distribution mode that this local server is operating in.\nfunc ServerEventMode() EventMode {\n\tlistenersLock.Lock()\n\tdefer listenersLock.Unlock()\n\n\treturn eventMode\n}\n\n\/\/ RoleInSlice returns whether or not the rule is within the roles list.\nfunc RoleInSlice(role db.ClusterRole, roles []db.ClusterRole) 
bool {\n\tfor _, r := range roles {\n\t\tif r == role {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n\/\/ EventListenerWait waits for there to be listener connected to the specified address, or one of the event hubs\n\/\/ if operating in event hub mode.\nfunc EventListenerWait(ctx context.Context, address string) error {\n\t\/\/ Check if there is already a listener.\n\tlistenersLock.Lock()\n\tlistener, found := listeners[address]\n\tif found && listener.IsActive() {\n\t\tlistenersLock.Unlock()\n\t\treturn nil\n\t}\n\n\tlistenAddresses := []string{address}\n\n\t\/\/ If not setup a notification for when the desired address or any of the event hubs connect.\n\tconnected := make(chan struct{})\n\tlistenersNotify[connected] = listenAddresses\n\tlistenersLock.Unlock()\n\n\tdefer func() {\n\t\tlistenersLock.Lock()\n\t\tdelete(listenersNotify, connected)\n\t\tlistenersLock.Unlock()\n\t}()\n\n\t\/\/ Wait for the connected channel to be closed (indicating a new listener has been connected), and return.\n\tselect {\n\tcase <-connected:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\n\/\/ hubAddresses returns the addresses of members with event-hub role, and the event mode of the server.\n\/\/ The event mode will only be hub-server or hub-client if at least eventHubMinHosts have an event-hub role.\n\/\/ Otherwise the mode will be full-mesh.\nfunc hubAddresses(localAddress string, members map[int64]APIHeartbeatMember) ([]string, EventMode) {\n\tvar hubAddresses []string\n\tvar localHasHubRole bool\n\n\t\/\/ Do a first pass of members to count the members with event-hub role, and whether we are a hub server.\n\tfor _, member := range members {\n\t\tif RoleInSlice(db.ClusterRoleEventHub, member.Roles) {\n\t\t\thubAddresses = append(hubAddresses, member.Address)\n\n\t\t\tif member.Address == localAddress {\n\t\t\t\tlocalHasHubRole = true\n\t\t\t}\n\t\t}\n\t}\n\n\teventMode := EventModeFullMesh\n\tif len(hubAddresses) >= eventHubMinHosts {\n\t\tif localHasHubRole {\n\t\t\teventMode = EventModeHubServer\n\t\t} else {\n\t\t\teventMode = EventModeHubClient\n\t\t}\n\t}\n\n\treturn hubAddresses, eventMode\n}\n\n\/\/ EventsUpdateListeners refreshes the cluster event listener connections.\nfunc EventsUpdateListeners(endpoints *endpoints.Endpoints, cluster *db.Cluster, serverCert func() *shared.CertInfo, members map[int64]APIHeartbeatMember, f func(int64, api.Event)) {\n\tlistenersUpdateLock.Lock()\n\tdefer listenersUpdateLock.Unlock()\n\n\t\/\/ If no heartbeat members provided, populate from global database.\n\tif members == nil {\n\t\tvar dbMembers []db.NodeInfo\n\t\tvar offlineThreshold time.Duration\n\n\t\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\t\tvar err error\n\n\t\t\tdbMembers, err = tx.GetNodes()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tofflineThreshold, err = tx.GetNodeOfflineThreshold()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Failed to get current cluster members\", log.Ctx{\"err\": err})\n\t\t\treturn\n\t\t}\n\n\t\tmembers = make(map[int64]APIHeartbeatMember, len(dbMembers))\n\t\tfor _, dbMember := range dbMembers {\n\t\t\tmembers[dbMember.ID] = APIHeartbeatMember{\n\t\t\t\tID: dbMember.ID,\n\t\t\t\tName: dbMember.Name,\n\t\t\t\tAddress: dbMember.Address,\n\t\t\t\tLastHeartbeat: dbMember.Heartbeat,\n\t\t\t\tOnline: !dbMember.IsOffline(offlineThreshold),\n\t\t\t\tRoles: dbMember.Roles,\n\t\t\t}\n\t\t}\n\t}\n\n\tnetworkAddress := 
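// Aside (not part of the original file): the mode picked by hubAddresses
// depends only on how many members hold the event-hub role and whether the
// local member is one of them. With eventHubMinHosts = 2:
//
//	fewer than 2 event-hub members     -> EventModeFullMesh for everyone
//	>= 2 hubs and local has the role   -> EventModeHubServer
//	>= 2 hubs and local lacks the role -> EventModeHubClient
//
// A sketch exercising it (assumes fmt is imported; building the members map
// is left to the caller):
func exampleHubMode(localAddress string, members map[int64]APIHeartbeatMember) {
	addresses, mode := hubAddresses(localAddress, members)
	fmt.Printf("event hubs: %v, local mode: %s\n", addresses, mode)
}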
endpoints.NetworkAddress()\n\thubAddresses, localEventMode := hubAddresses(networkAddress, members)\n\n\t\/\/ Store event hub addresses in global slice.\n\tlistenersLock.Lock()\n\teventHubAddresses = hubAddresses\n\teventMode = localEventMode\n\tlistenersLock.Unlock()\n\n\tkeepListeners := make(map[string]struct{})\n\twg := sync.WaitGroup{}\n\tfor _, member := range members {\n\t\t\/\/ Don't bother trying to connect to ourselves or offline members.\n\t\tif member.Address == networkAddress || !member.Online {\n\t\t\tcontinue\n\t\t}\n\n\t\tif localEventMode != EventModeFullMesh && !RoleInSlice(db.ClusterRoleEventHub, member.Roles) {\n\t\t\tcontinue \/\/ Skip non-event-hub members if we are operating in event-hub mode.\n\t\t}\n\n\t\tlistenersLock.Lock()\n\t\tlistener, ok := listeners[member.Address]\n\n\t\t\/\/ If the member already has a listener associated to it, check that the listener is still active.\n\t\t\/\/ If it is, just move on to next member, but if not then we'll try to connect again.\n\t\tif ok {\n\t\t\tif listener.IsActive() {\n\t\t\t\tkeepListeners[member.Address] = struct{}{} \/\/ Add to current listeners list.\n\t\t\t\tlistenersLock.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Disconnect and delete listener, but don't delete any listenersNotify entry as there\n\t\t\t\/\/ might be something waiting for a future connection.\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, member.Address)\n\t\t\tlogger.Info(\"Removed inactive member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": member.Address})\n\t\t}\n\t\tlistenersLock.Unlock()\n\n\t\tkeepListeners[member.Address] = struct{}{} \/\/ Add to current listeners list.\n\n\t\t\/\/ Connect to remote concurrently and add to active listeners if successful.\n\t\twg.Add(1)\n\t\tgo func(m APIHeartbeatMember) {\n\t\t\tdefer wg.Done()\n\t\t\tlistener, err := eventsConnect(m.Address, endpoints.NetworkCert(), serverCert())\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"Failed adding member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": m.Address, \"err\": err})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlistener.AddHandler(nil, func(event api.Event) { f(m.ID, event) })\n\n\t\t\tlistenersLock.Lock()\n\t\t\tlisteners[m.Address] = listener\n\n\t\t\t\/\/ Indicate to any notifiers waiting for this member's address that it is connected.\n\t\t\tfor connected, notifyAddresses := range listenersNotify {\n\t\t\t\tif shared.StringInSlice(m.Address, notifyAddresses) {\n\t\t\t\t\tclose(connected)\n\t\t\t\t\tdelete(listenersNotify, connected)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogger.Info(\"Added member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": m.Address})\n\t\t\tlistenersLock.Unlock()\n\t\t}(member)\n\t}\n\n\twg.Wait()\n\n\t\/\/ Disconnect and delete any out of date listeners and their notifiers.\n\tlistenersLock.Lock()\n\tfor address, listener := range listeners {\n\t\tif _, found := keepListeners[address]; !found {\n\t\t\tlistener.Disconnect()\n\t\t\tdelete(listeners, address)\n\t\t\tlogger.Info(\"Removed old member event listener client\", log.Ctx{\"local\": networkAddress, \"remote\": address})\n\t\t}\n\t}\n\tlistenersLock.Unlock()\n}\n\n\/\/ Establish a client connection to get events from the given node.\nfunc eventsConnect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo) (*lxd.EventListener, error) {\n\tclient, err := Connect(address, networkCert, serverCert, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := 
revert.New()\n\trevert.Add(func() {\n\t\tclient.Disconnect()\n\t})\n\n\tlistener, err := client.GetEventsAllProjects()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Success()\n\treturn listener, nil\n}\n<|endoftext|>"} {"text":"package models\n\nimport (\n\t\"database\/sql\"\n\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\ntype Backup struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tStarted string `json:\"started\"`\n\tFinished string `json:\"finished\"`\n\tDuration string `json:\"duration\"`\n\tStatus string `json:\"status\"`\n}\n\ntype BackupCollection struct {\n\tBackups []Backup `json:\"items\"`\n}\n\nfunc GetBackups(db *sql.DB) BackupCollection {\n\tsql := \"SELECT * FROM backups\"\n\trows, err := db.Query(sql)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Cleanup if exit\n\tdefer rows.Close()\n\n\tresult := BackupCollection{}\n\n\tfor rows.Next() {\n\t\tbackup := Backup{}\n\t\terr2 := rows.Scan(&backup.ID, &backup.Name)\n\n\t\tif err2 != nil {\n\t\t\tpanic(err2)\n\t\t}\n\n\t\tresult.Backups = append(result.Backups, backup)\n\t}\n\treturn result\n}\n\nfunc PutBackup(db *sql.DB, name string, starting string, finished string, duration string, status string) (int64, error) {\n\tsql := \"INSERT INTO backups(name, starting, finished, duration, status) VALUES(?,?,?,?,?)\"\n\n\t\/\/ Create prepared sql statement\n\tstmt, err := db.Prepare(sql)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer stmt.Close()\n\n\tresult, err2 := stmt.Exec(name)\n\n\tif err2 != nil {\n\t\tpanic(err)\n\t}\n\n\treturn result.LastInsertId()\n\n}\n🐛 Fix PutBackup and GetBackup on Backup Modelpackage models\n\nimport (\n\t\"database\/sql\"\n\n\t_ \"github.com\/mattn\/go-sqlite3\"\n)\n\ntype Backup struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n\tStarted string `json:\"started\"`\n\tFinished string `json:\"finished\"`\n\tDuration string `json:\"duration\"`\n\tStatus string `json:\"status\"`\n}\n\ntype BackupCollection struct {\n\tBackups []Backup `json:\"items\"`\n}\n\nfunc GetBackups(db *sql.DB) BackupCollection {\n\tsql := \"SELECT * FROM backups\"\n\trows, err := db.Query(sql)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Cleanup if exit\n\tdefer rows.Close()\n\n\tresult := BackupCollection{}\n\n\tfor rows.Next() {\n\t\tbackup := Backup{}\n\t\terr2 := rows.Scan(&backup.ID, &backup.Name, &backup.Started, &backup.Finished, &backup.Duration, &backup.Status)\n\n\t\tif err2 != nil {\n\t\t\tpanic(err2)\n\t\t}\n\n\t\tresult.Backups = append(result.Backups, backup)\n\t}\n\treturn result\n}\n\nfunc PutBackup(db *sql.DB, name string, starting string, finished string, duration string, status string) (int64, error) {\n\tsql := \"INSERT INTO backups(name, starting, finished, duration, status) VALUES(?, ?, ?, ?, ?)\"\n\n\t\/\/ Create prepared sql statement\n\tstmt, err := db.Prepare(sql)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer stmt.Close()\n\n\tresult, err2 := stmt.Exec(name, starting, finished, duration, status)\n\n\tif err2 != nil {\n\t\tpanic(err)\n\t}\n\n\treturn result.LastInsertId()\n\n}\n<|endoftext|>"} {"text":"package models\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n)\n\nvar (\n\tDefaultMessageList *MessageManager\n\tkafkaTopic = \"franz\"\n\tkafkaBroker = \"localhost:9092\"\n)\n\ntype Message struct {\n\tID int64 \/\/ Unique identifier\n\tTitle string \/\/ Description\n\tDone bool \/\/ Is this Message done?\n}\n\n\/\/ NewMessage creates a new message given a title, that can't be empty.\nfunc NewMessage(title string) (*Message, error) 
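// Aside (not part of the original file): even the fixed PutBackup above still
// panics with err (which is nil at that point) instead of err2 when Exec
// fails, and panicking in a model layer is harsh. A sketch that propagates
// errors instead, under the same table schema; the name PutBackupSketch is
// made up:
func PutBackupSketch(db *sql.DB, name, starting, finished, duration, status string) (int64, error) {
	stmt, err := db.Prepare("INSERT INTO backups(name, starting, finished, duration, status) VALUES(?, ?, ?, ?, ?)")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	result, err := stmt.Exec(name, starting, finished, duration, status)
	if err != nil {
		return 0, err
	}
	return result.LastInsertId()
}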
{\n\tif title == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty title\")\n\t}\n\treturn &Message{0, title, false}, nil\n}\n\n\/\/ MessageManager manages a list of messages in memory.\ntype MessageManager struct {\n\tmessages []*Message\n\tlastID int64\n}\n\n\/\/ NewMessageManager returns an empty MessageManager.\nfunc NewMessageManager() *MessageManager {\n\treturn &MessageManager{}\n}\n\n\/\/ Save saves the given Message in the MessageManager.\nfunc (m *MessageManager) Save(message *Message) error {\n\tif message.ID == 0 {\n\t\tm.lastID++\n\t\tmessage.ID = m.lastID\n\t\tm.messages = append(m.messages, cloneMessage(message))\n\t\treturn nil\n\t}\n\n\tfor i, t := range m.messages {\n\t\tif t.ID == message.ID {\n\t\t\tm.messages[i] = cloneMessage(message)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"unknown message\")\n}\n\n\/\/ cloneMessage creates and returns a deep copy of the given Message.\nfunc cloneMessage(t *Message) *Message {\n\tc := *t\n\treturn &c\n}\n\n\/\/ All returns the list of all the Messages in the MessageManager.\nfunc (m *MessageManager) All() []*Message {\n\treturn m.messages\n}\n\n\/\/ Find returns the Message with the given id in the MessageManager and a boolean\n\/\/ indicating if the id was found.\nfunc (m *MessageManager) Find(ID int64) (*Message, bool) {\n\tfor _, t := range m.messages {\n\t\tif t.ID == ID {\n\t\t\treturn t, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (m *MessageManager) Send(message *Message) error {\n\tkafkaMessage := &sarama.ProducerMessage{\n\t\tTopic: kafkaTopic,\n\t\tKey: nil,\n\t\tValue: sarama.StringEncoder(message.Title),\n\t}\n\n\tkafkaProducer, err := sarama.NewSyncProducer([]string{kafkaBroker}, nil)\n\tif err != nil {\n\t\t\/\/TODO Cosmin\n\t}\n\tdefer func() {\n\t\tif errClose := kafkaProducer.Close(); errClose != nil {\n\t\t\t\/\/TODO Cosmin\n\t\t}\n\t}()\n\n\t_, _, errSend := kafkaProducer.SendMessage(kafkaMessage)\n\treturn errSend\n}\n\nfunc init() {\n\tDefaultMessageList = NewMessageManager()\n}\nadding error handling at message sendingpackage models\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/astaxie\/beego\"\n)\n\nvar (\n\tDefaultMessageList *MessageManager\n\tkafkaTopic = \"franz\"\n\tkafkaBroker = \"localhost:9092\"\n)\n\ntype Message struct {\n\tID int64 \/\/ Unique identifier\n\tTitle string \/\/ Description\n\tDone bool \/\/ Is this Message done?\n}\n\n\/\/ NewMessage creates a new message given a title, that can't be empty.\nfunc NewMessage(title string) (*Message, error) {\n\tif title == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty title\")\n\t}\n\treturn &Message{0, title, false}, nil\n}\n\n\/\/ MessageManager manages a list of messages in memory.\ntype MessageManager struct {\n\tmessages []*Message\n\tlastID int64\n}\n\n\/\/ NewMessageManager returns an empty MessageManager.\nfunc NewMessageManager() *MessageManager {\n\treturn &MessageManager{}\n}\n\n\/\/ Save saves the given Message in the MessageManager.\nfunc (m *MessageManager) Save(message *Message) error {\n\tif message.ID == 0 {\n\t\tm.lastID++\n\t\tmessage.ID = m.lastID\n\t\tm.messages = append(m.messages, cloneMessage(message))\n\t\treturn nil\n\t}\n\n\tfor i, t := range m.messages {\n\t\tif t.ID == message.ID {\n\t\t\tm.messages[i] = cloneMessage(message)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"unknown message\")\n}\n\n\/\/ cloneMessage creates and returns a deep copy of the given Message.\nfunc cloneMessage(t *Message) *Message {\n\tc := *t\n\treturn &c\n}\n\n\/\/ All returns the list of all the Messages 
in the MessageManager.\nfunc (m *MessageManager) All() []*Message {\n\treturn m.messages\n}\n\n\/\/ Find returns the Message with the given id in the MessageManager and a boolean\n\/\/ indicating if the id was found.\nfunc (m *MessageManager) Find(ID int64) (*Message, bool) {\n\tfor _, t := range m.messages {\n\t\tif t.ID == ID {\n\t\t\treturn t, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc (m *MessageManager) Send(message *Message) error {\n\tkafkaMessage := &sarama.ProducerMessage{\n\t\tTopic: kafkaTopic,\n\t\tKey: nil,\n\t\tValue: sarama.StringEncoder(message.Title),\n\t}\n\n\tkafkaProducer, err := sarama.NewSyncProducer([]string{kafkaBroker}, nil)\n\tif err != nil {\n\t\tbeego.Error(\"error when creating Kafka SyncProducer\", err)\n\t}\n\tdefer func() {\n\t\tif errClose := kafkaProducer.Close(); errClose != nil {\n\t\t\tbeego.Error(\"error when closing Kafka SyncProducer\", errClose)\n\t\t}\n\t}()\n\n\t_, _, errSend := kafkaProducer.SendMessage(kafkaMessage)\n\treturn errSend\n}\n\nfunc init() {\n\tDefaultMessageList = NewMessageManager()\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014 Unknwon\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage models\n\nimport (\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Unknwon\/com\"\n\n\t\"github.com\/gpmgo\/switch\/modules\/archive\"\n\t\"github.com\/gpmgo\/switch\/modules\/log\"\n\t\"github.com\/gpmgo\/switch\/modules\/qiniu\"\n\t\"github.com\/gpmgo\/switch\/modules\/setting\"\n)\n\nvar (\n\tErrRevisionIsLocal = errors.New(\"Revision archive is in local\")\n\tErrPackageNotExist = errors.New(\"Package does not exist\")\n\tErrRevisionNotExist = errors.New(\"Revision does not exist\")\n)\n\ntype Storage int\n\nconst (\n\tLOCAL Storage = iota\n\tQINIU\n)\n\n\/\/ Revision represents a revision of a Go package.\ntype Revision struct {\n\tId int64\n\tPkgId int64 `xorm:\"UNIQUE(s)\"`\n\tPkg *Package `xorm:\"-\"`\n\tRevision string `xorm:\"UNIQUE(s)\"`\n\tStorage\n\tSize int64\n\tUpdated time.Time `xorm:\"UPDATED\"`\n}\n\nfunc (r *Revision) GetPackage() (err error) {\n\tif r.Pkg != nil {\n\t\treturn nil\n\t}\n\tr.Pkg, err = GetPakcageById(r.PkgId)\n\treturn err\n}\n\n\/\/ KeyName returns QiNiu key name.\nfunc (r *Revision) KeyName() (string, error) {\n\tif r.Storage == LOCAL {\n\t\treturn \"\", ErrRevisionIsLocal\n\t}\n\tif err := r.GetPackage(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn r.Pkg.ImportPath + \"-\" + r.Revision + archive.GetExtension(r.Pkg.ImportPath), nil\n}\n\n\/\/ GetRevision returns revision by given pakcage ID and revision.\nfunc GetRevision(pkgId int64, rev string) (*Revision, error) {\n\tr := &Revision{\n\t\tPkgId: pkgId,\n\t\tRevision: rev,\n\t}\n\thas, err := x.Get(r)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrRevisionNotExist\n\t}\n\treturn r, nil\n}\n\n\/\/ UpdateRevision updates revision information.\nfunc UpdateRevision(rev *Revision) error {\n\t_, err := 
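// Aside (not part of the original file): in Send above, a failure from
// sarama.NewSyncProducer is only logged, after which kafkaProducer is nil and
// both the deferred Close and SendMessage would panic. A sketch that returns
// early instead; the method name SendSafe is made up:
func (m *MessageManager) SendSafe(message *Message) error {
	kafkaProducer, err := sarama.NewSyncProducer([]string{kafkaBroker}, nil)
	if err != nil {
		return fmt.Errorf("creating Kafka SyncProducer: %v", err)
	}
	defer func() {
		if errClose := kafkaProducer.Close(); errClose != nil {
			beego.Error("error when closing Kafka SyncProducer", errClose)
		}
	}()

	_, _, errSend := kafkaProducer.SendMessage(&sarama.ProducerMessage{
		Topic: kafkaTopic,
		Value: sarama.StringEncoder(message.Title),
	})
	return errSend
}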
x.Id(rev.Id).Update(rev)\n\treturn err\n}\n\n\/\/ DeleteRevisionById delete revision by given ID.\nfunc DeleteRevisionById(revId int64) error {\n\t_, err := x.Id(revId).Delete(new(Revision))\n\treturn err\n}\n\n\/\/ GetLocalRevisions returns all revisions that archives are saved locally.\nfunc GetLocalRevisions() ([]*Revision, error) {\n\trevs := make([]*Revision, 0, 10)\n\terr := x.Where(\"storage=0\").Find(&revs)\n\treturn revs, err\n}\n\n\/\/ GetRevisionsByPkgId returns a list of revisions of given package ID.\nfunc GetRevisionsByPkgId(pkgId int64) ([]*Revision, error) {\n\trevs := make([]*Revision, 0, 10)\n\terr := x.Where(\"pkg_id=?\", pkgId).Find(&revs)\n\treturn revs, err\n}\n\n\/\/ Package represents a Go package.\ntype Package struct {\n\tId int64\n\tImportPath string `xorm:\"UNIQUE\"`\n\tDescription string\n\tHomepage string\n\tIssues string\n\tDownloadCount int64\n\tRecentDownload int64\n\tIsValidated bool `xorm:\"DEFAULT 0\"`\n\tCreated time.Time `xorm:\"CREATED\"`\n}\n\nfunc (pkg *Package) GetRevisions() ([]*Revision, error) {\n\treturn GetRevisionsByPkgId(pkg.Id)\n}\n\n\/\/ NewPackage creates\nfunc NewPackage(importPath string) (*Package, error) {\n\tpkg := &Package{\n\t\tImportPath: importPath,\n\t}\n\tif _, err := x.Insert(pkg); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, nil\n}\n\n\/\/ GetPakcageById returns a package by given ID.\nfunc GetPakcageById(pkgId int64) (*Package, error) {\n\tpkg := &Package{}\n\thas, err := x.Id(pkgId).Get(pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrPackageNotExist\n\t}\n\treturn pkg, nil\n}\n\n\/\/ GetPakcageByPath returns a package by given import path.\nfunc GetPakcageByPath(importPath string) (*Package, error) {\n\tpkg := &Package{\n\t\tImportPath: importPath,\n\t}\n\thas, err := x.Get(pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrPackageNotExist\n\t}\n\treturn pkg, nil\n}\n\n\/\/ CheckPkg checks if versioned package is in records, and download it when needed.\nfunc CheckPkg(importPath, rev string) (*Revision, error) {\n\t\/\/ Check package record.\n\tpkg, err := GetPakcageByPath(importPath)\n\tif err != nil {\n\t\tif err != ErrPackageNotExist {\n\t\t\treturn nil, err\n\t\t}\n\t\tblocked, blockErr, err := IsPackageBlocked(importPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if blocked {\n\t\t\treturn nil, blockErr\n\t\t}\n\t}\n\n\tn := archive.NewNode(importPath, rev)\n\n\t\/\/ Get and check revision record.\n\tif err = n.GetRevision(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar r *Revision\n\tif pkg != nil {\n\t\tr, err = GetRevision(pkg.Id, n.Revision)\n\t\tif err != nil && err != ErrRevisionNotExist {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ return nil, fmt.Errorf(\"Revision: %s\", n.Revision)\n\n\tif r == nil || (r.Storage == LOCAL && !com.IsFile(n.ArchivePath)) {\n\t\tif err := n.Download(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif pkg == nil {\n\t\tpkg, err = NewPackage(n.ImportPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif r == nil {\n\t\tr = &Revision{\n\t\t\tPkgId: pkg.Id,\n\t\t\tRevision: n.Revision,\n\t\t}\n\t\t_, err = x.Insert(r)\n\t} else {\n\t\t_, err = x.Id(r.Id).Update(r)\n\t}\n\treturn r, nil\n}\n\n\/\/ IncreasePackageDownloadCount increase package download count by 1.\nfunc IncreasePackageDownloadCount(importPath string) error {\n\tpkg, err := GetPakcageByPath(importPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpkg.DownloadCount++\n\tpkg.RecentDownload++\n\t_, err = 
x.Id(pkg.Id).Update(pkg)\n\treturn err\n}\n\n\/\/ SearchPackages searchs packages by given keyword.\nfunc SearchPackages(keys string) ([]*Package, error) {\n\tkeys = strings.TrimSpace(keys)\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\tkey := strings.Split(keys, \" \")[0]\n\tif len(key) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tpkgs := make([]*Package, 0, 50)\n\terr := x.Limit(50).Where(\"name like '%\" + keys + \"%'\").Find(&pkgs)\n\treturn pkgs, err\n}\n\nconst _EXPIRE_DURATION = -1 * 24 * 30 * 3 * time.Hour\n\nfunc cleanExpireRevesions() {\n\tif err := x.Where(\"updatedmodels\/package.go: fix log depth\/\/ Copyright 2014 Unknwon\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage models\n\nimport (\n\t\"errors\"\n\t\/\/ \"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Unknwon\/com\"\n\n\t\"github.com\/gpmgo\/switch\/modules\/archive\"\n\t\"github.com\/gpmgo\/switch\/modules\/log\"\n\t\"github.com\/gpmgo\/switch\/modules\/qiniu\"\n\t\"github.com\/gpmgo\/switch\/modules\/setting\"\n)\n\nvar (\n\tErrRevisionIsLocal = errors.New(\"Revision archive is in local\")\n\tErrPackageNotExist = errors.New(\"Package does not exist\")\n\tErrRevisionNotExist = errors.New(\"Revision does not exist\")\n)\n\ntype Storage int\n\nconst (\n\tLOCAL Storage = iota\n\tQINIU\n)\n\n\/\/ Revision represents a revision of a Go package.\ntype Revision struct {\n\tId int64\n\tPkgId int64 `xorm:\"UNIQUE(s)\"`\n\tPkg *Package `xorm:\"-\"`\n\tRevision string `xorm:\"UNIQUE(s)\"`\n\tStorage\n\tSize int64\n\tUpdated time.Time `xorm:\"UPDATED\"`\n}\n\nfunc (r *Revision) GetPackage() (err error) {\n\tif r.Pkg != nil {\n\t\treturn nil\n\t}\n\tr.Pkg, err = GetPakcageById(r.PkgId)\n\treturn err\n}\n\n\/\/ KeyName returns QiNiu key name.\nfunc (r *Revision) KeyName() (string, error) {\n\tif r.Storage == LOCAL {\n\t\treturn \"\", ErrRevisionIsLocal\n\t}\n\tif err := r.GetPackage(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn r.Pkg.ImportPath + \"-\" + r.Revision + archive.GetExtension(r.Pkg.ImportPath), nil\n}\n\n\/\/ GetRevision returns revision by given pakcage ID and revision.\nfunc GetRevision(pkgId int64, rev string) (*Revision, error) {\n\tr := &Revision{\n\t\tPkgId: pkgId,\n\t\tRevision: rev,\n\t}\n\thas, err := x.Get(r)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrRevisionNotExist\n\t}\n\treturn r, nil\n}\n\n\/\/ UpdateRevision updates revision information.\nfunc UpdateRevision(rev *Revision) error {\n\t_, err := x.Id(rev.Id).Update(rev)\n\treturn err\n}\n\n\/\/ DeleteRevisionById delete revision by given ID.\nfunc DeleteRevisionById(revId int64) error {\n\t_, err := x.Id(revId).Delete(new(Revision))\n\treturn err\n}\n\n\/\/ GetLocalRevisions returns all revisions that archives are saved locally.\nfunc GetLocalRevisions() ([]*Revision, error) {\n\trevs := make([]*Revision, 0, 10)\n\terr := x.Where(\"storage=0\").Find(&revs)\n\treturn revs, err\n}\n\n\/\/ GetRevisionsByPkgId returns a list of revisions of 
given package ID.\nfunc GetRevisionsByPkgId(pkgId int64) ([]*Revision, error) {\n\trevs := make([]*Revision, 0, 10)\n\terr := x.Where(\"pkg_id=?\", pkgId).Find(&revs)\n\treturn revs, err\n}\n\n\/\/ Package represents a Go package.\ntype Package struct {\n\tId int64\n\tImportPath string `xorm:\"UNIQUE\"`\n\tDescription string\n\tHomepage string\n\tIssues string\n\tDownloadCount int64\n\tRecentDownload int64\n\tIsValidated bool `xorm:\"DEFAULT 0\"`\n\tCreated time.Time `xorm:\"CREATED\"`\n}\n\nfunc (pkg *Package) GetRevisions() ([]*Revision, error) {\n\treturn GetRevisionsByPkgId(pkg.Id)\n}\n\n\/\/ NewPackage creates\nfunc NewPackage(importPath string) (*Package, error) {\n\tpkg := &Package{\n\t\tImportPath: importPath,\n\t}\n\tif _, err := x.Insert(pkg); err != nil {\n\t\treturn nil, err\n\t}\n\treturn pkg, nil\n}\n\n\/\/ GetPakcageById returns a package by given ID.\nfunc GetPakcageById(pkgId int64) (*Package, error) {\n\tpkg := &Package{}\n\thas, err := x.Id(pkgId).Get(pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrPackageNotExist\n\t}\n\treturn pkg, nil\n}\n\n\/\/ GetPakcageByPath returns a package by given import path.\nfunc GetPakcageByPath(importPath string) (*Package, error) {\n\tpkg := &Package{\n\t\tImportPath: importPath,\n\t}\n\thas, err := x.Get(pkg)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !has {\n\t\treturn nil, ErrPackageNotExist\n\t}\n\treturn pkg, nil\n}\n\n\/\/ CheckPkg checks if versioned package is in records, and download it when needed.\nfunc CheckPkg(importPath, rev string) (*Revision, error) {\n\t\/\/ Check package record.\n\tpkg, err := GetPakcageByPath(importPath)\n\tif err != nil {\n\t\tif err != ErrPackageNotExist {\n\t\t\treturn nil, err\n\t\t}\n\t\tblocked, blockErr, err := IsPackageBlocked(importPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if blocked {\n\t\t\treturn nil, blockErr\n\t\t}\n\t}\n\n\tn := archive.NewNode(importPath, rev)\n\n\t\/\/ Get and check revision record.\n\tif err = n.GetRevision(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar r *Revision\n\tif pkg != nil {\n\t\tr, err = GetRevision(pkg.Id, n.Revision)\n\t\tif err != nil && err != ErrRevisionNotExist {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ return nil, fmt.Errorf(\"Revision: %s\", n.Revision)\n\n\tif r == nil || (r.Storage == LOCAL && !com.IsFile(n.ArchivePath)) {\n\t\tif err := n.Download(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif pkg == nil {\n\t\tpkg, err = NewPackage(n.ImportPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif r == nil {\n\t\tr = &Revision{\n\t\t\tPkgId: pkg.Id,\n\t\t\tRevision: n.Revision,\n\t\t}\n\t\t_, err = x.Insert(r)\n\t} else {\n\t\t_, err = x.Id(r.Id).Update(r)\n\t}\n\treturn r, nil\n}\n\n\/\/ IncreasePackageDownloadCount increase package download count by 1.\nfunc IncreasePackageDownloadCount(importPath string) error {\n\tpkg, err := GetPakcageByPath(importPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpkg.DownloadCount++\n\tpkg.RecentDownload++\n\t_, err = x.Id(pkg.Id).Update(pkg)\n\treturn err\n}\n\n\/\/ SearchPackages searchs packages by given keyword.\nfunc SearchPackages(keys string) ([]*Package, error) {\n\tkeys = strings.TrimSpace(keys)\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\tkey := strings.Split(keys, \" \")[0]\n\tif len(key) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tpkgs := make([]*Package, 0, 50)\n\terr := x.Limit(50).Where(\"name like '%\" + keys + \"%'\").Find(&pkgs)\n\treturn pkgs, err\n}\n\nconst _EXPIRE_DURATION = -1 * 24 * 30 * 
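// Aside (not part of the original file): SearchPackages above splices user
// input directly into the LIKE clause, and it computes key but then filters
// on keys. A sketch using a bound parameter instead — xorm's Where accepts
// ? placeholders with arguments; the name SearchPackagesSketch is made up:
func SearchPackagesSketch(keys string) ([]*Package, error) {
	keys = strings.TrimSpace(keys)
	if len(keys) == 0 {
		return nil, nil
	}
	key := strings.Split(keys, " ")[0]

	pkgs := make([]*Package, 0, 50)
	// The placeholder keeps the keyword out of the SQL text itself.
	err := x.Limit(50).Where("name like ?", "%"+key+"%").Find(&pkgs)
	return pkgs, err
}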
3 * time.Hour\n\nfunc cleanExpireRevesions() {\n\tif err := x.Where(\"updated"} {"text":"\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nA trivial example of wrapping a C library in Go.\nFor a more complex example and explanation,\nsee ..\/gmp\/gmp.go.\n*\/\n\npackage stdio\n\n\/*\n#include \n#include \n#include \n#include \n\nchar* greeting = \"hello, world\";\n*\/\nimport \"C\"\nimport \"unsafe\"\n\ntype File C.FILE\n\nvar Stdout = (*File)(C.stdout)\nvar Stderr = (*File)(C.stderr)\n\n\/\/ Test reference to library symbol.\n\/\/ Stdout and stderr are too special to be a reliable test.\nvar myerr = C.sys_errlist\n\nfunc (f *File) WriteString(s string) {\n\tp := C.CString(s)\n\tC.fputs(p, (*C.FILE)(f))\n\tC.free(unsafe.Pointer(p))\n\tf.Flush()\n}\n\nfunc (f *File) Flush() {\n\tC.fflush((*C.FILE)(f))\n}\n\nvar Greeting = C.GoString(C.greeting)\ngo\/build: fix windows build by commenting out references to stdout and stderr in cgotest\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nA trivial example of wrapping a C library in Go.\nFor a more complex example and explanation,\nsee ..\/gmp\/gmp.go.\n*\/\n\npackage stdio\n\n\/*\n#include \n#include \n#include \n#include \n\nchar* greeting = \"hello, world\";\n*\/\nimport \"C\"\nimport \"unsafe\"\n\ntype File C.FILE\n\n\/\/ TODO(brainman): uncomment once stdout and stderr references are working on Windows.\n\/\/var Stdout = (*File)(C.stdout)\n\/\/var Stderr = (*File)(C.stderr)\n\n\/\/ Test reference to library symbol.\n\/\/ Stdout and stderr are too special to be a reliable test.\nvar myerr = C.sys_errlist\n\nfunc (f *File) WriteString(s string) {\n\tp := C.CString(s)\n\tC.fputs(p, (*C.FILE)(f))\n\tC.free(unsafe.Pointer(p))\n\tf.Flush()\n}\n\nfunc (f *File) Flush() {\n\tC.fflush((*C.FILE)(f))\n}\n\nvar Greeting = C.GoString(C.greeting)\n<|endoftext|>"} {"text":"package mpeg4file\n\ntype mdat struct{\n\tsize uint32\n\tlargeSize uint64\n\tboxtype uint32\n\tdata []byte\n}\n\nfunc NewMdat (s uint32, payload []byte) *mdat{\n\tnewMdat:=new(mdat)\n\tnewMdat.size=s\n\tnewMdat.data = payload\n\treturn newMdat\n}\n\nfunc NewMdatLargeSize (s uint64, payload []byte) *mdat{\n\tnewMdat:=new(mdat)\n\tnewMdat.size=1\n\tnewMdat.largeSize = s\n\tnewMdat.data = payload\n\treturn newMdat\n}\n\nfunc (m *mdat) SetSize (s uint64){\n\tif s>4294967295 {\n\t\tm.size = uint32(s)\n\t}else{\n\t\tm.size = 1\n\t\tm.largeSize = s\n\t}\n}updated mdatpackage mpeg4file\n\ntype mdat struct{\n\tsize uint32\n\tlargeSize uint64\n\tboxtype uint32\n\tdata []byte\n}\n\nfunc NewMdat (s uint64, payload []byte) *mdat{\n\tnewMdat:=new(mdat)\n\tnewMdat.SetSize(s)\n\tnewMdat.data = payload\n\treturn newMdat\n}\n\nfunc (m *mdat) SetSize (s uint64){\n\tif s==0{\n\t\tm.size=0\n\t} else {\n\t\tif s>4294967295 {\n\t\t\tm.size = uint32(s)\n\t\t}else{\n\t\t\tm.size = 1\n\t\t\tm.largeSize = s\n\t\t}\n\t}\n}<|endoftext|>"} {"text":"package gonameparts\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestLooksCorporate(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Sprockets Inc\"}\n\n\tres := n.looksCorporate()\n\n\tif res != true {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestSearchParts(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. 
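// Aside (not part of the original file): in both mdat versions above, SetSize
// routes sizes that overflow 32 bits into the plain size field (truncating
// them) and small sizes into largeSize — the branches look inverted. In the
// ISO base media file format, size == 1 signals that the 64-bit largesize
// field holds the real size, and size == 0 means the box runs to end of file.
// A corrected sketch; the method name SetSizeFixed is made up:
func (m *mdat) SetSizeFixed(s uint64) {
	switch {
	case s == 0:
		m.size = 0 // box extends to the end of the file
	case s > 4294967295:
		m.size = 1 // 1 means: read the 64-bit largeSize field instead
		m.largeSize = s
	default:
		m.size = uint32(s)
	}
}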
James Polera\"}\n\n\tres := n.searchParts(&salutations)\n\n\tif res != 0 {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestClean(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. James Polera\"}\n\n\tres := n.cleaned()\n\n\tif res[0] != \"Mr\" {\n\t\tt.Errorf(\"Expected 'Mr'. Actual %v\", res[0])\n\t}\n\n}\n\nfunc TestLocateSalutation(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. James Polera\"}\n\n\tres := n.find(\"salutation\")\n\n\tif res != 0 {\n\t\tt.Errorf(\"Expected 0. Actual %v\", res)\n\t}\n}\n\nfunc TestHasComma(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Polera, James\"}\n\tres := n.hasComma()\n\n\tif res != true {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestNormalize(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Polera, James\"}\n\tres := n.normalize()\n\n\tif res[0] != \"James\" {\n\t\tt.Errorf(\"Expected James. Actual %v\", res[0])\n\t}\n\n\tif res[1] != \"Polera\" {\n\t\tt.Errorf(\"Expected Polera. Actual %v\", res[1])\n\t}\n\n}\n\nfunc TestParseAllFields(t *testing.T) {\n\tt.Parallel()\n\tres := Parse(\"Mr. James J. Polera Jr. Esq.\")\n\n\tif res.Salutation != \"Mr.\" {\n\t\tt.Errorf(\"Expected 'Mr.'. Actual %v\", res.Salutation)\n\t}\n\n\tif res.FirstName != \"James\" {\n\t\tt.Errorf(\"Expected 'James'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.MiddleName != \"J.\" {\n\t\tt.Errorf(\"Expected 'J.'. Actual %v\", res.MiddleName)\n\t}\n\n\tif res.LastName != \"Polera\" {\n\t\tt.Errorf(\"Expected 'Polera'. Actual %v\", res.LastName)\n\t}\n\n\tif res.Generation != \"Jr.\" {\n\t\tt.Errorf(\"Expected 'Jr.'. Actual %v\", res.Generation)\n\t}\n\n\tif res.Suffix != \"Esq.\" {\n\t\tt.Errorf(\"Expected 'Esq.'. Actual %v\", res.Suffix)\n\t}\n}\n\nfunc TestParseFirstLast(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"James Polera\")\n\tif res.FirstName != \"James\" {\n\t\tt.Errorf(\"Expected 'James'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"Polera\" {\n\t\tt.Errorf(\"Expected 'Polera'. Actual %v\", res.LastName)\n\t}\n}\n\nfunc TestLastNamePrefix(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Otto von Bismark\")\n\n\tif res.FirstName != \"Otto\" {\n\t\tt.Errorf(\"Expected 'Otto'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"von Bismark\" {\n\t\tt.Errorf(\"Expected 'von Bismark'. Actual %v\", res.LastName)\n\t}\n\n}\n\nfunc TestAliases(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"James Polera a\/k\/a Batman\")\n\n\tif res.Aliases[0].FirstName != \"Batman\" {\n\t\tt.Errorf(\"Expected 'Batman'. Actual: %v\", res.Aliases[0].FirstName)\n\t}\n\n}\n\nfunc TestNickname(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Philip Francis 'The Scooter' Rizzuto\")\n\n\tif res.Nickname != \"'The Scooter'\" {\n\t\tt.Errorf(\"Expected 'The Scooter'. Actual: %v\", res.Nickname)\n\t}\n}\n\nfunc TestStripSupplemental(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Philip Francis 'The Scooter' Rizzuto, deceased\")\n\n\tif res.FirstName != \"Philip\" {\n\t\tt.Errorf(\"Expected 'Philip'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.MiddleName != \"Francis\" {\n\t\tt.Errorf(\"Expected 'Francis'. Actual: %v\", res.MiddleName)\n\t}\n\n\tif res.Nickname != \"'The Scooter'\" {\n\t\tt.Errorf(\"Expected 'The Scooter'. Actual: %v\", res.Nickname)\n\t}\n\n\tif res.LastName != \"Rizzuto\" {\n\t\tt.Errorf(\"Expected 'Rizzuto'. 
Actual: %v\", res.LastName)\n\t}\n}\n\nfunc TestLongPrefixedLastName(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Saleh ibn Tariq ibn Khalid al-Fulan\")\n\n\tif res.FirstName != \"Saleh\" {\n\t\tt.Errorf(\"Expected 'Saleh'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"ibn Tariq ibn Khalid al-Fulan\" {\n\t\tt.Errorf(\"Expected 'ibn Tariq ibn Khalid al-Fulan'. Actual: %v\", res.LastName)\n\n\t}\n}\n\nfunc TestMisplacedApostrophe(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"John O' Hurley\")\n\n\tif res.FirstName != \"John\" {\n\t\tt.Errorf(\"Expected 'John'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"O'Hurley\" {\n\t\tt.Errorf(\"Expected 'O'Hurley'. Actual: %v\", res.LastName)\n\t}\n\n}\n\nfunc TestMultipleAKA(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Tony Stark a\/k\/a Ironman a\/k\/a Stark, Anthony a\/k\/a Anthony Edward \\\"Tony\\\" Stark\")\n\n\tif len(res.Aliases) != 3 {\n\t\tt.Errorf(\"Expected 3 aliases. Actual: %v\", len(res.Aliases))\n\t}\n\n\tif res.FirstName != \"Tony\" {\n\t\tt.Errorf(\"Expected 'Tony'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"Stark\" {\n\t\tt.Errorf(\"Expected 'Stark'. Actual: %v\", res.LastName)\n\t}\n\n}\n\nfunc TestBuildFullName(t *testing.T) {\n\tres := Parse(\"President George Herbert Walker Bush\")\n\n\tif res.FullName != \"President George Herbert Walker Bush\" {\n\n\t\tt.Errorf(\"Expected 'President George Herbert Walker Bush'. Actual: %v\", res.FullName)\n\t}\n\n}\n\nfunc TestDottedAka(t *testing.T) {\n\tres := Parse(\"James Polera a.k.a James K. Polera\")\n\tif len(res.Aliases) != 1 {\n\t\tt.Errorf(\"Expected 1 alias. Actual: %v\", len(res.Aliases))\n\t}\n}\n\nfunc TestUnicodeCharsInName(t *testing.T) {\n\tres := Parse(\"König Ludwig\")\n\n\tif res.FirstName != \"König\" {\n\t\tt.Errorf(\"Expected 'König'. Actual: %v\", res.FirstName)\n\n\t}\n}\n\nfunc ExampleParse() {\n\tres := Parse(\"Thurston Howell III\")\n\tfmt.Println(\"FirstName:\", res.FirstName)\n\tfmt.Println(\"LastName:\", res.LastName)\n\tfmt.Println(\"Generation:\", res.Generation)\n\n\t\/\/ Output:\n\t\/\/ FirstName: Thurston\n\t\/\/ LastName: Howell\n\t\/\/ Generation: III\n\n}\n\nfunc ExampleParse_second() {\n\n\tres := Parse(\"President George Herbert Walker Bush\")\n\tfmt.Println(\"Salutation:\", res.Salutation)\n\tfmt.Println(\"FirstName:\", res.FirstName)\n\tfmt.Println(\"MiddleName:\", res.MiddleName)\n\tfmt.Println(\"LastName:\", res.LastName)\n\n\t\/\/ Output:\n\t\/\/ Salutation: President\n\t\/\/ FirstName: George\n\t\/\/ MiddleName: Herbert Walker\n\t\/\/ LastName: Bush\n\n}\nAdded tabs in name testpackage gonameparts\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestLooksCorporate(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Sprockets Inc\"}\n\n\tres := n.looksCorporate()\n\n\tif res != true {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestSearchParts(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. James Polera\"}\n\n\tres := n.searchParts(&salutations)\n\n\tif res != 0 {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestClean(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. James Polera\"}\n\n\tres := n.cleaned()\n\n\tif res[0] != \"Mr\" {\n\t\tt.Errorf(\"Expected 'Mr'. Actual %v\", res[0])\n\t}\n\n}\n\nfunc TestLocateSalutation(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Mr. James Polera\"}\n\n\tres := n.find(\"salutation\")\n\n\tif res != 0 {\n\t\tt.Errorf(\"Expected 0. 
Actual %v\", res)\n\t}\n}\n\nfunc TestHasComma(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Polera, James\"}\n\tres := n.hasComma()\n\n\tif res != true {\n\t\tt.Errorf(\"Expected true. Actual %v\", res)\n\t}\n\n}\n\nfunc TestNormalize(t *testing.T) {\n\tt.Parallel()\n\tn := nameString{FullName: \"Polera, James\"}\n\tres := n.normalize()\n\n\tif res[0] != \"James\" {\n\t\tt.Errorf(\"Expected James. Actual %v\", res[0])\n\t}\n\n\tif res[1] != \"Polera\" {\n\t\tt.Errorf(\"Expected Polera. Actual %v\", res[1])\n\t}\n\n}\n\nfunc TestParseAllFields(t *testing.T) {\n\tt.Parallel()\n\tres := Parse(\"Mr. James J. Polera Jr. Esq.\")\n\n\tif res.Salutation != \"Mr.\" {\n\t\tt.Errorf(\"Expected 'Mr.'. Actual %v\", res.Salutation)\n\t}\n\n\tif res.FirstName != \"James\" {\n\t\tt.Errorf(\"Expected 'James'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.MiddleName != \"J.\" {\n\t\tt.Errorf(\"Expected 'J.'. Actual %v\", res.MiddleName)\n\t}\n\n\tif res.LastName != \"Polera\" {\n\t\tt.Errorf(\"Expected 'Polera'. Actual %v\", res.LastName)\n\t}\n\n\tif res.Generation != \"Jr.\" {\n\t\tt.Errorf(\"Expected 'Jr.'. Actual %v\", res.Generation)\n\t}\n\n\tif res.Suffix != \"Esq.\" {\n\t\tt.Errorf(\"Expected 'Esq.'. Actual %v\", res.Suffix)\n\t}\n}\n\nfunc TestParseFirstLast(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"James Polera\")\n\tif res.FirstName != \"James\" {\n\t\tt.Errorf(\"Expected 'James'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"Polera\" {\n\t\tt.Errorf(\"Expected 'Polera'. Actual %v\", res.LastName)\n\t}\n}\n\nfunc TestLastNamePrefix(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Otto von Bismark\")\n\n\tif res.FirstName != \"Otto\" {\n\t\tt.Errorf(\"Expected 'Otto'. Actual %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"von Bismark\" {\n\t\tt.Errorf(\"Expected 'von Bismark'. Actual %v\", res.LastName)\n\t}\n\n}\n\nfunc TestAliases(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"James Polera a\/k\/a Batman\")\n\n\tif res.Aliases[0].FirstName != \"Batman\" {\n\t\tt.Errorf(\"Expected 'Batman'. Actual: %v\", res.Aliases[0].FirstName)\n\t}\n\n}\n\nfunc TestNickname(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Philip Francis 'The Scooter' Rizzuto\")\n\n\tif res.Nickname != \"'The Scooter'\" {\n\t\tt.Errorf(\"Expected 'The Scooter'. Actual: %v\", res.Nickname)\n\t}\n}\n\nfunc TestStripSupplemental(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Philip Francis 'The Scooter' Rizzuto, deceased\")\n\n\tif res.FirstName != \"Philip\" {\n\t\tt.Errorf(\"Expected 'Philip'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.MiddleName != \"Francis\" {\n\t\tt.Errorf(\"Expected 'Francis'. Actual: %v\", res.MiddleName)\n\t}\n\n\tif res.Nickname != \"'The Scooter'\" {\n\t\tt.Errorf(\"Expected 'The Scooter'. Actual: %v\", res.Nickname)\n\t}\n\n\tif res.LastName != \"Rizzuto\" {\n\t\tt.Errorf(\"Expected 'Rizzuto'. Actual: %v\", res.LastName)\n\t}\n}\n\nfunc TestLongPrefixedLastName(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Saleh ibn Tariq ibn Khalid al-Fulan\")\n\n\tif res.FirstName != \"Saleh\" {\n\t\tt.Errorf(\"Expected 'Saleh'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"ibn Tariq ibn Khalid al-Fulan\" {\n\t\tt.Errorf(\"Expected 'ibn Tariq ibn Khalid al-Fulan'. Actual: %v\", res.LastName)\n\n\t}\n}\n\nfunc TestMisplacedApostrophe(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"John O' Hurley\")\n\n\tif res.FirstName != \"John\" {\n\t\tt.Errorf(\"Expected 'John'. 
Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"O'Hurley\" {\n\t\tt.Errorf(\"Expected 'O'Hurley'. Actual: %v\", res.LastName)\n\t}\n\n}\n\nfunc TestMultipleAKA(t *testing.T) {\n\tt.Parallel()\n\n\tres := Parse(\"Tony Stark a\/k\/a Ironman a\/k\/a Stark, Anthony a\/k\/a Anthony Edward \\\"Tony\\\" Stark\")\n\n\tif len(res.Aliases) != 3 {\n\t\tt.Errorf(\"Expected 3 aliases. Actual: %v\", len(res.Aliases))\n\t}\n\n\tif res.FirstName != \"Tony\" {\n\t\tt.Errorf(\"Expected 'Tony'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"Stark\" {\n\t\tt.Errorf(\"Expected 'Stark'. Actual: %v\", res.LastName)\n\t}\n\n}\n\nfunc TestBuildFullName(t *testing.T) {\n\tres := Parse(\"President George Herbert Walker Bush\")\n\n\tif res.FullName != \"President George Herbert Walker Bush\" {\n\n\t\tt.Errorf(\"Expected 'President George Herbert Walker Bush'. Actual: %v\", res.FullName)\n\t}\n\n}\n\nfunc TestDottedAka(t *testing.T) {\n\tres := Parse(\"James Polera a.k.a James K. Polera\")\n\tif len(res.Aliases) != 1 {\n\t\tt.Errorf(\"Expected 1 alias. Actual: %v\", len(res.Aliases))\n\t}\n}\n\nfunc TestUnicodeCharsInName(t *testing.T) {\n\tres := Parse(\"König Ludwig\")\n\n\tif res.FirstName != \"König\" {\n\t\tt.Errorf(\"Expected 'König'. Actual: %v\", res.FirstName)\n\t}\n}\n\nfunc TestTabsInName(t *testing.T) {\n\tres := Parse(\"Dr. James\\tPolera\\tEsq.\")\n\n\tif res.Salutation != \"Dr.\" {\n\t\tt.Errorf(\"Expected 'Dr.'. Actual: %v\", res.Salutation)\n\t}\n\n\tif res.FirstName != \"James\" {\n\t\tt.Errorf(\"Expected 'James'. Actual: %v\", res.FirstName)\n\t}\n\n\tif res.LastName != \"Polera\" {\n\t\tt.Errorf(\"Expected 'Polera'. Actual: %v\", res.LastName)\n\t}\n\n\tif res.Suffix != \"Esq.\" {\n\t\tt.Errorf(\"Expected 'Esq.'. Actual: %v\", res.Suffix)\n\t}\n}\n\nfunc ExampleParse() {\n\tres := Parse(\"Thurston Howell III\")\n\tfmt.Println(\"FirstName:\", res.FirstName)\n\tfmt.Println(\"LastName:\", res.LastName)\n\tfmt.Println(\"Generation:\", res.Generation)\n\n\t\/\/ Output:\n\t\/\/ FirstName: Thurston\n\t\/\/ LastName: Howell\n\t\/\/ Generation: III\n\n}\n\nfunc ExampleParse_second() {\n\n\tres := Parse(\"President George Herbert Walker Bush\")\n\tfmt.Println(\"Salutation:\", res.Salutation)\n\tfmt.Println(\"FirstName:\", res.FirstName)\n\tfmt.Println(\"MiddleName:\", res.MiddleName)\n\tfmt.Println(\"LastName:\", res.LastName)\n\n\t\/\/ Output:\n\t\/\/ Salutation: President\n\t\/\/ FirstName: George\n\t\/\/ MiddleName: Herbert Walker\n\t\/\/ LastName: Bush\n\n}\n<|endoftext|>"} {"text":"\/\/ +build go1.7\n\npackage nethttp\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptrace\"\n\t\"net\/url\"\n\n\t\"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/opentracing\/opentracing-go\/ext\"\n\t\"github.com\/opentracing\/opentracing-go\/log\"\n)\n\ntype contextKey int\n\nconst (\n\tkeyTracer contextKey = iota\n)\n\nconst defaultComponentName = \"net\/http\"\n\n\/\/ Transport wraps a RoundTripper. If a request is being traced with\n\/\/ Tracer, Transport will inject the current span into the headers,\n\/\/ and set HTTP related tags on the span.\ntype Transport struct {\n\t\/\/ The actual RoundTripper to use for the request. 
A nil\n\t\/\/ RoundTripper defaults to http.DefaultTransport.\n\thttp.RoundTripper\n}\n\ntype clientOptions struct {\n\toperationName string\n\tcomponentName string\n\turlTagFunc func(u *url.URL) string\n\tdisableClientTrace bool\n\tdisableInjectSpanContext bool\n\tspanObserver func(span opentracing.Span, r *http.Request)\n}\n\n\/\/ ClientOption controls the behavior of TraceRequest.\ntype ClientOption func(*clientOptions)\n\n\/\/ OperationName returns a ClientOption that sets the operation\n\/\/ name for the client-side span.\nfunc OperationName(operationName string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.operationName = operationName\n\t}\n}\n\n\/\/ URLTagFunc returns a ClientOption that uses the given function f\n\/\/ to set the span's http.url tag. Can be used to change the default\n\/\/ http.url tag, e.g. to redact sensitive information.\nfunc URLTagFunc(f func(u *url.URL) string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.urlTagFunc = f\n\t}\n}\n\n\/\/ ComponentName returns a ClientOption that sets the component\n\/\/ name for the client-side span.\nfunc ComponentName(componentName string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.componentName = componentName\n\t}\n}\n\n\/\/ ClientTrace returns a ClientOption that turns on or off\n\/\/ extra instrumentation via httptrace.WithClientTrace.\nfunc ClientTrace(enabled bool) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.disableClientTrace = !enabled\n\t}\n}\n\n\/\/ InjectSpanContext returns a ClientOption that turns on or off\n\/\/ injection of the Span context in the request HTTP headers.\n\/\/ If this option is not used, the default behaviour is to\n\/\/ inject the span context.\nfunc InjectSpanContext(enabled bool) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.disableInjectSpanContext = !enabled\n\t}\n}\n\n\/\/ ClientSpanObserver returns a ClientOption that sets f as an\n\/\/ observer of the client-side span.\nfunc ClientSpanObserver(f func(span opentracing.Span, r *http.Request)) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.spanObserver = f\n\t}\n}\n\n\/\/ TraceRequest adds a ClientTracer to req, tracing the request and\n\/\/ all requests caused by redirects. 
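TraceRequest attaches the Tracer (and, unless\n\/\/ disabled, an httptrace.ClientTrace) to the request context; the client-side\n\/\/ spans themselves are started and finished by Transport.RoundTrip. 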
When tracing requests this\n\/\/ way you must also use Transport.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ \tfunc AskGoogle(ctx context.Context) error {\n\/\/ \t\tclient := &http.Client{Transport: &nethttp.Transport{}}\n\/\/ \t\treq, err := http.NewRequest(\"GET\", \"http:\/\/google.com\", nil)\n\/\/ \t\tif err != nil {\n\/\/ \t\t\treturn err\n\/\/ \t\t}\n\/\/ \t\treq = req.WithContext(ctx) \/\/ extend existing trace, if any\n\/\/\n\/\/ \t\treq, ht := nethttp.TraceRequest(tracer, req)\n\/\/ \t\tdefer ht.Finish()\n\/\/\n\/\/ \t\tres, err := client.Do(req)\n\/\/ \t\tif err != nil {\n\/\/ \t\t\treturn err\n\/\/ \t\t}\n\/\/ \t\tres.Body.Close()\n\/\/ \t\treturn nil\n\/\/ \t}\nfunc TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) {\n\topts := &clientOptions{\n\t\turlTagFunc: func(u *url.URL) string {\n\t\t\treturn u.String()\n\t\t},\n\t\tspanObserver: func(_ opentracing.Span, _ *http.Request) {},\n\t}\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\tht := &Tracer{tr: tr, opts: opts}\n\tctx := req.Context()\n\tif !opts.disableClientTrace {\n\t\tctx = httptrace.WithClientTrace(ctx, ht.clientTrace())\n\t}\n\treq = req.WithContext(context.WithValue(ctx, keyTracer, ht))\n\treturn req, ht\n}\n\ntype closeTracker struct {\n\tio.ReadCloser\n\tsp opentracing.Span\n}\n\nfunc (c closeTracker) Close() error {\n\terr := c.ReadCloser.Close()\n\tc.sp.LogFields(log.String(\"event\", \"ClosedBody\"))\n\tc.sp.Finish()\n\treturn err\n}\n\ntype writerCloseTracker struct {\n\tio.ReadWriteCloser\n\tsp opentracing.Span\n}\n\nfunc (c writerCloseTracker) Close() error {\n\terr := c.ReadWriteCloser.Close()\n\tc.sp.LogFields(log.String(\"event\", \"ClosedBody\"))\n\tc.sp.Finish()\n\treturn err\n}\n\n\/\/ TracerFromRequest retrieves the Tracer from the request. 
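The Tracer is the\n\/\/ context value that TraceRequest stored. 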
If the request does\n\/\/ not have a Tracer it will return nil.\nfunc TracerFromRequest(req *http.Request) *Tracer {\n\ttr, ok := req.Context().Value(keyTracer).(*Tracer)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tr\n}\n\n\/\/ RoundTrip implements the RoundTripper interface.\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\trt := t.RoundTripper\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\ttracer := TracerFromRequest(req)\n\tif tracer == nil {\n\t\treturn rt.RoundTrip(req)\n\t}\n\n\ttracer.start(req)\n\n\text.HTTPMethod.Set(tracer.sp, req.Method)\n\text.HTTPUrl.Set(tracer.sp, tracer.opts.urlTagFunc(req.URL))\n\text.PeerAddress.Set(tracer.sp, req.URL.Host)\n\ttracer.opts.spanObserver(tracer.sp, req)\n\n\tif !tracer.opts.disableInjectSpanContext {\n\t\tcarrier := opentracing.HTTPHeadersCarrier(req.Header)\n\t\ttracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier)\n\t}\n\n\tresp, err := rt.RoundTrip(req)\n\n\tif err != nil {\n\t\ttracer.sp.Finish()\n\t\treturn resp, err\n\t}\n\text.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode))\n\tif resp.StatusCode >= http.StatusInternalServerError {\n\t\text.Error.Set(tracer.sp, true)\n\t}\n\tif req.Method == \"HEAD\" {\n\t\ttracer.sp.Finish()\n\t} else {\n\t\treadWriteCloser, ok := resp.Body.(io.ReadWriteCloser)\n\t\tif ok {\n\t\t\tresp.Body = writerCloseTracker{readWriteCloser, tracer.sp}\n\t\t} else {\n\t\t\tresp.Body = closeTracker{resp.Body, tracer.sp}\n\t\t}\n\t}\n\treturn resp, nil\n}\n\n\/\/ Tracer holds tracing details for one HTTP request.\ntype Tracer struct {\n\ttr opentracing.Tracer\n\troot opentracing.Span\n\tsp opentracing.Span\n\topts *clientOptions\n}\n\nfunc (h *Tracer) start(req *http.Request) opentracing.Span {\n\tif h.root == nil {\n\t\tparent := opentracing.SpanFromContext(req.Context())\n\t\tvar spanctx opentracing.SpanContext\n\t\tif parent != nil {\n\t\t\tspanctx = parent.Context()\n\t\t}\n\t\toperationName := h.opts.operationName\n\t\tif operationName == \"\" {\n\t\t\toperationName = \"HTTP Client\"\n\t\t}\n\t\troot := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx))\n\t\th.root = root\n\t}\n\n\tctx := h.root.Context()\n\th.sp = h.tr.StartSpan(\"HTTP \"+req.Method, opentracing.ChildOf(ctx), ext.SpanKindRPCClient)\n\n\tcomponentName := h.opts.componentName\n\tif componentName == \"\" {\n\t\tcomponentName = defaultComponentName\n\t}\n\text.Component.Set(h.sp, componentName)\n\n\treturn h.sp\n}\n\n\/\/ Finish finishes the span of the traced request.\nfunc (h *Tracer) Finish() {\n\tif h.root != nil {\n\t\th.root.Finish()\n\t}\n}\n\n\/\/ Span returns the root span of the traced request. 
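The root span is the parent of\n\/\/ the per-request \"HTTP <method>\" spans and is only finished when Finish is\n\/\/ called. 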
This function\n\/\/ should only be called after the request has been executed.\nfunc (h *Tracer) Span() opentracing.Span {\n\treturn h.root\n}\n\nfunc (h *Tracer) clientTrace() *httptrace.ClientTrace {\n\treturn &httptrace.ClientTrace{\n\t\tGetConn: h.getConn,\n\t\tGotConn: h.gotConn,\n\t\tPutIdleConn: h.putIdleConn,\n\t\tGotFirstResponseByte: h.gotFirstResponseByte,\n\t\tGot100Continue: h.got100Continue,\n\t\tDNSStart: h.dnsStart,\n\t\tDNSDone: h.dnsDone,\n\t\tConnectStart: h.connectStart,\n\t\tConnectDone: h.connectDone,\n\t\tWroteHeaders: h.wroteHeaders,\n\t\tWait100Continue: h.wait100Continue,\n\t\tWroteRequest: h.wroteRequest,\n\t}\n}\n\nfunc (h *Tracer) getConn(hostPort string) {\n\th.sp.LogFields(log.String(\"event\", \"GetConn\"), log.String(\"hostPort\", hostPort))\n}\n\nfunc (h *Tracer) gotConn(info httptrace.GotConnInfo) {\n\th.sp.SetTag(\"net\/http.reused\", info.Reused)\n\th.sp.SetTag(\"net\/http.was_idle\", info.WasIdle)\n\th.sp.LogFields(log.String(\"event\", \"GotConn\"))\n}\n\nfunc (h *Tracer) putIdleConn(error) {\n\th.sp.LogFields(log.String(\"event\", \"PutIdleConn\"))\n}\n\nfunc (h *Tracer) gotFirstResponseByte() {\n\th.sp.LogFields(log.String(\"event\", \"GotFirstResponseByte\"))\n}\n\nfunc (h *Tracer) got100Continue() {\n\th.sp.LogFields(log.String(\"event\", \"Got100Continue\"))\n}\n\nfunc (h *Tracer) dnsStart(info httptrace.DNSStartInfo) {\n\th.sp.LogFields(\n\t\tlog.String(\"event\", \"DNSStart\"),\n\t\tlog.String(\"host\", info.Host),\n\t)\n}\n\nfunc (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) {\n\tfields := []log.Field{log.String(\"event\", \"DNSDone\")}\n\tfor _, addr := range info.Addrs {\n\t\tfields = append(fields, log.String(\"addr\", addr.String()))\n\t}\n\tif info.Err != nil {\n\t\tfields = append(fields, log.Error(info.Err))\n\t}\n\th.sp.LogFields(fields...)\n}\n\nfunc (h *Tracer) connectStart(network, addr string) {\n\th.sp.LogFields(\n\t\tlog.String(\"event\", \"ConnectStart\"),\n\t\tlog.String(\"network\", network),\n\t\tlog.String(\"addr\", addr),\n\t)\n}\n\nfunc (h *Tracer) connectDone(network, addr string, err error) {\n\tif err != nil {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"message\", \"ConnectDone\"),\n\t\t\tlog.String(\"network\", network),\n\t\t\tlog.String(\"addr\", addr),\n\t\t\tlog.String(\"event\", \"error\"),\n\t\t\tlog.Error(err),\n\t\t)\n\t} else {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"event\", \"ConnectDone\"),\n\t\t\tlog.String(\"network\", network),\n\t\t\tlog.String(\"addr\", addr),\n\t\t)\n\t}\n}\n\nfunc (h *Tracer) wroteHeaders() {\n\th.sp.LogFields(log.String(\"event\", \"WroteHeaders\"))\n}\n\nfunc (h *Tracer) wait100Continue() {\n\th.sp.LogFields(log.String(\"event\", \"Wait100Continue\"))\n}\n\nfunc (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) {\n\tif info.Err != nil {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"message\", \"WroteRequest\"),\n\t\t\tlog.String(\"event\", \"error\"),\n\t\t\tlog.Error(info.Err),\n\t\t)\n\t\text.Error.Set(h.sp, true)\n\t} else {\n\t\th.sp.LogFields(log.String(\"event\", \"WroteRequest\"))\n\t}\n}\nUse local variable for span (#63)\/\/go:build go1.7\n\/\/ +build go1.7\n\npackage nethttp\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net\/http\"\n\t\"net\/http\/httptrace\"\n\t\"net\/url\"\n\n\t\"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/opentracing\/opentracing-go\/ext\"\n\t\"github.com\/opentracing\/opentracing-go\/log\"\n)\n\ntype contextKey int\n\nconst (\n\tkeyTracer contextKey = iota\n)\n\nconst defaultComponentName = \"net\/http\"\n\n\/\/ Transport wraps a 
RoundTripper. If a request is being traced with\n\/\/ Tracer, Transport will inject the current span into the headers,\n\/\/ and set HTTP related tags on the span.\ntype Transport struct {\n\t\/\/ The actual RoundTripper to use for the request. A nil\n\t\/\/ RoundTripper defaults to http.DefaultTransport.\n\thttp.RoundTripper\n}\n\ntype clientOptions struct {\n\toperationName string\n\tcomponentName string\n\turlTagFunc func(u *url.URL) string\n\tdisableClientTrace bool\n\tdisableInjectSpanContext bool\n\tspanObserver func(span opentracing.Span, r *http.Request)\n}\n\n\/\/ ClientOption controls the behavior of TraceRequest.\ntype ClientOption func(*clientOptions)\n\n\/\/ OperationName returns a ClientOption that sets the operation\n\/\/ name for the client-side span.\nfunc OperationName(operationName string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.operationName = operationName\n\t}\n}\n\n\/\/ URLTagFunc returns a ClientOption that uses the given function f\n\/\/ to set the span's http.url tag. Can be used to change the default\n\/\/ http.url tag, e.g. to redact sensitive information.\nfunc URLTagFunc(f func(u *url.URL) string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.urlTagFunc = f\n\t}\n}\n\n\/\/ ComponentName returns a ClientOption that sets the component\n\/\/ name for the client-side span.\nfunc ComponentName(componentName string) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.componentName = componentName\n\t}\n}\n\n\/\/ ClientTrace returns a ClientOption that turns on or off\n\/\/ extra instrumentation via httptrace.WithClientTrace.\nfunc ClientTrace(enabled bool) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.disableClientTrace = !enabled\n\t}\n}\n\n\/\/ InjectSpanContext returns a ClientOption that turns on or off\n\/\/ injection of the Span context in the request HTTP headers.\n\/\/ If this option is not used, the default behaviour is to\n\/\/ inject the span context.\nfunc InjectSpanContext(enabled bool) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.disableInjectSpanContext = !enabled\n\t}\n}\n\n\/\/ ClientSpanObserver returns a ClientOption that sets f as an\n\/\/ observer of the client-side span.\nfunc ClientSpanObserver(f func(span opentracing.Span, r *http.Request)) ClientOption {\n\treturn func(options *clientOptions) {\n\t\toptions.spanObserver = f\n\t}\n}\n\n\/\/ TraceRequest adds a ClientTracer to req, tracing the request and\n\/\/ all requests caused by redirects. 
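TraceRequest attaches the Tracer (and, unless\n\/\/ disabled, an httptrace.ClientTrace) to the request context; the client-side\n\/\/ spans themselves are started and finished by Transport.RoundTrip. 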
When tracing requests this\n\/\/ way you must also use Transport.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ \tfunc AskGoogle(ctx context.Context) error {\n\/\/ \t\tclient := &http.Client{Transport: &nethttp.Transport{}}\n\/\/ \t\treq, err := http.NewRequest(\"GET\", \"http:\/\/google.com\", nil)\n\/\/ \t\tif err != nil {\n\/\/ \t\t\treturn err\n\/\/ \t\t}\n\/\/ \t\treq = req.WithContext(ctx) \/\/ extend existing trace, if any\n\/\/\n\/\/ \t\treq, ht := nethttp.TraceRequest(tracer, req)\n\/\/ \t\tdefer ht.Finish()\n\/\/\n\/\/ \t\tres, err := client.Do(req)\n\/\/ \t\tif err != nil {\n\/\/ \t\t\treturn err\n\/\/ \t\t}\n\/\/ \t\tres.Body.Close()\n\/\/ \t\treturn nil\n\/\/ \t}\nfunc TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) {\n\topts := &clientOptions{\n\t\turlTagFunc: func(u *url.URL) string {\n\t\t\treturn u.String()\n\t\t},\n\t\tspanObserver: func(_ opentracing.Span, _ *http.Request) {},\n\t}\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\tht := &Tracer{tr: tr, opts: opts}\n\tctx := req.Context()\n\tif !opts.disableClientTrace {\n\t\tctx = httptrace.WithClientTrace(ctx, ht.clientTrace())\n\t}\n\treq = req.WithContext(context.WithValue(ctx, keyTracer, ht))\n\treturn req, ht\n}\n\ntype closeTracker struct {\n\tio.ReadCloser\n\tsp opentracing.Span\n}\n\nfunc (c closeTracker) Close() error {\n\terr := c.ReadCloser.Close()\n\tc.sp.LogFields(log.String(\"event\", \"ClosedBody\"))\n\tc.sp.Finish()\n\treturn err\n}\n\ntype writerCloseTracker struct {\n\tio.ReadWriteCloser\n\tsp opentracing.Span\n}\n\nfunc (c writerCloseTracker) Close() error {\n\terr := c.ReadWriteCloser.Close()\n\tc.sp.LogFields(log.String(\"event\", \"ClosedBody\"))\n\tc.sp.Finish()\n\treturn err\n}\n\n\/\/ TracerFromRequest retrieves the Tracer from the request. 
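The Tracer is the\n\/\/ context value that TraceRequest stored. 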
If the request does\n\/\/ not have a Tracer it will return nil.\nfunc TracerFromRequest(req *http.Request) *Tracer {\n\ttr, ok := req.Context().Value(keyTracer).(*Tracer)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tr\n}\n\n\/\/ RoundTrip implements the RoundTripper interface.\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\trt := t.RoundTripper\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\ttracer := TracerFromRequest(req)\n\tif tracer == nil {\n\t\treturn rt.RoundTrip(req)\n\t}\n\n\tsp := tracer.start(req)\n\n\text.HTTPMethod.Set(sp, req.Method)\n\text.HTTPUrl.Set(sp, tracer.opts.urlTagFunc(req.URL))\n\text.PeerAddress.Set(sp, req.URL.Host)\n\ttracer.opts.spanObserver(sp, req)\n\n\tif !tracer.opts.disableInjectSpanContext {\n\t\tcarrier := opentracing.HTTPHeadersCarrier(req.Header)\n\t\tsp.Tracer().Inject(sp.Context(), opentracing.HTTPHeaders, carrier)\n\t}\n\n\tresp, err := rt.RoundTrip(req)\n\n\tif err != nil {\n\t\tsp.Finish()\n\t\treturn resp, err\n\t}\n\text.HTTPStatusCode.Set(sp, uint16(resp.StatusCode))\n\tif resp.StatusCode >= http.StatusInternalServerError {\n\t\text.Error.Set(sp, true)\n\t}\n\tif req.Method == \"HEAD\" {\n\t\tsp.Finish()\n\t} else {\n\t\treadWriteCloser, ok := resp.Body.(io.ReadWriteCloser)\n\t\tif ok {\n\t\t\tresp.Body = writerCloseTracker{readWriteCloser, sp}\n\t\t} else {\n\t\t\tresp.Body = closeTracker{resp.Body, sp}\n\t\t}\n\t}\n\treturn resp, nil\n}\n\n\/\/ Tracer holds tracing details for one HTTP request.\ntype Tracer struct {\n\ttr opentracing.Tracer\n\troot opentracing.Span\n\tsp opentracing.Span\n\topts *clientOptions\n}\n\nfunc (h *Tracer) start(req *http.Request) opentracing.Span {\n\tif h.root == nil {\n\t\tparent := opentracing.SpanFromContext(req.Context())\n\t\tvar spanctx opentracing.SpanContext\n\t\tif parent != nil {\n\t\t\tspanctx = parent.Context()\n\t\t}\n\t\toperationName := h.opts.operationName\n\t\tif operationName == \"\" {\n\t\t\toperationName = \"HTTP Client\"\n\t\t}\n\t\troot := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx))\n\t\th.root = root\n\t}\n\n\tctx := h.root.Context()\n\th.sp = h.tr.StartSpan(\"HTTP \"+req.Method, opentracing.ChildOf(ctx), ext.SpanKindRPCClient)\n\n\tcomponentName := h.opts.componentName\n\tif componentName == \"\" {\n\t\tcomponentName = defaultComponentName\n\t}\n\text.Component.Set(h.sp, componentName)\n\n\treturn h.sp\n}\n\n\/\/ Finish finishes the span of the traced request.\nfunc (h *Tracer) Finish() {\n\tif h.root != nil {\n\t\th.root.Finish()\n\t}\n}\n\n\/\/ Span returns the root span of the traced request. 
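The root span is the parent of\n\/\/ the per-request \"HTTP <method>\" spans and is only finished when Finish is\n\/\/ called. 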
This function\n\/\/ should only be called after the request has been executed.\nfunc (h *Tracer) Span() opentracing.Span {\n\treturn h.root\n}\n\nfunc (h *Tracer) clientTrace() *httptrace.ClientTrace {\n\treturn &httptrace.ClientTrace{\n\t\tGetConn: h.getConn,\n\t\tGotConn: h.gotConn,\n\t\tPutIdleConn: h.putIdleConn,\n\t\tGotFirstResponseByte: h.gotFirstResponseByte,\n\t\tGot100Continue: h.got100Continue,\n\t\tDNSStart: h.dnsStart,\n\t\tDNSDone: h.dnsDone,\n\t\tConnectStart: h.connectStart,\n\t\tConnectDone: h.connectDone,\n\t\tWroteHeaders: h.wroteHeaders,\n\t\tWait100Continue: h.wait100Continue,\n\t\tWroteRequest: h.wroteRequest,\n\t}\n}\n\nfunc (h *Tracer) getConn(hostPort string) {\n\th.sp.LogFields(log.String(\"event\", \"GetConn\"), log.String(\"hostPort\", hostPort))\n}\n\nfunc (h *Tracer) gotConn(info httptrace.GotConnInfo) {\n\th.sp.SetTag(\"net\/http.reused\", info.Reused)\n\th.sp.SetTag(\"net\/http.was_idle\", info.WasIdle)\n\th.sp.LogFields(log.String(\"event\", \"GotConn\"))\n}\n\nfunc (h *Tracer) putIdleConn(error) {\n\th.sp.LogFields(log.String(\"event\", \"PutIdleConn\"))\n}\n\nfunc (h *Tracer) gotFirstResponseByte() {\n\th.sp.LogFields(log.String(\"event\", \"GotFirstResponseByte\"))\n}\n\nfunc (h *Tracer) got100Continue() {\n\th.sp.LogFields(log.String(\"event\", \"Got100Continue\"))\n}\n\nfunc (h *Tracer) dnsStart(info httptrace.DNSStartInfo) {\n\th.sp.LogFields(\n\t\tlog.String(\"event\", \"DNSStart\"),\n\t\tlog.String(\"host\", info.Host),\n\t)\n}\n\nfunc (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) {\n\tfields := []log.Field{log.String(\"event\", \"DNSDone\")}\n\tfor _, addr := range info.Addrs {\n\t\tfields = append(fields, log.String(\"addr\", addr.String()))\n\t}\n\tif info.Err != nil {\n\t\tfields = append(fields, log.Error(info.Err))\n\t}\n\th.sp.LogFields(fields...)\n}\n\nfunc (h *Tracer) connectStart(network, addr string) {\n\th.sp.LogFields(\n\t\tlog.String(\"event\", \"ConnectStart\"),\n\t\tlog.String(\"network\", network),\n\t\tlog.String(\"addr\", addr),\n\t)\n}\n\nfunc (h *Tracer) connectDone(network, addr string, err error) {\n\tif err != nil {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"message\", \"ConnectDone\"),\n\t\t\tlog.String(\"network\", network),\n\t\t\tlog.String(\"addr\", addr),\n\t\t\tlog.String(\"event\", \"error\"),\n\t\t\tlog.Error(err),\n\t\t)\n\t} else {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"event\", \"ConnectDone\"),\n\t\t\tlog.String(\"network\", network),\n\t\t\tlog.String(\"addr\", addr),\n\t\t)\n\t}\n}\n\nfunc (h *Tracer) wroteHeaders() {\n\th.sp.LogFields(log.String(\"event\", \"WroteHeaders\"))\n}\n\nfunc (h *Tracer) wait100Continue() {\n\th.sp.LogFields(log.String(\"event\", \"Wait100Continue\"))\n}\n\nfunc (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) {\n\tif info.Err != nil {\n\t\th.sp.LogFields(\n\t\t\tlog.String(\"message\", \"WroteRequest\"),\n\t\t\tlog.String(\"event\", \"error\"),\n\t\t\tlog.Error(info.Err),\n\t\t)\n\t\text.Error.Set(h.sp, true)\n\t} else {\n\t\th.sp.LogFields(log.String(\"event\", \"WroteRequest\"))\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/buildkite\/buildkite-metrics\/backend\"\n\t\"github.com\/buildkite\/buildkite-metrics\/collector\"\n\t\"github.com\/eawsy\/aws-lambda-go\/service\/lambda\/runtime\"\n\t\"gopkg.in\/buildkite\/go-buildkite.v2\/buildkite\"\n)\n\nfunc handle(evt json.RawMessage, ctx *runtime.Context) (interface{}, error) {\n\torg := 
os.Getenv(\"BUILDKITE_ORG\")\n\ttoken := os.Getenv(\"BUILDKITE_TOKEN\")\n\tbackendOpt := os.Getenv(\"BUILDKITE_BACKEND\")\n\tqueue := os.Getenv(\"BUILDKITE_QUEUE\")\n\tquiet := os.Getenv(\"BUILDKITE_QUIET\")\n\n\tif quiet == \"1\" || quiet == \"false\" {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\tconfig, err := buildkite.NewTokenConfig(token, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := buildkite.NewClient(config.Client())\n\tt := time.Now()\n\n\tcol := collector.New(client, collector.Opts{\n\t\tOrgSlug: org,\n\t\tHistorical: time.Hour * 24,\n\t})\n\n\tif queue != \"\" {\n\t\tcol.Queue = queue\n\t}\n\n\tvar bk backend.Backend\n\tif backendOpt == \"statsd\" {\n\t\tbk, err = backend.NewStatsDBackend(os.Getenv(\"STATSD_HOST\"), strings.ToLower(os.Getenv(\"STATSD_TAGS\")) == \"true\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tbk = &backend.CloudWatchBackend{}\n\t}\n\n\tres, err := col.Collect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres.Dump()\n\n\terr = bk.Collect(res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"Finished in %s\", time.Now().Sub(t))\n\treturn \"\", nil\n}\n\nfunc init() {\n\truntime.HandleFunc(handle)\n}\nAdd retry for failed bk calls to lambdapackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/buildkite\/buildkite-metrics\/backend\"\n\t\"github.com\/buildkite\/buildkite-metrics\/collector\"\n\t\"github.com\/eawsy\/aws-lambda-go\/service\/lambda\/runtime\"\n\t\"gopkg.in\/buildkite\/go-buildkite.v2\/buildkite\"\n)\n\nfunc handle(evt json.RawMessage, ctx *runtime.Context) (interface{}, error) {\n\torg := os.Getenv(\"BUILDKITE_ORG\")\n\ttoken := os.Getenv(\"BUILDKITE_TOKEN\")\n\tbackendOpt := os.Getenv(\"BUILDKITE_BACKEND\")\n\tqueue := os.Getenv(\"BUILDKITE_QUEUE\")\n\tquiet := os.Getenv(\"BUILDKITE_QUIET\")\n\n\tif quiet == \"1\" || quiet == \"false\" {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\tconfig, err := buildkite.NewTokenConfig(token, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := buildkite.NewClient(config.Client())\n\tt := time.Now()\n\n\tcol := collector.New(client, collector.Opts{\n\t\tOrgSlug: org,\n\t\tHistorical: time.Hour * 24,\n\t})\n\n\tif queue != \"\" {\n\t\tcol.Queue = queue\n\t}\n\n\tvar bk backend.Backend\n\tif backendOpt == \"statsd\" {\n\t\tbk, err = backend.NewStatsDBackend(os.Getenv(\"STATSD_HOST\"), strings.ToLower(os.Getenv(\"STATSD_TAGS\")) == \"true\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tbk = &backend.CloudWatchBackend{}\n\t}\n\n\treturn \"\", retry(time.Minute, func() error {\n\t\tres, err := col.Collect()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tres.Dump()\n\n\t\terr = bk.Collect(res)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Printf(\"Finished in %s\", time.Now().Sub(t))\n\t\treturn nil\n\t})\n}\n\nfunc retry(timeout time.Duration, callback func() error) (err error) {\n\tt0 := time.Now()\n\ti := 0\n\tfor {\n\t\ti++\n\n\t\terr = callback()\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\tdelta := time.Now().Sub(t0)\n\t\tif delta > timeout {\n\t\t\treturn fmt.Errorf(\"after %d attempts (during %s), last error: %s\", i, delta, err)\n\t\t}\n\n\t\ttime.Sleep(time.Second * 2)\n\t\tlog.Println(\"retrying after error:\", err)\n\t}\n}\n\nfunc init() {\n\truntime.HandleFunc(handle)\n}\n<|endoftext|>"} {"text":"package peco\n\nimport 
(\n\t\"fmt\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/mattn\/go-runewidth\"\n\t\"github.com\/nsf\/termbox-go\"\n)\n\ntype Layout interface {\n\tClearStatus(time.Duration)\n\tPrintStatus(string)\n\tDrawScreen([]Match)\n}\n\n\/\/ Utility function\nfunc mergeAttribute(a, b termbox.Attribute) termbox.Attribute {\n\tif a&0x0F == 0 || b&0x0F == 0 {\n\t\treturn a | b\n\t} else {\n\t\treturn ((a - 1) | (b - 1)) + 1\n\t}\n}\n\n\/\/ Utility function\nfunc printScreen(x, y int, fg, bg termbox.Attribute, msg string, fill bool) {\n\tfor len(msg) > 0 {\n\t\tc, w := utf8.DecodeRuneInString(msg)\n\t\tif c == utf8.RuneError {\n\t\t\tc = '?'\n\t\t\tw = 1\n\t\t}\n\t\tmsg = msg[w:]\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx += runewidth.RuneWidth(c)\n\t}\n\n\tif !fill {\n\t\treturn\n\t}\n\n\twidth, _ := termbox.Size()\n\tfor ; x < width; x++ {\n\t\ttermbox.SetCell(x, y, ' ', fg, bg)\n\t}\n}\n\n\/\/ UserPrompt draws the prompt line\ntype UserPrompt struct {\n\t*Ctx\n\tprefix string\n\tprefixLen int\n}\n\nfunc NewUserPrompt(ctx *Ctx) *UserPrompt {\n\tprefix := ctx.config.Prompt\n\tif len(prefix) <= 0 { \/\/ default\n\t\tprefix = \"QUERY>\"\n\t}\n\tprefixLen := runewidth.StringWidth(prefix)\n\n\treturn &UserPrompt{\n\t\tCtx: ctx,\n\t\tprefix: prefix,\n\t\tprefixLen: prefixLen,\n\t}\n}\n\nfunc (u UserPrompt) Draw() {\n\t\/\/ print \"QUERY>\"\n\tprintScreen(0, 0, u.config.Style.BasicFG(), u.config.Style.BasicBG(), u.prefix, false)\n\n\tif u.caretPos <= 0 {\n\t\tu.caretPos = 0 \/\/ sanity\n\t}\n\n\tif u.caretPos > len(u.query) {\n\t\tu.caretPos = len(u.query)\n\t}\n\n\tif u.caretPos == len(u.query) {\n\t\t\/\/ the entire string + the caret after the string\n\t\tfg := u.config.Style.QueryFG()\n\t\tbg := u.config.Style.QueryBG()\n\t\tqs := string(u.query)\n\t\tql := runewidth.StringWidth(qs)\n\t\tprintScreen(u.prefixLen+1, 0, fg, bg, qs, false)\n\t\tprintScreen(u.prefixLen+1+ql, 0, fg|termbox.AttrReverse, bg|termbox.AttrReverse, \" \", false)\n\t\tprintScreen(u.prefixLen+1+ql+1, 0, fg, bg, \"\", true)\n\t} else {\n\t\t\/\/ the caret is in the middle of the string\n\t\tprev := 0\n\t\tfg := u.config.Style.QueryFG()\n\t\tbg := u.config.Style.QueryBG()\n\t\tfor i, r := range u.query {\n\t\t\tif i == u.caretPos {\n\t\t\t\tfg |= termbox.AttrReverse\n\t\t\t\tbg |= termbox.AttrReverse\n\t\t\t}\n\t\t\ttermbox.SetCell(u.prefixLen+1+prev, 0, r, fg, bg)\n\t\t\tprev += runewidth.RuneWidth(r)\n\t\t}\n\t}\n\n\twidth, _ := termbox.Size()\n\n\tpmsg := fmt.Sprintf(\"%s [%d\/%d]\", u.Matcher().String(), u.currentPage.index, u.maxPage)\n\tprintScreen(width-runewidth.StringWidth(pmsg), 0, u.config.Style.BasicFG(), u.config.Style.BasicBG(), pmsg, false)\n}\n\n\/\/ StatusBar draws the status message bar\ntype StatusBar struct {\n\t*Ctx\n\tclearTimer *time.Timer\n}\n\nfunc NewStatusBar(ctx *Ctx) *StatusBar {\n\treturn &StatusBar{\n\t\tctx,\n\t\tnil,\n\t}\n}\n\nfunc (s *StatusBar) stopTimer() {\n\tif t := s.clearTimer; t != nil {\n\t\tt.Stop()\n\t}\n}\n\nfunc (s *StatusBar) ClearStatus(d time.Duration) {\n\ts.stopTimer()\n\ts.clearTimer = time.AfterFunc(d, func() {\n\t\ts.PrintStatus(\"\")\n\t})\n}\n\nfunc (s *StatusBar) PrintStatus(msg string) {\n\ts.stopTimer()\n\n\tw, h := termbox.Size()\n\n\twidth := runewidth.StringWidth(msg)\n\tfor width > w {\n\t\t_, rw := utf8.DecodeRuneInString(msg)\n\t\twidth = width - rw\n\t\tmsg = msg[rw:]\n\t}\n\n\tvar pad []byte\n\tif w > width {\n\t\tpad = make([]byte, w-width)\n\t\tfor i := 0; i < w-width; i++ {\n\t\t\tpad[i] = ' '\n\t\t}\n\t}\n\n\tfgAttr := s.config.Style.BasicFG()\n\tbgAttr := 
s.config.Style.BasicBG()\n\n\tif w > width {\n\t\tprintScreen(0, h-2, fgAttr, bgAttr, string(pad), false)\n\t}\n\n\tif width > 0 {\n\t\tprintScreen(w-width, h-2, fgAttr|termbox.AttrReverse|termbox.AttrBold, bgAttr|termbox.AttrReverse, msg, false)\n\t}\n\ttermbox.Flush()\n}\n\ntype basicLayout struct {\n\t*Ctx\n\t*StatusBar\n\t*UserPrompt\n}\n\n\/\/ DefaultLayout implements the top-down layout\ntype DefaultLayout struct {\n\t*basicLayout\n}\ntype BottomUpLayout struct {\n\t*basicLayout\n}\n\nfunc NewDefaultLayout(ctx *Ctx) *DefaultLayout {\n\treturn &DefaultLayout{\n\t\t&basicLayout{\n\t\t\tCtx: ctx,\n\t\t\tStatusBar: NewStatusBar(ctx),\n\t\t\tUserPrompt: NewUserPrompt(ctx),\n\t\t},\n\t}\n}\n\nfunc (l *DefaultLayout) DrawScreen(targets []Match) {\n\tfgAttr := l.config.Style.BasicFG()\n\tbgAttr := l.config.Style.BasicBG()\n\n\tif err := termbox.Clear(fgAttr, bgAttr); err != nil {\n\t\treturn\n\t}\n\n\tif l.currentLine > len(targets) && len(targets) > 0 {\n\t\tl.currentLine = len(targets)\n\t}\n\n\t_, height := termbox.Size()\n\tperPage := height - 4\n\nCALCULATE_PAGE:\n\tcurrentPage := l.currentPage\n\tcurrentPage.index = ((l.currentLine - 1) \/ perPage) + 1\n\tif currentPage.index <= 0 {\n\t\tcurrentPage.index = 1\n\t}\n\tcurrentPage.offset = (currentPage.index - 1) * perPage\n\tcurrentPage.perPage = perPage\n\tif len(targets) == 0 {\n\t\tl.maxPage = 1\n\t} else {\n\t\tl.maxPage = ((len(targets) + perPage - 1) \/ perPage)\n\t}\n\n\tif l.maxPage < currentPage.index {\n\t\tif len(targets) == 0 && len(l.query) == 0 {\n\t\t\t\/\/ wait for targets\n\t\t\treturn\n\t\t}\n\t\tl.currentLine = currentPage.offset\n\t\tgoto CALCULATE_PAGE\n\t}\n\n\tl.UserPrompt.Draw()\n\n\tfor n := 1; n <= perPage; n++ {\n\t\tswitch {\n\t\tcase n+currentPage.offset == l.currentLine:\n\t\t\tfgAttr = l.config.Style.SelectedFG()\n\t\t\tbgAttr = l.config.Style.SelectedBG()\n\t\tcase l.selection.Has(n+currentPage.offset) || l.SelectedRange().Has(n+currentPage.offset):\n\t\t\tfgAttr = l.config.Style.SavedSelectionFG()\n\t\t\tbgAttr = l.config.Style.SavedSelectionBG()\n\t\tdefault:\n\t\t\tfgAttr = l.config.Style.BasicFG()\n\t\t\tbgAttr = l.config.Style.BasicBG()\n\t\t}\n\n\t\ttargetIdx := currentPage.offset + n - 1\n\t\tif targetIdx >= len(targets) {\n\t\t\tbreak\n\t\t}\n\n\t\ttarget := targets[targetIdx]\n\t\tline := target.Line()\n\t\tmatches := target.Indices()\n\t\tif matches == nil {\n\t\t\tprintScreen(0, n, fgAttr, bgAttr, line, true)\n\t\t} else {\n\t\t\tprev := 0\n\t\t\tindex := 0\n\t\t\tfor _, m := range matches {\n\t\t\t\tif m[0] > index {\n\t\t\t\t\tc := line[index:m[0]]\n\t\t\t\t\tprintScreen(prev, n, fgAttr, bgAttr, c, false)\n\t\t\t\t\tprev += runewidth.StringWidth(c)\n\t\t\t\t\tindex += len(c)\n\t\t\t\t}\n\t\t\t\tc := line[m[0]:m[1]]\n\t\t\t\tprintScreen(prev, n, l.config.Style.MatchedFG(), mergeAttribute(bgAttr, l.config.Style.MatchedBG()), c, true)\n\t\t\t\tprev += runewidth.StringWidth(c)\n\t\t\t\tindex += len(c)\n\t\t\t}\n\n\t\t\tm := matches[len(matches)-1]\n\t\t\tif m[0] > index {\n\t\t\t\tprintScreen(prev, n, l.config.Style.QueryFG(), mergeAttribute(bgAttr, l.config.Style.QueryBG()), line[m[0]:m[1]], true)\n\t\t\t} else if len(line) > m[1] {\n\t\t\t\tprintScreen(prev, n, fgAttr, bgAttr, line[m[1]:len(line)], true)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := termbox.Flush(); err != nil {\n\t\treturn\n\t}\n}\nrip out page calculationpackage peco\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/mattn\/go-runewidth\"\n\t\"github.com\/nsf\/termbox-go\"\n)\n\ntype Layout interface 
{\n\tClearStatus(time.Duration)\n\tPrintStatus(string)\n\tDrawScreen([]Match)\n}\n\n\/\/ Utility function\nfunc mergeAttribute(a, b termbox.Attribute) termbox.Attribute {\n\tif a&0x0F == 0 || b&0x0F == 0 {\n\t\treturn a | b\n\t} else {\n\t\treturn ((a - 1) | (b - 1)) + 1\n\t}\n}\n\n\/\/ Utility function\nfunc printScreen(x, y int, fg, bg termbox.Attribute, msg string, fill bool) {\n\tfor len(msg) > 0 {\n\t\tc, w := utf8.DecodeRuneInString(msg)\n\t\tif c == utf8.RuneError {\n\t\t\tc = '?'\n\t\t\tw = 1\n\t\t}\n\t\tmsg = msg[w:]\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx += runewidth.RuneWidth(c)\n\t}\n\n\tif !fill {\n\t\treturn\n\t}\n\n\twidth, _ := termbox.Size()\n\tfor ; x < width; x++ {\n\t\ttermbox.SetCell(x, y, ' ', fg, bg)\n\t}\n}\n\n\/\/ UserPrompt draws the prompt line\ntype UserPrompt struct {\n\t*Ctx\n\tprefix string\n\tprefixLen int\n}\n\nfunc NewUserPrompt(ctx *Ctx) *UserPrompt {\n\tprefix := ctx.config.Prompt\n\tif len(prefix) <= 0 { \/\/ default\n\t\tprefix = \"QUERY>\"\n\t}\n\tprefixLen := runewidth.StringWidth(prefix)\n\n\treturn &UserPrompt{\n\t\tCtx: ctx,\n\t\tprefix: prefix,\n\t\tprefixLen: prefixLen,\n\t}\n}\n\nfunc (u UserPrompt) Draw() {\n\t\/\/ print \"QUERY>\"\n\tprintScreen(0, 0, u.config.Style.BasicFG(), u.config.Style.BasicBG(), u.prefix, false)\n\n\tif u.caretPos <= 0 {\n\t\tu.caretPos = 0 \/\/ sanity\n\t}\n\n\tif u.caretPos > len(u.query) {\n\t\tu.caretPos = len(u.query)\n\t}\n\n\tif u.caretPos == len(u.query) {\n\t\t\/\/ the entire string + the caret after the string\n\t\tfg := u.config.Style.QueryFG()\n\t\tbg := u.config.Style.QueryBG()\n\t\tqs := string(u.query)\n\t\tql := runewidth.StringWidth(qs)\n\t\tprintScreen(u.prefixLen+1, 0, fg, bg, qs, false)\n\t\tprintScreen(u.prefixLen+1+ql, 0, fg|termbox.AttrReverse, bg|termbox.AttrReverse, \" \", false)\n\t\tprintScreen(u.prefixLen+1+ql+1, 0, fg, bg, \"\", true)\n\t} else {\n\t\t\/\/ the caret is in the middle of the string\n\t\tprev := 0\n\t\tfg := u.config.Style.QueryFG()\n\t\tbg := u.config.Style.QueryBG()\n\t\tfor i, r := range u.query {\n\t\t\tif i == u.caretPos {\n\t\t\t\tfg |= termbox.AttrReverse\n\t\t\t\tbg |= termbox.AttrReverse\n\t\t\t}\n\t\t\ttermbox.SetCell(u.prefixLen+1+prev, 0, r, fg, bg)\n\t\t\tprev += runewidth.RuneWidth(r)\n\t\t}\n\t}\n\n\twidth, _ := termbox.Size()\n\n\tpmsg := fmt.Sprintf(\"%s [%d\/%d]\", u.Matcher().String(), u.currentPage.index, u.maxPage)\n\tprintScreen(width-runewidth.StringWidth(pmsg), 0, u.config.Style.BasicFG(), u.config.Style.BasicBG(), pmsg, false)\n}\n\n\/\/ StatusBar draws the status message bar\ntype StatusBar struct {\n\t*Ctx\n\tclearTimer *time.Timer\n}\n\nfunc NewStatusBar(ctx *Ctx) *StatusBar {\n\treturn &StatusBar{\n\t\tctx,\n\t\tnil,\n\t}\n}\n\nfunc (s *StatusBar) stopTimer() {\n\tif t := s.clearTimer; t != nil {\n\t\tt.Stop()\n\t}\n}\n\nfunc (s *StatusBar) ClearStatus(d time.Duration) {\n\ts.stopTimer()\n\ts.clearTimer = time.AfterFunc(d, func() {\n\t\ts.PrintStatus(\"\")\n\t})\n}\n\nfunc (s *StatusBar) PrintStatus(msg string) {\n\ts.stopTimer()\n\n\tw, h := termbox.Size()\n\n\twidth := runewidth.StringWidth(msg)\n\tfor width > w {\n\t\t_, rw := utf8.DecodeRuneInString(msg)\n\t\twidth = width - rw\n\t\tmsg = msg[rw:]\n\t}\n\n\tvar pad []byte\n\tif w > width {\n\t\tpad = make([]byte, w-width)\n\t\tfor i := 0; i < w-width; i++ {\n\t\t\tpad[i] = ' '\n\t\t}\n\t}\n\n\tfgAttr := s.config.Style.BasicFG()\n\tbgAttr := s.config.Style.BasicBG()\n\n\tif w > width {\n\t\tprintScreen(0, h-2, fgAttr, bgAttr, string(pad), false)\n\t}\n\n\tif width > 0 
{\n\t\tprintScreen(w-width, h-2, fgAttr|termbox.AttrReverse|termbox.AttrBold, bgAttr|termbox.AttrReverse, msg, false)\n\t}\n\ttermbox.Flush()\n}\n\ntype basicLayout struct {\n\t*Ctx\n\t*StatusBar\n\t*UserPrompt\n}\n\n\/\/ DefaultLayout implements the top-down layout\ntype DefaultLayout struct {\n\t*basicLayout\n}\ntype BottomUpLayout struct {\n\t*basicLayout\n}\n\nfunc NewDefaultLayout(ctx *Ctx) *DefaultLayout {\n\treturn &DefaultLayout{\n\t\t&basicLayout{\n\t\t\tCtx: ctx,\n\t\t\tStatusBar: NewStatusBar(ctx),\n\t\t\tUserPrompt: NewUserPrompt(ctx),\n\t\t},\n\t}\n}\n\nfunc (l *DefaultLayout) CalculatePage(targets []Match, perPage int) error {\nCALCULATE_PAGE:\n\tcurrentPage := l.currentPage\n\tcurrentPage.index = ((l.currentLine - 1) \/ perPage) + 1\n\tif currentPage.index <= 0 {\n\t\tcurrentPage.index = 1\n\t}\n\tcurrentPage.offset = (currentPage.index - 1) * perPage\n\tcurrentPage.perPage = perPage\n\tif len(targets) == 0 {\n\t\tl.maxPage = 1\n\t} else {\n\t\tl.maxPage = ((len(targets) + perPage - 1) \/ perPage)\n\t}\n\n\tif l.maxPage < currentPage.index {\n\t\tif len(targets) == 0 && len(l.query) == 0 {\n\t\t\t\/\/ wait for targets\n\t\t\treturn fmt.Errorf(\"no targets or query. nothing to do\")\n\t\t}\n\t\tl.currentLine = currentPage.offset\n\t\tgoto CALCULATE_PAGE\n\t}\n\n\treturn nil\n}\n\nfunc (l *DefaultLayout) DrawScreen(targets []Match) {\n\tfgAttr := l.config.Style.BasicFG()\n\tbgAttr := l.config.Style.BasicBG()\n\n\tif err := termbox.Clear(fgAttr, bgAttr); err != nil {\n\t\treturn\n\t}\n\n\tif l.currentLine > len(targets) && len(targets) > 0 {\n\t\tl.currentLine = len(targets)\n\t}\n\n\t_, height := termbox.Size()\n\tperPage := height - 4\n\n\tif err := l.CalculatePage(targets, perPage); err != nil {\n\t\treturn\n\t}\n\n\tl.UserPrompt.Draw()\n\tcurrentPage := l.currentPage\n\n\tfor n := 1; n <= perPage; n++ {\n\t\tswitch {\n\t\tcase n+currentPage.offset == l.currentLine:\n\t\t\tfgAttr = l.config.Style.SelectedFG()\n\t\t\tbgAttr = l.config.Style.SelectedBG()\n\t\tcase l.selection.Has(n+currentPage.offset) || l.SelectedRange().Has(n+currentPage.offset):\n\t\t\tfgAttr = l.config.Style.SavedSelectionFG()\n\t\t\tbgAttr = l.config.Style.SavedSelectionBG()\n\t\tdefault:\n\t\t\tfgAttr = l.config.Style.BasicFG()\n\t\t\tbgAttr = l.config.Style.BasicBG()\n\t\t}\n\n\t\ttargetIdx := currentPage.offset + n - 1\n\t\tif targetIdx >= len(targets) {\n\t\t\tbreak\n\t\t}\n\n\t\ttarget := targets[targetIdx]\n\t\tline := target.Line()\n\t\tmatches := target.Indices()\n\t\tif matches == nil {\n\t\t\tprintScreen(0, n, fgAttr, bgAttr, line, true)\n\t\t} else {\n\t\t\tprev := 0\n\t\t\tindex := 0\n\t\t\tfor _, m := range matches {\n\t\t\t\tif m[0] > index {\n\t\t\t\t\tc := line[index:m[0]]\n\t\t\t\t\tprintScreen(prev, n, fgAttr, bgAttr, c, false)\n\t\t\t\t\tprev += runewidth.StringWidth(c)\n\t\t\t\t\tindex += len(c)\n\t\t\t\t}\n\t\t\t\tc := line[m[0]:m[1]]\n\t\t\t\tprintScreen(prev, n, l.config.Style.MatchedFG(), mergeAttribute(bgAttr, l.config.Style.MatchedBG()), c, true)\n\t\t\t\tprev += runewidth.StringWidth(c)\n\t\t\t\tindex += len(c)\n\t\t\t}\n\n\t\t\tm := matches[len(matches)-1]\n\t\t\tif m[0] > index {\n\t\t\t\tprintScreen(prev, n, l.config.Style.QueryFG(), mergeAttribute(bgAttr, l.config.Style.QueryBG()), line[m[0]:m[1]], true)\n\t\t\t} else if len(line) > m[1] {\n\t\t\t\tprintScreen(prev, n, fgAttr, bgAttr, line[m[1]:len(line)], true)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := termbox.Flush(); err != nil {\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"\/*\n *\n * Copyright 2014, Google Inc.\n * All 
rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and\/or other materials provided with the\n * distribution.\n * * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *\/\n\npackage grpc\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/transport\"\n)\n\nvar (\n\t\/\/ ErrUnspecTarget indicates that the target address is unspecified.\n\tErrUnspecTarget = errors.New(\"grpc: target is unspecified\")\n\t\/\/ ErrNoTransportSecurity indicates that there is no transport security\n\t\/\/ being set for ClientConn. Users should either set one or explicitly\n\t\/\/ call the WithInsecure DialOption to disable security.\n\tErrNoTransportSecurity = errors.New(\"grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)\")\n\t\/\/ ErrCredentialsMisuse indicates that users want to transmit security information\n\t\/\/ (e.g., oauth2 token) which requires a secure connection on an insecure\n\t\/\/ connection.\n\tErrCredentialsMisuse = errors.New(\"grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)\")\n\t\/\/ ErrClientConnClosing indicates that the operation is illegal because\n\t\/\/ the session is closing.\n\tErrClientConnClosing = errors.New(\"grpc: the client connection is closing\")\n\t\/\/ ErrClientConnTimeout indicates that the connection could not be\n\t\/\/ established or re-established within the specified timeout.\n\tErrClientConnTimeout = errors.New(\"grpc: timed out trying to connect\")\n\t\/\/ minimum time to give a connection to complete\n\tminConnectTimeout = 20 * time.Second\n)\n\n\/\/ dialOptions configure a Dial call. 
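A minimal\n\/\/ sketch of such a call (the target address and option values here are\n\/\/ illustrative placeholders, not defaults):\n\/\/\n\/\/ \tconn, err := grpc.Dial(\"localhost:50051\",\n\/\/ \t\tgrpc.WithInsecure(),\n\/\/ \t\tgrpc.WithTimeout(5*time.Second),\n\/\/ \t)\n\/\/\n\/\/ 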
dialOptions are set by the DialOption\n\/\/ values passed to Dial.\ntype dialOptions struct {\n\tcodec Codec\n\tblock bool\n\tinsecure bool\n\tcopts transport.ConnectOptions\n}\n\n\/\/ DialOption configures how we set up the connection.\ntype DialOption func(*dialOptions)\n\n\/\/ WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.\nfunc WithCodec(c Codec) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.codec = c\n\t}\n}\n\n\/\/ WithBlock returns a DialOption which makes the caller of Dial block until the underlying\n\/\/ connection is up. Without this, Dial returns immediately and connecting to the\n\/\/ server happens in the background.\nfunc WithBlock() DialOption {\n\treturn func(o *dialOptions) {\n\t\to.block = true\n\t}\n}\n\n\/\/ WithInsecure returns a DialOption which disables transport security for\n\/\/ the connection. Note that transport security is required unless WithInsecure\n\/\/ is set.\nfunc WithInsecure() DialOption {\n\treturn func(o *dialOptions) {\n\t\to.insecure = true\n\t}\n}\n\n\/\/ WithTransportCredentials returns a DialOption which configures\n\/\/ connection-level security credentials (e.g., TLS\/SSL).\nfunc WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n\/\/ WithPerRPCCredentials returns a DialOption which sets\n\/\/ credentials which will place auth state on each outbound RPC.\nfunc WithPerRPCCredentials(creds credentials.Credentials) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n\/\/ WithTimeout returns a DialOption that configures a timeout for dialing a client connection.\nfunc WithTimeout(d time.Duration) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Timeout = d\n\t}\n}\n\n\/\/ WithDialer returns a DialOption that specifies a function to use for dialing network addresses.\nfunc WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Dialer = f\n\t}\n}\n\n\/\/ WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.\nfunc WithUserAgent(s string) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.UserAgent = s\n\t}\n}\n\n\/\/ Dial creates a client connection to the given target.\nfunc Dial(target string, opts ...DialOption) (*ClientConn, error) {\n\tif target == \"\" {\n\t\treturn nil, ErrUnspecTarget\n\t}\n\tcc := &ClientConn{\n\t\ttarget: target,\n\t\tshutdownChan: make(chan struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(&cc.dopts)\n\t}\n\tif !cc.dopts.insecure {\n\t\tvar ok bool\n\t\tfor _, c := range cc.dopts.copts.AuthOptions {\n\t\t\tif _, ok := c.(credentials.TransportAuthenticator); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tok = true\n\t\t}\n\t\tif !ok {\n\t\t\treturn nil, ErrNoTransportSecurity\n\t\t}\n\t} else {\n\t\tfor _, c := range cc.dopts.copts.AuthOptions {\n\t\t\tif c.RequireTransportSecurity() {\n\t\t\t\treturn nil, ErrCredentialsMisuse\n\t\t\t}\n\t\t}\n\t}\n\tcolonPos := strings.LastIndex(target, \":\")\n\tif colonPos == -1 {\n\t\tcolonPos = len(target)\n\t}\n\tcc.authority = target[:colonPos]\n\tif cc.dopts.codec == nil {\n\t\t\/\/ Set the default codec.\n\t\tcc.dopts.codec = protoCodec{}\n\t}\n\tcc.stateCV = sync.NewCond(&cc.mu)\n\tif cc.dopts.block {\n\t\tif err := cc.resetTransport(false); err != nil {\n\t\t\tcc.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Start to monitor the error status of transport.\n\t\tgo cc.transportMonitor()\n\t} else {\n\t\t\/\/ Start a goroutine connecting to the server asynchronously.\n\t\tgo func() 
{\n\t\t\tif err := cc.resetTransport(false); err != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to dial %s: %v; please retry.\", target, err)\n\t\t\t\tcc.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo cc.transportMonitor()\n\t\t}()\n\t}\n\treturn cc, nil\n}\n\n\/\/ ConnectivityState indicates the state of a client connection.\ntype ConnectivityState int\n\nconst (\n\t\/\/ Idle indicates the ClientConn is idle.\n\tIdle ConnectivityState = iota\n\t\/\/ Connecting indicates the ClientConn is connecting.\n\tConnecting\n\t\/\/ Ready indicates the ClientConn is ready for work.\n\tReady\n\t\/\/ TransientFailure indicates the ClientConn has seen a failure but expects to recover.\n\tTransientFailure\n\t\/\/ Shutdown indicates the ClientConn has stated shutting down.\n\tShutdown\n)\n\nfunc (s ConnectivityState) String() string {\n\tswitch s {\n\tcase Idle:\n\t\treturn \"IDLE\"\n\tcase Connecting:\n\t\treturn \"CONNECTING\"\n\tcase Ready:\n\t\treturn \"READY\"\n\tcase TransientFailure:\n\t\treturn \"TRANSIENT_FAILURE\"\n\tcase Shutdown:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown connectivity state: %d\", s))\n\t}\n}\n\n\/\/ ClientConn represents a client connection to an RPC service.\ntype ClientConn struct {\n\ttarget string\n\tauthority string\n\tdopts dialOptions\n\tshutdownChan chan struct{}\n\n\tmu sync.Mutex\n\tstate ConnectivityState\n\tstateCV *sync.Cond\n\t\/\/ ready is closed and becomes nil when a new transport is up or failed\n\t\/\/ due to timeout.\n\tready chan struct{}\n\t\/\/ Every time a new transport is created, this is incremented by 1. Used\n\t\/\/ to avoid trying to recreate a transport while the new one is already\n\t\/\/ under construction.\n\ttransportSeq int\n\ttransport transport.ClientTransport\n}\n\n\/\/ State returns the connectivity state of the ClientConn.\nfunc (cc *ClientConn) State() ConnectivityState {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\treturn cc.state\n}\n\n\/\/ WaitForStateChange blocks until the state changes to something other than the sourceState\n\/\/ or timeout fires. 
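A zero or negative timeout\n\/\/ counts as having already fired. 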
It returns false if timeout fires and true otherwise.\nfunc (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {\n\tstart := time.Now()\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tif sourceState != cc.state {\n\t\treturn true\n\t}\n\texpired := timeout <= time.Since(start)\n\tif expired {\n\t\treturn false\n\t}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-time.After(timeout - time.Since(start)):\n\t\t\tcc.mu.Lock()\n\t\t\texpired = true\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\tcase <-done:\n\t\t}\n\t}()\n\tdefer close(done)\n\tfor sourceState == cc.state {\n\t\tcc.stateCV.Wait()\n\t\tif expired {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (cc *ClientConn) resetTransport(closeTransport bool) error {\n\tvar retries int\n\tstart := time.Now()\n\tfor {\n\t\tcc.mu.Lock()\n\t\tcc.state = Connecting\n\t\tcc.stateCV.Broadcast()\n\t\tt := cc.transport\n\t\tts := cc.transportSeq\n\t\t\/\/ Avoid wait() picking up a dying transport unnecessarily.\n\t\tcc.transportSeq = 0\n\t\tif cc.state == Shutdown {\n\t\t\tcc.mu.Unlock()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.mu.Unlock()\n\t\tif closeTransport {\n\t\t\tt.Close()\n\t\t}\n\t\t\/\/ Adjust timeout for the current try.\n\t\tcopts := cc.dopts.copts\n\t\tif copts.Timeout < 0 {\n\t\t\tcc.Close()\n\t\t\treturn ErrClientConnTimeout\n\t\t}\n\t\tif copts.Timeout > 0 {\n\t\t\tcopts.Timeout -= time.Since(start)\n\t\t\tif copts.Timeout <= 0 {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t}\n\t\tsleepTime := backoff(retries)\n\t\ttimeout := sleepTime\n\t\tif timeout < minConnectTimeout {\n\t\t\ttimeout = minConnectTimeout\n\t\t}\n\t\tif copts.Timeout == 0 || copts.Timeout > timeout {\n\t\t\tcopts.Timeout = timeout\n\t\t}\n\t\tconnectTime := time.Now()\n\t\tnewTransport, err := transport.NewClientTransport(cc.target, &copts)\n\t\tif err != nil {\n\t\t\tcc.mu.Lock()\n\t\t\tcc.state = TransientFailure\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\t\tsleepTime -= time.Since(connectTime)\n\t\t\tif sleepTime < 0 {\n\t\t\t\tsleepTime = 0\n\t\t\t}\n\t\t\t\/\/ Fail early before falling into sleep.\n\t\t\tif cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t\tcloseTransport = false\n\t\t\ttime.Sleep(sleepTime)\n\t\t\tretries++\n\t\t\tgrpclog.Printf(\"grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q\", err, cc.target)\n\t\t\tcontinue\n\t\t}\n\t\tcc.mu.Lock()\n\t\tif cc.state == Shutdown {\n\t\t\t\/\/ cc.Close() has been invoked.\n\t\t\tcc.mu.Unlock()\n\t\t\tnewTransport.Close()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.state = Ready\n\t\tcc.stateCV.Broadcast()\n\t\tcc.transport = newTransport\n\t\tcc.transportSeq = ts + 1\n\t\tif cc.ready != nil {\n\t\t\tclose(cc.ready)\n\t\t\tcc.ready = nil\n\t\t}\n\t\tcc.mu.Unlock()\n\t\treturn nil\n\t}\n}\n\n\/\/ Run in a goroutine to track the error in transport and create the\n\/\/ new transport if an error happens. 
It returns when the channel is closing.\nfunc (cc *ClientConn) transportMonitor() {\n\tfor {\n\t\tselect {\n\t\t\/\/ shutdownChan is needed to detect the teardown when\n\t\t\/\/ the ClientConn is idle (i.e., no RPC in flight).\n\t\tcase <-cc.shutdownChan:\n\t\t\treturn\n\t\tcase <-cc.transport.Error():\n\t\t\tcc.mu.Lock()\n\t\t\tcc.state = TransientFailure\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\t\tif err := cc.resetTransport(true); err != nil {\n\t\t\t\t\/\/ The ClientConn is closing.\n\t\t\t\tgrpclog.Printf(\"grpc: ClientConn.transportMonitor exits due to: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ When wait returns, either the new transport is up or ClientConn is\n\/\/ closing. Used to avoid working on a dying transport. It updates and\n\/\/ returns the transport and its version when there is no error.\nfunc (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {\n\tfor {\n\t\tcc.mu.Lock()\n\t\tswitch {\n\t\tcase cc.state == Shutdown:\n\t\t\tcc.mu.Unlock()\n\t\t\treturn nil, 0, ErrClientConnClosing\n\t\tcase ts < cc.transportSeq:\n\t\t\t\/\/ Worked on a dying transport. Try the new one immediately.\n\t\t\tdefer cc.mu.Unlock()\n\t\t\treturn cc.transport, cc.transportSeq, nil\n\t\tdefault:\n\t\t\tready := cc.ready\n\t\t\tif ready == nil {\n\t\t\t\tready = make(chan struct{})\n\t\t\t\tcc.ready = ready\n\t\t\t}\n\t\t\tcc.mu.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, 0, transport.ContextErr(ctx.Err())\n\t\t\t\/\/ Wait until the new transport is ready or failed.\n\t\t\tcase <-ready:\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Close starts to tear down the ClientConn. Returns ErrClientConnClosing if\n\/\/ it has been closed (mostly due to dial time-out).\n\/\/ TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in\n\/\/ some edge cases (e.g., the caller opens and closes many ClientConn's in a\n\/\/ tight loop).\nfunc (cc *ClientConn) Close() error {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tif cc.state == Shutdown {\n\t\treturn ErrClientConnClosing\n\t}\n\tcc.state = Shutdown\n\tcc.stateCV.Broadcast()\n\tif cc.ready != nil {\n\t\tclose(cc.ready)\n\t\tcc.ready = nil\n\t}\n\tif cc.transport != nil {\n\t\tcc.transport.Close()\n\t}\n\tif cc.shutdownChan != nil {\n\t\tclose(cc.shutdownChan)\n\t}\n\treturn nil\n}\ns\/stated\/started\/ in Shutdown doc\/*\n *\n * Copyright 2014, Google Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following disclaimer\n * in the documentation and\/or other materials provided with the\n * distribution.\n * * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *\/\n\npackage grpc\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/grpclog\"\n\t\"google.golang.org\/grpc\/transport\"\n)\n\nvar (\n\t\/\/ ErrUnspecTarget indicates that the target address is unspecified.\n\tErrUnspecTarget = errors.New(\"grpc: target is unspecified\")\n\t\/\/ ErrNoTransportSecurity indicates that there is no transport security\n\t\/\/ being set for ClientConn. Users should either set one or explicitly\n\t\/\/ call the WithInsecure DialOption to disable security.\n\tErrNoTransportSecurity = errors.New(\"grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)\")\n\t\/\/ ErrCredentialsMisuse indicates that users want to transmit security information\n\t\/\/ (e.g., an oauth2 token) which requires a secure connection, on an insecure\n\t\/\/ connection.\n\tErrCredentialsMisuse = errors.New(\"grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)\")\n\t\/\/ ErrClientConnClosing indicates that the operation is illegal because\n\t\/\/ the session is closing.\n\tErrClientConnClosing = errors.New(\"grpc: the client connection is closing\")\n\t\/\/ ErrClientConnTimeout indicates that the connection could not be\n\t\/\/ established or re-established within the specified timeout.\n\tErrClientConnTimeout = errors.New(\"grpc: timed out trying to connect\")\n\t\/\/ minConnectTimeout is the minimum time to give a connection to complete.\n\tminConnectTimeout = 20 * time.Second\n)\n\n\/\/ dialOptions configure a Dial call. dialOptions are set by the DialOption\n\/\/ values passed to Dial.\ntype dialOptions struct {\n\tcodec Codec\n\tblock bool\n\tinsecure bool\n\tcopts transport.ConnectOptions\n}\n\n\/\/ DialOption configures how we set up the connection.\ntype DialOption func(*dialOptions)\n\n\/\/ WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.\nfunc WithCodec(c Codec) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.codec = c\n\t}\n}\n
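\n\/\/ Since a DialOption is just a function that mutates *dialOptions, new options\n\/\/ follow the same shape as WithCodec. A minimal hypothetical sketch (not part\n\/\/ of this API; the name withExampleAgent is invented):\n\/\/\n\/\/\tfunc withExampleAgent() DialOption {\n\/\/\t\treturn func(o *dialOptions) {\n\/\/\t\t\to.copts.UserAgent = \"example-agent\/1.0\"\n\/\/\t\t}\n\/\/\t}\n\n\/\/ WithBlock returns a DialOption which makes the caller of Dial block until the underlying\n\/\/ connection is up. 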
Without this, Dial returns immediately, and connecting to the server\n\/\/ happens in the background.\nfunc WithBlock() DialOption {\n\treturn func(o *dialOptions) {\n\t\to.block = true\n\t}\n}\n\n\/\/ WithInsecure returns a DialOption which disables transport security for the\n\/\/ connection. Unless WithInsecure is set, transport security is required.\nfunc WithInsecure() DialOption {\n\treturn func(o *dialOptions) {\n\t\to.insecure = true\n\t}\n}\n\n\/\/ WithTransportCredentials returns a DialOption which configures connection-level\n\/\/ security credentials (e.g., TLS\/SSL).\nfunc WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n\/\/ WithPerRPCCredentials returns a DialOption which sets credentials that will\n\/\/ place auth state on each outbound RPC.\nfunc WithPerRPCCredentials(creds credentials.Credentials) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.AuthOptions = append(o.copts.AuthOptions, creds)\n\t}\n}\n\n\/\/ WithTimeout returns a DialOption that configures a timeout for dialing a client connection.\nfunc WithTimeout(d time.Duration) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Timeout = d\n\t}\n}\n\n\/\/ WithDialer returns a DialOption that specifies a function to use for dialing network addresses.\nfunc WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.Dialer = f\n\t}\n}\n\n\/\/ WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.\nfunc WithUserAgent(s string) DialOption {\n\treturn func(o *dialOptions) {\n\t\to.copts.UserAgent = s\n\t}\n}\n\n\/\/ Dial creates a client connection to the given target.\nfunc Dial(target string, opts ...DialOption) (*ClientConn, error) {\n\tif target == \"\" {\n\t\treturn nil, ErrUnspecTarget\n\t}\n\tcc := &ClientConn{\n\t\ttarget: target,\n\t\tshutdownChan: make(chan struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(&cc.dopts)\n\t}\n\tif !cc.dopts.insecure {\n\t\t\/\/ Ensure at least one transport-level authenticator is configured.\n\t\tvar ok bool\n\t\tfor _, c := range cc.dopts.copts.AuthOptions {\n\t\t\tif _, isTransportAuth := c.(credentials.TransportAuthenticator); isTransportAuth {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn nil, ErrNoTransportSecurity\n\t\t}\n\t} else {\n\t\tfor _, c := range cc.dopts.copts.AuthOptions {\n\t\t\tif c.RequireTransportSecurity() {\n\t\t\t\treturn nil, ErrCredentialsMisuse\n\t\t\t}\n\t\t}\n\t}\n\tcolonPos := strings.LastIndex(target, \":\")\n\tif colonPos == -1 {\n\t\tcolonPos = len(target)\n\t}\n\tcc.authority = target[:colonPos]\n\tif cc.dopts.codec == nil {\n\t\t\/\/ Set the default codec.\n\t\tcc.dopts.codec = protoCodec{}\n\t}\n\tcc.stateCV = sync.NewCond(&cc.mu)\n\tif cc.dopts.block {\n\t\tif err := cc.resetTransport(false); err != nil {\n\t\t\tcc.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Start to monitor the error status of transport.\n\t\tgo cc.transportMonitor()\n\t} else {\n\t\t\/\/ Start a goroutine connecting to the server asynchronously.\n\t\tgo func() {\n\t\t\tif err := cc.resetTransport(false); err != nil {\n\t\t\t\tgrpclog.Printf(\"Failed to dial %s: %v; please retry.\", target, err)\n\t\t\t\tcc.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo cc.transportMonitor()\n\t\t}()\n\t}\n\treturn cc, nil\n}\n
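\n\/\/ A hypothetical usage sketch of Dial (the address and timeout are\n\/\/ illustrative, not defaults):\n\/\/\n\/\/\tconn, err := Dial(\"localhost:50051\",\n\/\/\t\tWithInsecure(),\n\/\/\t\tWithBlock(),\n\/\/\t\tWithTimeout(5*time.Second))\n\/\/\tif err != nil {\n\/\/\t\t\/\/ handle the dial error\n\/\/\t}\n\/\/\tdefer conn.Close()\n\n\/\/ ConnectivityState indicates the state of a client connection.\ntype ConnectivityState int\n\nconst (\n\t\/\/ Idle indicates the ClientConn is idle.\n\tIdle ConnectivityState = iota\n\t\/\/ Connecting indicates the ClientConn is connecting.\n\tConnecting\n\t\/\/ Ready indicates the ClientConn is ready for work.\n\tReady\n\t\/\/ 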
TransientFailure indicates the ClientConn has seen a failure but expects to recover.\n\tTransientFailure\n\t\/\/ Shutdown indicates the ClientConn has started shutting down.\n\tShutdown\n)\n\nfunc (s ConnectivityState) String() string {\n\tswitch s {\n\tcase Idle:\n\t\treturn \"IDLE\"\n\tcase Connecting:\n\t\treturn \"CONNECTING\"\n\tcase Ready:\n\t\treturn \"READY\"\n\tcase TransientFailure:\n\t\treturn \"TRANSIENT_FAILURE\"\n\tcase Shutdown:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown connectivity state: %d\", s))\n\t}\n}\n\n\/\/ ClientConn represents a client connection to an RPC service.\ntype ClientConn struct {\n\ttarget string\n\tauthority string\n\tdopts dialOptions\n\tshutdownChan chan struct{}\n\n\tmu sync.Mutex\n\tstate ConnectivityState\n\tstateCV *sync.Cond\n\t\/\/ ready is closed and becomes nil when a new transport is up or failed\n\t\/\/ due to timeout.\n\tready chan struct{}\n\t\/\/ Every time a new transport is created, this is incremented by 1. Used\n\t\/\/ to avoid trying to recreate a transport while the new one is already\n\t\/\/ under construction.\n\ttransportSeq int\n\ttransport transport.ClientTransport\n}\n\n\/\/ State returns the connectivity state of the ClientConn.\nfunc (cc *ClientConn) State() ConnectivityState {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\treturn cc.state\n}\n\n\/\/ WaitForStateChange blocks until the state changes to something other than the sourceState\n\/\/ or timeout fires. It returns false if timeout fires and true otherwise.\nfunc (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool {\n\tstart := time.Now()\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tif sourceState != cc.state {\n\t\treturn true\n\t}\n\texpired := timeout <= time.Since(start)\n\tif expired {\n\t\treturn false\n\t}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-time.After(timeout - time.Since(start)):\n\t\t\tcc.mu.Lock()\n\t\t\texpired = true\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\tcase <-done:\n\t\t}\n\t}()\n\tdefer close(done)\n\tfor sourceState == cc.state {\n\t\tcc.stateCV.Wait()\n\t\tif expired {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (cc *ClientConn) resetTransport(closeTransport bool) error {\n\tvar retries int\n\tstart := time.Now()\n\tfor {\n\t\tcc.mu.Lock()\n\t\t\/\/ Check for shutdown before flipping the state to Connecting; checking\n\t\t\/\/ after the assignment below could never observe Shutdown.\n\t\tif cc.state == Shutdown {\n\t\t\tcc.mu.Unlock()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.state = Connecting\n\t\tcc.stateCV.Broadcast()\n\t\tt := cc.transport\n\t\tts := cc.transportSeq\n\t\t\/\/ Avoid wait() picking up a dying transport unnecessarily.\n\t\tcc.transportSeq = 0\n\t\tcc.mu.Unlock()\n\t\tif closeTransport {\n\t\t\tt.Close()\n\t\t}\n\t\t\/\/ Adjust timeout for the current try.\n\t\tcopts := cc.dopts.copts\n\t\tif copts.Timeout < 0 {\n\t\t\tcc.Close()\n\t\t\treturn ErrClientConnTimeout\n\t\t}\n\t\tif copts.Timeout > 0 {\n\t\t\tcopts.Timeout -= time.Since(start)\n\t\t\tif copts.Timeout <= 0 {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t}\n\t\tsleepTime := backoff(retries)\n\t\ttimeout := sleepTime\n\t\tif timeout < minConnectTimeout {\n\t\t\ttimeout = minConnectTimeout\n\t\t}\n\t\tif copts.Timeout == 0 || copts.Timeout > timeout {\n\t\t\tcopts.Timeout = timeout\n\t\t}\n\t\tconnectTime := time.Now()\n\t\tnewTransport, err := transport.NewClientTransport(cc.target, &copts)\n\t\tif err != nil {\n\t\t\tcc.mu.Lock()\n\t\t\tcc.state = TransientFailure\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\t\tsleepTime -= 
time.Since(connectTime)\n\t\t\tif sleepTime < 0 {\n\t\t\t\tsleepTime = 0\n\t\t\t}\n\t\t\t\/\/ Fail early before falling into sleep.\n\t\t\tif cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {\n\t\t\t\tcc.Close()\n\t\t\t\treturn ErrClientConnTimeout\n\t\t\t}\n\t\t\tcloseTransport = false\n\t\t\ttime.Sleep(sleepTime)\n\t\t\tretries++\n\t\t\tgrpclog.Printf(\"grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q\", err, cc.target)\n\t\t\tcontinue\n\t\t}\n\t\tcc.mu.Lock()\n\t\tif cc.state == Shutdown {\n\t\t\t\/\/ cc.Close() has been invoked.\n\t\t\tcc.mu.Unlock()\n\t\t\tnewTransport.Close()\n\t\t\treturn ErrClientConnClosing\n\t\t}\n\t\tcc.state = Ready\n\t\tcc.stateCV.Broadcast()\n\t\tcc.transport = newTransport\n\t\tcc.transportSeq = ts + 1\n\t\tif cc.ready != nil {\n\t\t\tclose(cc.ready)\n\t\t\tcc.ready = nil\n\t\t}\n\t\tcc.mu.Unlock()\n\t\treturn nil\n\t}\n}\n\n\/\/ Run in a goroutine to track the error in transport and create the\n\/\/ new transport if an error happens. It returns when the channel is closing.\nfunc (cc *ClientConn) transportMonitor() {\n\tfor {\n\t\tselect {\n\t\t\/\/ shutdownChan is needed to detect the teardown when\n\t\t\/\/ the ClientConn is idle (i.e., no RPC in flight).\n\t\tcase <-cc.shutdownChan:\n\t\t\treturn\n\t\tcase <-cc.transport.Error():\n\t\t\tcc.mu.Lock()\n\t\t\tcc.state = TransientFailure\n\t\t\tcc.stateCV.Broadcast()\n\t\t\tcc.mu.Unlock()\n\t\t\tif err := cc.resetTransport(true); err != nil {\n\t\t\t\t\/\/ The ClientConn is closing.\n\t\t\t\tgrpclog.Printf(\"grpc: ClientConn.transportMonitor exits due to: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n\/\/ When wait returns, either the new transport is up or ClientConn is\n\/\/ closing. Used to avoid working on a dying transport. It updates and\n\/\/ returns the transport and its version when there is no error.\nfunc (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {\n\tfor {\n\t\tcc.mu.Lock()\n\t\tswitch {\n\t\tcase cc.state == Shutdown:\n\t\t\tcc.mu.Unlock()\n\t\t\treturn nil, 0, ErrClientConnClosing\n\t\tcase ts < cc.transportSeq:\n\t\t\t\/\/ Worked on a dying transport. Try the new one immediately.\n\t\t\tdefer cc.mu.Unlock()\n\t\t\treturn cc.transport, cc.transportSeq, nil\n\t\tdefault:\n\t\t\tready := cc.ready\n\t\t\tif ready == nil {\n\t\t\t\tready = make(chan struct{})\n\t\t\t\tcc.ready = ready\n\t\t\t}\n\t\t\tcc.mu.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, 0, transport.ContextErr(ctx.Err())\n\t\t\t\/\/ Wait until the new transport is ready or failed.\n\t\t\tcase <-ready:\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Close starts to tear down the ClientConn. 
Returns ErrClientConnClosing if\n\/\/ it has been closed (mostly due to dial time-out).\n\/\/ TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in\n\/\/ some edge cases (e.g., the caller opens and closes many ClientConn's in a\n\/\/ tight loop).\nfunc (cc *ClientConn) Close() error {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tif cc.state == Shutdown {\n\t\treturn ErrClientConnClosing\n\t}\n\tcc.state = Shutdown\n\tcc.stateCV.Broadcast()\n\tif cc.ready != nil {\n\t\tclose(cc.ready)\n\t\tcc.ready = nil\n\t}\n\tif cc.transport != nil {\n\t\tcc.transport.Close()\n\t}\n\tif cc.shutdownChan != nil {\n\t\tclose(cc.shutdownChan)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package clix\n\nimport (\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\/v2\"\n)\n\n\/\/ Flag constants declared for CLI use.\nconst (\n\tFlagPort = \"port\"\n\n\tFlagLogFormat = \"log-format\"\n\tFlagLogLevel = \"log-level\"\n\tFlagLogTags = \"log-tags\"\n\n\tFlagStatsDSN = \"stats-dsn\"\n\tFlagStatsPrefix = \"stats-prefix\"\n\tFlagStatsTags = \"stats-tags\"\n\n\tFlagProfiler = \"profiler\"\n\tFlagProfilerPort = \"profiler-port\"\n\n\tFlagKafkaConsumerBrokers = \"kafka-consumer-brokers\"\n\tFlagKafkaConsumerGroupID = \"kafka-consumer-group-id\"\n\tFlagKafkaConsumerTopic = \"kafka-consumer-topic\"\n\tFlagKafkaConsumerKafkaVersion = \"kafka-consumer-kafka-version\"\n\tFlagKafkaProducerBrokers = \"kafka-producer-brokers\"\n\tFlagKafkaProducerTopic = \"kafka-producer-topic\"\n\tFlagKafkaProducerKafkaVersion = \"kafka-producer-kafka-version\"\n\n\tFlagCommitBatch = \"commit-batch\"\n\tFlagCommitInterval = \"commit-interval\"\n\n\tFlagRedisDSN = \"redis-dsn\"\n)\n\ntype defaults struct {\n\tPort string\n\tLogFormat string\n\tLogLevel string\n\n\tProfilerPort string\n}\n\n\/\/ Defaults holds the flag default values.\nvar Defaults = defaults{\n\tPort: \"80\",\n\tLogFormat: \"json\",\n\tLogLevel: \"info\",\n\n\tProfilerPort: \"8081\",\n}\n\n\/\/ Flags represents a set of CLI flags.\ntype Flags []cli.Flag\n\n\/\/ Merge joins one or more Flags together, making a new set.\nfunc (f Flags) Merge(flags ...Flags) Flags {\n\tvar m Flags\n\tm = append(m, f...)\n\tfor _, flag := range flags {\n\t\tm = append(m, flag...)\n\t}\n\n\treturn m\n}\n
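\n\/\/ A hypothetical usage sketch of Merge, combining flag sets for an app that\n\/\/ serves HTTP, consumes from Kafka, and emits logs and stats:\n\/\/\n\/\/\tflags := ServerFlags.Merge(KafkaConsumerFlags, CommonFlags)\n\n\/\/ ServerFlags are flags that configure a server.\nvar ServerFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagPort,\n\t\tValue: Defaults.Port,\n\t\tUsage: \"Port for HTTP server to listen on\",\n\t\tEnvVars: []string{\"PORT\"},\n\t},\n}\n\n\/\/ KafkaConsumerFlags are flags that configure a Kafka consumer.\nvar KafkaConsumerFlags = Flags{\n\t&cli.StringSliceFlag{\n\t\tName: FlagKafkaConsumerBrokers,\n\t\tUsage: \"Kafka consumer brokers.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_BROKERS\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerGroupID,\n\t\tUsage: \"Kafka consumer group id.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_GROUP_ID\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerTopic,\n\t\tUsage: \"Kafka topic to consume from.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_TOPIC\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerKafkaVersion,\n\t\tUsage: \"Kafka version (e.g. 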
0.10.2.0 or 2.3.0).\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_KAFKA_VERSION\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ KafkaProducerFlags are flags that configure a Kafka producer.\nvar KafkaProducerFlags = Flags{\n\t&cli.StringSliceFlag{\n\t\tName: FlagKafkaProducerBrokers,\n\t\tUsage: \"Kafka producer brokers.\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_BROKERS\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaProducerTopic,\n\t\tUsage: \"Kafka topic to produce into.\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_TOPIC\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaProducerKafkaVersion,\n\t\tUsage: \"Kafka version (e.g. 0.10.2.0 or 2.3.0).\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_KAFKA_VERSION\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ CommitterFlags are flags that configure message processing batch size and committing interval.\nvar CommitterFlags = Flags{\n\t&cli.IntFlag{\n\t\tName: FlagCommitBatch,\n\t\tValue: 500,\n\t\tUsage: \"Commit batch size for message processing.\",\n\t\tEnvVars: []string{\"COMMIT_BATCH\"},\n\t},\n\t&cli.DurationFlag{\n\t\tName: FlagCommitInterval,\n\t\tValue: 1 * time.Second,\n\t\tUsage: \"Commit interval for message processing.\",\n\t\tEnvVars: []string{\"COMMIT_INTERVAL\"},\n\t},\n}\n\n\/\/ RedisFlags are flags that configure Redis.\nvar RedisFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagRedisDSN,\n\t\tUsage: \"The DSN of Redis.\",\n\t\tEnvVars: []string{\"REDIS_DSN\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ CommonFlags are flags that configure logging and stats.\nvar CommonFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagLogFormat,\n\t\tValue: Defaults.LogFormat,\n\t\tUsage: \"Specify the format of logs. Supported formats: 'terminal', 'json'\",\n\t\tEnvVars: []string{\"LOG_FORMAT\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagLogLevel,\n\t\tValue: Defaults.LogLevel,\n\t\tUsage: \"Specify the log level. E.g. 'debug', 'warning'.\",\n\t\tEnvVars: []string{\"LOG_LEVEL\"},\n\t},\n\t&cli.StringSliceFlag{\n\t\tName: FlagLogTags,\n\t\tUsage: \"A list of tags appended to every log. Format: key=value.\",\n\t\tEnvVars: []string{\"LOG_TAGS\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagStatsDSN,\n\t\tUsage: \"The URL of a stats backend.\",\n\t\tEnvVars: []string{\"STATS_DSN\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagStatsPrefix,\n\t\tUsage: \"The prefix of the measurement names.\",\n\t\tEnvVars: []string{\"STATS_PREFIX\"},\n\t},\n\t&cli.StringSliceFlag{\n\t\tName: FlagStatsTags,\n\t\tUsage: \"A list of tags appended to every measurement. Format: key=value.\",\n\t\tEnvVars: []string{\"STATS_TAGS\"},\n\t},\n}\n\n\/\/ ProfilerFlags are flags that configure the profiler.\nvar ProfilerFlags = Flags{\n\t&cli.BoolFlag{\n\t\tName: FlagProfiler,\n\t\tUsage: \"Enable profiler server.\",\n\t\tEnvVars: []string{\"PROFILER\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagProfilerPort,\n\t\tValue: Defaults.ProfilerPort,\n\t\tUsage: \"Port for the profiler to listen on.\",\n\t\tEnvVars: []string{\"PROFILER_PORT\"},\n\t},\n}\n[FEATURE] Added redis addrs flag for cluster redis. 
(#156)package clix\n\nimport (\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\/v2\"\n)\n\n\/\/ Flag constants declared for CLI use.\nconst (\n\tFlagPort = \"port\"\n\n\tFlagLogFormat = \"log-format\"\n\tFlagLogLevel = \"log-level\"\n\tFlagLogTags = \"log-tags\"\n\n\tFlagStatsDSN = \"stats-dsn\"\n\tFlagStatsPrefix = \"stats-prefix\"\n\tFlagStatsTags = \"stats-tags\"\n\n\tFlagProfiler = \"profiler\"\n\tFlagProfilerPort = \"profiler-port\"\n\n\tFlagKafkaConsumerBrokers = \"kafka-consumer-brokers\"\n\tFlagKafkaConsumerGroupID = \"kafka-consumer-group-id\"\n\tFlagKafkaConsumerTopic = \"kafka-consumer-topic\"\n\tFlagKafkaConsumerKafkaVersion = \"kafka-consumer-kafka-version\"\n\tFlagKafkaProducerBrokers = \"kafka-producer-brokers\"\n\tFlagKafkaProducerTopic = \"kafka-producer-topic\"\n\tFlagKafkaProducerKafkaVersion = \"kafka-producer-kafka-version\"\n\n\tFlagCommitBatch = \"commit-batch\"\n\tFlagCommitInterval = \"commit-interval\"\n\n\tFlagRedisDSN = \"redis-dsn\"\n\tFlagRedisAddrs = \"redis-addrs\"\n)\n\ntype defaults struct {\n\tPort string\n\tLogFormat string\n\tLogLevel string\n\n\tProfilerPort string\n}\n\n\/\/ Defaults holds the flag default values.\nvar Defaults = defaults{\n\tPort: \"80\",\n\tLogFormat: \"json\",\n\tLogLevel: \"info\",\n\n\tProfilerPort: \"8081\",\n}\n\n\/\/ Flags represents a set of CLI flags.\ntype Flags []cli.Flag\n\n\/\/ Merge joins one or more Flags together, making a new set.\nfunc (f Flags) Merge(flags ...Flags) Flags {\n\tvar m Flags\n\tm = append(m, f...)\n\tfor _, flag := range flags {\n\t\tm = append(m, flag...)\n\t}\n\n\treturn m\n}\n\n\/\/ ServerFlags are flags that configure a server.\nvar ServerFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagPort,\n\t\tValue: Defaults.Port,\n\t\tUsage: \"Port for HTTP server to listen on\",\n\t\tEnvVars: []string{\"PORT\"},\n\t},\n}\n\n\/\/ KafkaConsumerFlags are flags that configure a Kafka consumer.\nvar KafkaConsumerFlags = Flags{\n\t&cli.StringSliceFlag{\n\t\tName: FlagKafkaConsumerBrokers,\n\t\tUsage: \"Kafka consumer brokers.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_BROKERS\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerGroupID,\n\t\tUsage: \"Kafka consumer group id.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_GROUP_ID\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerTopic,\n\t\tUsage: \"Kafka topic to consume from.\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_TOPIC\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaConsumerKafkaVersion,\n\t\tUsage: \"Kafka version (e.g. 0.10.2.0 or 2.3.0).\",\n\t\tEnvVars: []string{\"KAFKA_CONSUMER_KAFKA_VERSION\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ KafkaProducerFlags are flags that configure a Kafka producer.\nvar KafkaProducerFlags = Flags{\n\t&cli.StringSliceFlag{\n\t\tName: FlagKafkaProducerBrokers,\n\t\tUsage: \"Kafka producer brokers.\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_BROKERS\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaProducerTopic,\n\t\tUsage: \"Kafka topic to produce into.\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_TOPIC\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagKafkaProducerKafkaVersion,\n\t\tUsage: \"Kafka version (e.g. 
0.10.2.0 or 2.3.0).\",\n\t\tEnvVars: []string{\"KAFKA_PRODUCER_KAFKA_VERSION\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ CommitterFlags are flags that configure message processing batch size and committing interval.\nvar CommitterFlags = Flags{\n\t&cli.IntFlag{\n\t\tName: FlagCommitBatch,\n\t\tValue: 500,\n\t\tUsage: \"Commit batch size for message processing.\",\n\t\tEnvVars: []string{\"COMMIT_BATCH\"},\n\t},\n\t&cli.DurationFlag{\n\t\tName: FlagCommitInterval,\n\t\tValue: 1 * time.Second,\n\t\tUsage: \"Commit interval for message processing.\",\n\t\tEnvVars: []string{\"COMMIT_INTERVAL\"},\n\t},\n}\n\n\/\/ RedisFlags are flags that configure Redis.\nvar RedisFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagRedisDSN,\n\t\tUsage: \"The DSN of Redis.\",\n\t\tEnvVars: []string{\"REDIS_DSN\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ RedisClusterFlags are flags that configure Redis cluster.\nvar RedisClusterFlags = Flags{\n\t&cli.StringSliceFlag{\n\t\tName: FlagRedisAddrs,\n\t\tUsage: \"Addresses of the Redis cluster.\",\n\t\tEnvVars: []string{\"REDIS_ADDRS\"},\n\t\tRequired: true,\n\t},\n}\n\n\/\/ CommonFlags are flags that configure logging and stats.\nvar CommonFlags = Flags{\n\t&cli.StringFlag{\n\t\tName: FlagLogFormat,\n\t\tValue: Defaults.LogFormat,\n\t\tUsage: \"Specify the format of logs. Supported formats: 'terminal', 'json'\",\n\t\tEnvVars: []string{\"LOG_FORMAT\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagLogLevel,\n\t\tValue: Defaults.LogLevel,\n\t\tUsage: \"Specify the log level. E.g. 'debug', 'warning'.\",\n\t\tEnvVars: []string{\"LOG_LEVEL\"},\n\t},\n\t&cli.StringSliceFlag{\n\t\tName: FlagLogTags,\n\t\tUsage: \"A list of tags appended to every log. Format: key=value.\",\n\t\tEnvVars: []string{\"LOG_TAGS\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagStatsDSN,\n\t\tUsage: \"The URL of a stats backend.\",\n\t\tEnvVars: []string{\"STATS_DSN\"},\n\t\tRequired: true,\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagStatsPrefix,\n\t\tUsage: \"The prefix of the measurement names.\",\n\t\tEnvVars: []string{\"STATS_PREFIX\"},\n\t},\n\t&cli.StringSliceFlag{\n\t\tName: FlagStatsTags,\n\t\tUsage: \"A list of tags appended to every measurement. Format: key=value.\",\n\t\tEnvVars: []string{\"STATS_TAGS\"},\n\t},\n}\n\n\/\/ ProfilerFlags are flags that configure the profiler.\nvar ProfilerFlags = Flags{\n\t&cli.BoolFlag{\n\t\tName: FlagProfiler,\n\t\tUsage: \"Enable profiler server.\",\n\t\tEnvVars: []string{\"PROFILER\"},\n\t},\n\t&cli.StringFlag{\n\t\tName: FlagProfilerPort,\n\t\tValue: Defaults.ProfilerPort,\n\t\tUsage: \"Port for the profiler to listen on.\",\n\t\tEnvVars: []string{\"PROFILER_PORT\"},\n\t},\n}\n
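\n\/\/ A hypothetical wiring sketch (the app name and chosen flag sets are\n\/\/ illustrative): the merged set is handed to a urfave\/cli app, and values are\n\/\/ read back in the Action by flag name.\n\/\/\n\/\/\tapp := &cli.App{\n\/\/\t\tName: \"consumer\",\n\/\/\t\tFlags: CommonFlags.Merge(KafkaConsumerFlags, CommitterFlags, RedisClusterFlags),\n\/\/\t\tAction: func(c *cli.Context) error {\n\/\/\t\t\tbrokers := c.StringSlice(FlagKafkaConsumerBrokers)\n\/\/\t\t\tinterval := c.Duration(FlagCommitInterval)\n\/\/\t\t\t_, _ = brokers, interval\n\/\/\t\t\treturn nil\n\/\/\t\t},\n\/\/\t}\n<|endoftext|>"} {"text":"\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. 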
If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]\n\npackage client \/* import "mig.ninja\/mig\/client" *\/\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\ntype CommandLocation struct {\n\tEndpoint string `json:\"endpoint\"`\n\tCommandID float64 `json:\"commandid\"`\n\tActionID float64 `json:\"actionid\"`\n\tFoundAnything bool `json:\"foundanything\"`\n\tConnectionsTo []string `json:\"connections_to\"`\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n\tCity string `json:\"city\"`\n\tCountry string `json:\"country\"`\n}\n\nfunc ValueToLocation(v interface{}) (cl CommandLocation, err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"ValueToLocation() -> %v\", e)\n\t\t}\n\t}()\n\tbData, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = json.Unmarshal(bData, &cl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn\n}\n
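\n\/\/ A hypothetical usage sketch: v is typically a value decoded from JSON, e.g.\n\/\/ one element of a command's results array.\n\/\/\n\/\/\tloc, err := ValueToLocation(v)\n\/\/\tif err == nil {\n\/\/\t\tfmt.Println(loc.Endpoint, loc.City, loc.Country)\n\/\/\t}\n\nfunc PrintMap(locs []CommandLocation, title string) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"PrintMap() -> %v\", e)\n\t\t}\n\t}()\n\tgmap := makeMapHeader(title)\n\tlocs = singularizeLocations(locs)\n\tdata, err := json.Marshal(locs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgmap += fmt.Sprintf(`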