{"text":"package stagosaurus\n\nimport (\n\t\"testing\"\n)\n\nfunc TestFileSystemImpl(t *testing.T) {\n\tconfig := EmptyConfig()\n\tconfig.Set(\"source-dir\", \".\")\n\n\tfs, err := NewFileSystem(config)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tvar configTest Config = fs\n\tif nil == configTest {\n\t\tt.Error(WTF)\n\t}\n\n\tres := fs.Find(func(k interface{}, v interface{}) bool {\n\t\t\/\/ fmt.Println(v.(*File).Name())\n\t\treturn v.(*File).Name() == \"io_test.go\"\n\t})\n\n\tcfg := ConfigFromMap(res)\n\tf := cfg.Get(\"io_test.go\")\n\tif f == nil {\n\t\tt.Error(\"filtering by filename had been broken\")\n\t}\n\n\tfile := f.(*File)\n\tcontent := string(*file.Contents(\".\"))\n\tif content == \"\" {\n\t\tt.Error(\"file hasn't been read\")\n\t}\n}\nsilly typo :hatched_chick:package stagosaurus\n\nimport (\n\t\"testing\"\n)\n\nfunc TestFileSystemImpl(t *testing.T) {\n\tconfig := EmptyConfig()\n\tconfig.Set(\"source-dir\", \".\")\n\n\tfs, err := NewFileSystem(config)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tvar configTest Config = fs\n\tif nil == configTest {\n\t\tt.Error(WTF)\n\t}\n\n\tres := fs.Find(func(k interface{}, v interface{}) bool {\n\t\treturn v.(*File).Name() == \"io_test.go\"\n\t})\n\n\tcfg := ConfigFromMap(res)\n\tf := cfg.Get(\"io_test.go\")\n\tif f == nil {\n\t\tt.Error(\"filtering by filename had been broken\")\n\t}\n\n\tfile := f.(*File)\n\tcontent := string(*file.Contents(\".\"))\n\tif content == \"\" {\n\t\tt.Error(\"file hasn't been read\")\n\t}\n}\n<|endoftext|>"} {"text":"package ipc\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/op\/go-logging\"\n\t\"io\"\n\t\"reflect\"\n)\n\nconst maxFdCount = 3\nconst maxMessageSz = 128 * 1024\nconst bufferSz = 1024\n\ntype MsgConn struct {\n\tlog *logging.Logger\n\tconn *net.UnixConn\n\tbuf []byte\n\toob []byte\n\tdisp *msgDispatcher\n\tfactory MsgFactory\n\tisClosed bool\n\tidGen <-chan int\n\trespMan *responseManager\n\tonClose func()\n}\n\ntype MsgServer struct {\n\tlog *logging.Logger\n\tdisp *msgDispatcher\n\tfactory MsgFactory\n\tlistener *net.UnixListener\n\tdone chan bool\n\tidGen <-chan int\n}\n\nfunc NewServer(address string, factory MsgFactory, log *logging.Logger, handlers ...interface{}) (*MsgServer, error) {\n\tmd, err := createDispatcher(log, handlers...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlistener, err := net.ListenUnix(\"unix\", &net.UnixAddr{address, \"unix\"})\n\tif err != nil {\n\t\tmd.close()\n\t\treturn nil, err\n\t}\n\tif err := setPassCred(listener); err != nil {\n\t\treturn nil, errors.New(\"Failed to set SO_PASSCRED on listening socket: \" + err.Error())\n\t}\n\tdone := make(chan bool)\n\tidGen := newIdGen(done)\n\treturn &MsgServer{\n\t\tlog: log,\n\t\tdisp: md,\n\t\tfactory: factory,\n\t\tlistener: listener,\n\t\tdone: done,\n\t\tidGen: idGen,\n\t}, nil\n}\n\nfunc (s *MsgServer) Run() error {\n\tfor {\n\t\tconn, err := s.listener.AcceptUnix()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := setPassCred(conn); err != nil {\n\t\t\treturn errors.New(\"Failed to set SO_PASSCRED on accepted socket connection:\" + err.Error())\n\t\t}\n\t\tmc := &MsgConn{\n\t\t\tlog: s.log,\n\t\t\tconn: conn,\n\t\t\tdisp: s.disp,\n\t\t\tbuf: make([]byte, bufferSz),\n\t\t\toob: createOobBuffer(),\n\t\t\tfactory: s.factory,\n\t\t\tidGen: s.idGen,\n\t\t\trespMan: newResponseManager(),\n\t\t}\n\t\tgo mc.readLoop()\n\t}\n\treturn nil\n}\n\nfunc (s *MsgServer) Close() error 
{\n\ts.disp.close()\n\tclose(s.done)\n\treturn s.listener.Close()\n}\n\nfunc Connect(address string, factory MsgFactory, log *logging.Logger, handlers ...interface{}) (*MsgConn, error) {\n\tmd, err := createDispatcher(log, handlers...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := net.DialUnix(\"unix\", nil, &net.UnixAddr{address, \"unix\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdone := make(chan bool)\n\tidGen := newIdGen(done)\n\tmc := &MsgConn{\n\t\tlog: log,\n\t\tconn: conn,\n\t\tdisp: md,\n\t\toob: createOobBuffer(),\n\t\tfactory: factory,\n\t\tidGen: idGen,\n\t\trespMan: newResponseManager(),\n\t\tonClose: func() {\n\t\t\tmd.close()\n\t\t\tclose(done)\n\t\t},\n\t}\n\tgo mc.readLoop()\n\treturn mc, nil\n}\n\nfunc newIdGen(done <-chan bool) <-chan int {\n\tch := make(chan int)\n\tgo idGenLoop(done, ch)\n\treturn ch\n}\n\nfunc idGenLoop(done <-chan bool, out chan<- int) {\n\tcurrent := int(1)\n\tfor {\n\t\tselect {\n\t\tcase out <- current:\n\t\t\tcurrent += 1\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (mc *MsgConn) readLoop() {\n\tfor {\n\t\tif mc.processOneMessage() {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (mc *MsgConn) logger() *logging.Logger {\n\tif mc.log != nil {\n\t\treturn mc.log\n\t}\n\treturn defaultLog\n}\n\nfunc (mc *MsgConn) processOneMessage() bool {\n\tm, err := mc.readMessage()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tmc.Close()\n\t\t\treturn true\n\t\t}\n\t\tif !mc.isClosed {\n\t\t\tmc.logger().Warning(\"error on MsgConn.readMessage(): %v\", err)\n\t\t}\n\t\treturn true\n\t}\n\tif !mc.respMan.handle(m) {\n\t\tmc.disp.dispatch(m)\n\t}\n\treturn false\n}\n\nfunc (mc *MsgConn) Close() error {\n\tmc.isClosed = true\n\tif mc.onClose != nil {\n\t\tmc.onClose()\n\t}\n\treturn mc.conn.Close()\n}\n\nfunc createOobBuffer() []byte {\n\toobSize := syscall.CmsgSpace(syscall.SizeofUcred) + syscall.CmsgSpace(4*maxFdCount)\n\treturn make([]byte, oobSize)\n}\n\nfunc (mc *MsgConn) readMessage() (*Message, error) {\n\tvar szbuf [4]byte\n\tn, oobn, _, _, err := mc.conn.ReadMsgUnix(szbuf[:], mc.oob)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsz := binary.BigEndian.Uint32(szbuf[:])\n\tif sz > maxMessageSz {\n\t\treturn nil, fmt.Errorf(\"message size of (%d) exceeds maximum message size (%d)\", sz, maxMessageSz)\n\t}\n\tif sz > uint32(len(mc.buf)) {\n\t\tmc.buf = make([]byte, sz)\n\t}\n\tn, _, _, _, err = mc.conn.ReadMsgUnix(mc.buf[:sz], nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := mc.parseMessage(mc.buf[:n])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.mconn = mc\n\n\tif oobn > 0 {\n\t\terr := m.parseControlData(mc.oob[:oobn])\n\t\tif err != nil {\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ AddHandlers registers a list of message handling functions with a MsgConn instance.\n\/\/ Each handler function must have two arguments and return a single error value. The\n\/\/ first argument must be pointer to a message structure type. A message structure type\n\/\/ is a structure that must have a struct tag on the first field:\n\/\/\n\/\/ type FooMsg struct {\n\/\/ Stuff string \"Foo\" \/\/ <------ struct tag\n\/\/ \/\/ etc...\n\/\/ }\n\/\/\n\/\/ type SimpleMsg struct {\n\/\/ dummy int \"Simple\" \/\/ struct has no fields, so add an unexported dummy field just for the tag\n\/\/ }\n\/\/\n\/\/ The second argument to a handler function must have type *ipc.Message. 
After a handler function\n\/\/ has been registered, received messages matching the first argument will be dispatched to the corresponding\n\/\/ handler function.\n\/\/\n\/\/ func fooHandler(foo *FooMsg, msg *ipc.Message) error { \/* ... *\/ }\n\/\/ func simpleHandler(simple *SimpleMsg, msg *ipc.Message) error { \/* ... *\/ }\n\/\/\n\/\/ \/* register fooHandler() to handle incoming FooMsg and simpleHandler() to handle SimpleMsg *\/\n\/\/ conn.AddHandlers(fooHandler, simpleHandler)\n\/\/\n\nfunc (mc *MsgConn) AddHandlers(args ...interface{}) error {\n\tfor len(args) > 0 {\n\t\tif err := mc.disp.hmap.addHandler(args[0]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\targs = args[1:]\n\t}\n\treturn nil\n}\n\nfunc (mc *MsgConn) SendMsg(msg interface{}, fds ...int) error {\n\treturn mc.sendMessage(msg, <-mc.idGen, fds...)\n}\n\nfunc (mc *MsgConn) ExchangeMsg(msg interface{}, fds ...int) (ResponseReader, error) {\n\tid := <-mc.idGen\n\trr := mc.respMan.register(id)\n\n\tif err := mc.sendMessage(msg, id, fds...); err != nil {\n\t\trr.Done()\n\t\treturn nil, err\n\t}\n\treturn rr, nil\n}\n\nfunc (mc *MsgConn) sendMessage(msg interface{}, msgID int, fds ...int) error {\n\tmsgType, err := getMessageType(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbase, err := mc.newBaseMessage(msgType, msgID, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\traw, err := json.Marshal(base)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := make([]byte, len(raw)+4)\n\tbinary.BigEndian.PutUint32(buf, uint32(len(raw)))\n\tcopy(buf[4:], raw)\n\treturn mc.sendRaw(buf, fds...)\n}\n\nfunc getMessageType(msg interface{}) (string, error) {\n\tt := reflect.TypeOf(msg)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\tif t.Kind() != reflect.Struct {\n\t\treturn \"\", fmt.Errorf(\"sendMessage() msg (%T) is not a struct\", msg)\n\t}\n\tif t.NumField() == 0 || len(t.Field(0).Tag) == 0 {\n\t\treturn \"\", fmt.Errorf(\"sendMessage() msg struct (%T) does not have tag on first field\", msg)\n\t}\n\treturn string(t.Field(0).Tag), nil\n}\n\nfunc (mc *MsgConn) newBaseMessage(msgType string, msgID int, body interface{}) (*BaseMsg, error) {\n\tbodyBytes, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := new(BaseMsg)\n\tbase.Type = msgType\n\tbase.MsgID = msgID\n\tbase.Body = bodyBytes\n\treturn base, nil\n}\n\nfunc (mc *MsgConn) sendRaw(data []byte, fds ...int) error {\n\tif len(fds) > 0 {\n\t\treturn mc.sendWithFds(data, fds)\n\t}\n\t_, err := mc.conn.Write(data)\n\treturn err\n}\n\nfunc (mc *MsgConn) sendWithFds(data []byte, fds []int) error {\n\toob := syscall.UnixRights(fds...)\n\t_, _, err := mc.conn.WriteMsgUnix(data, oob, nil)\n\treturn err\n}\ndon't return error from MsgServer.Run() when Close() is calledpackage ipc\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/op\/go-logging\"\n\t\"io\"\n\t\"reflect\"\n)\n\nconst maxFdCount = 3\nconst maxMessageSz = 128 * 1024\nconst bufferSz = 1024\n\ntype MsgConn struct {\n\tlog *logging.Logger\n\tconn *net.UnixConn\n\tbuf []byte\n\toob []byte\n\tdisp *msgDispatcher\n\tfactory MsgFactory\n\tisClosed bool\n\tidGen <-chan int\n\trespMan *responseManager\n\tonClose func()\n}\n\ntype MsgServer struct {\n\tisClosed bool\n\tlog *logging.Logger\n\tdisp *msgDispatcher\n\tfactory MsgFactory\n\tlistener *net.UnixListener\n\tdone chan bool\n\tidGen <-chan int\n}\n\nfunc NewServer(address string, factory MsgFactory, log *logging.Logger, handlers ...interface{}) (*MsgServer, error) {\n\tmd, err := 
createDispatcher(log, handlers...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlistener, err := net.ListenUnix(\"unix\", &net.UnixAddr{address, \"unix\"})\n\tif err != nil {\n\t\tmd.close()\n\t\treturn nil, err\n\t}\n\tif err := setPassCred(listener); err != nil {\n\t\treturn nil, errors.New(\"Failed to set SO_PASSCRED on listening socket: \" + err.Error())\n\t}\n\tdone := make(chan bool)\n\tidGen := newIdGen(done)\n\treturn &MsgServer{\n\t\tlog: log,\n\t\tdisp: md,\n\t\tfactory: factory,\n\t\tlistener: listener,\n\t\tdone: done,\n\t\tidGen: idGen,\n\t}, nil\n}\n\nfunc (s *MsgServer) Run() error {\n\tfor !s.isClosed {\n\t\tconn, err := s.listener.AcceptUnix()\n\t\tif err != nil {\n\t\t\tif s.isClosed {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif err := setPassCred(conn); err != nil {\n\t\t\treturn errors.New(\"Failed to set SO_PASSCRED on accepted socket connection:\" + err.Error())\n\t\t}\n\t\tmc := &MsgConn{\n\t\t\tlog: s.log,\n\t\t\tconn: conn,\n\t\t\tdisp: s.disp,\n\t\t\tbuf: make([]byte, bufferSz),\n\t\t\toob: createOobBuffer(),\n\t\t\tfactory: s.factory,\n\t\t\tidGen: s.idGen,\n\t\t\trespMan: newResponseManager(),\n\t\t}\n\t\tgo mc.readLoop()\n\t}\n\treturn nil\n}\n\nfunc (s *MsgServer) Close() error {\n\tif s.isClosed {\n\t\treturn nil\n\t}\n\ts.isClosed = true\n\ts.disp.close()\n\tclose(s.done)\n\treturn s.listener.Close()\n}\n\nfunc Connect(address string, factory MsgFactory, log *logging.Logger, handlers ...interface{}) (*MsgConn, error) {\n\tmd, err := createDispatcher(log, handlers...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := net.DialUnix(\"unix\", nil, &net.UnixAddr{address, \"unix\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdone := make(chan bool)\n\tidGen := newIdGen(done)\n\tmc := &MsgConn{\n\t\tlog: log,\n\t\tconn: conn,\n\t\tdisp: md,\n\t\toob: createOobBuffer(),\n\t\tfactory: factory,\n\t\tidGen: idGen,\n\t\trespMan: newResponseManager(),\n\t\tonClose: func() {\n\t\t\tmd.close()\n\t\t\tclose(done)\n\t\t},\n\t}\n\tgo mc.readLoop()\n\treturn mc, nil\n}\n\nfunc newIdGen(done <-chan bool) <-chan int {\n\tch := make(chan int)\n\tgo idGenLoop(done, ch)\n\treturn ch\n}\n\nfunc idGenLoop(done <-chan bool, out chan<- int) {\n\tcurrent := int(1)\n\tfor {\n\t\tselect {\n\t\tcase out <- current:\n\t\t\tcurrent += 1\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (mc *MsgConn) readLoop() {\n\tfor {\n\t\tif mc.processOneMessage() {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (mc *MsgConn) logger() *logging.Logger {\n\tif mc.log != nil {\n\t\treturn mc.log\n\t}\n\treturn defaultLog\n}\n\nfunc (mc *MsgConn) processOneMessage() bool {\n\tm, err := mc.readMessage()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tmc.Close()\n\t\t\treturn true\n\t\t}\n\t\tif !mc.isClosed {\n\t\t\tmc.logger().Warning(\"error on MsgConn.readMessage(): %v\", err)\n\t\t}\n\t\treturn true\n\t}\n\tif !mc.respMan.handle(m) {\n\t\tmc.disp.dispatch(m)\n\t}\n\treturn false\n}\n\nfunc (mc *MsgConn) Close() error {\n\tmc.isClosed = true\n\tif mc.onClose != nil {\n\t\tmc.onClose()\n\t}\n\treturn mc.conn.Close()\n}\n\nfunc createOobBuffer() []byte {\n\toobSize := syscall.CmsgSpace(syscall.SizeofUcred) + syscall.CmsgSpace(4*maxFdCount)\n\treturn make([]byte, oobSize)\n}\n\nfunc (mc *MsgConn) readMessage() (*Message, error) {\n\tvar szbuf [4]byte\n\tn, oobn, _, _, err := mc.conn.ReadMsgUnix(szbuf[:], mc.oob)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsz := binary.BigEndian.Uint32(szbuf[:])\n\tif sz > maxMessageSz {\n\t\treturn nil, fmt.Errorf(\"message size of (%d) 
exceeds maximum message size (%d)\", sz, maxMessageSz)\n\t}\n\tif sz > uint32(len(mc.buf)) {\n\t\tmc.buf = make([]byte, sz)\n\t}\n\tn, _, _, _, err = mc.conn.ReadMsgUnix(mc.buf[:sz], nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := mc.parseMessage(mc.buf[:n])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.mconn = mc\n\n\tif oobn > 0 {\n\t\terr := m.parseControlData(mc.oob[:oobn])\n\t\tif err != nil {\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ AddHandlers registers a list of message handling functions with a MsgConn instance.\n\/\/ Each handler function must have two arguments and return a single error value. The\n\/\/ first argument must be pointer to a message structure type. A message structure type\n\/\/ is a structure that must have a struct tag on the first field:\n\/\/\n\/\/ type FooMsg struct {\n\/\/ Stuff string \"Foo\" \/\/ <------ struct tag\n\/\/ \/\/ etc...\n\/\/ }\n\/\/\n\/\/ type SimpleMsg struct {\n\/\/ dummy int \"Simple\" \/\/ struct has no fields, so add an unexported dummy field just for the tag\n\/\/ }\n\/\/\n\/\/ The second argument to a handler function must have type *ipc.Message. After a handler function\n\/\/ has been registered, received messages matching the first argument will be dispatched to the corresponding\n\/\/ handler function.\n\/\/\n\/\/ func fooHandler(foo *FooMsg, msg *ipc.Message) error { \/* ... *\/ }\n\/\/ func simpleHandler(simple *SimpleMsg, msg *ipc.Message) error { \/* ... *\/ }\n\/\/\n\/\/ \/* register fooHandler() to handle incoming FooMsg and simpleHandler() to handle SimpleMsg *\/\n\/\/ conn.AddHandlers(fooHandler, simpleHandler)\n\/\/\n\nfunc (mc *MsgConn) AddHandlers(args ...interface{}) error {\n\tfor len(args) > 0 {\n\t\tif err := mc.disp.hmap.addHandler(args[0]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\targs = args[1:]\n\t}\n\treturn nil\n}\n\nfunc (mc *MsgConn) SendMsg(msg interface{}, fds ...int) error {\n\treturn mc.sendMessage(msg, <-mc.idGen, fds...)\n}\n\nfunc (mc *MsgConn) ExchangeMsg(msg interface{}, fds ...int) (ResponseReader, error) {\n\tid := <-mc.idGen\n\trr := mc.respMan.register(id)\n\n\tif err := mc.sendMessage(msg, id, fds...); err != nil {\n\t\trr.Done()\n\t\treturn nil, err\n\t}\n\treturn rr, nil\n}\n\nfunc (mc *MsgConn) sendMessage(msg interface{}, msgID int, fds ...int) error {\n\tmsgType, err := getMessageType(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbase, err := mc.newBaseMessage(msgType, msgID, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\traw, err := json.Marshal(base)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := make([]byte, len(raw)+4)\n\tbinary.BigEndian.PutUint32(buf, uint32(len(raw)))\n\tcopy(buf[4:], raw)\n\treturn mc.sendRaw(buf, fds...)\n}\n\nfunc getMessageType(msg interface{}) (string, error) {\n\tt := reflect.TypeOf(msg)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\tif t.Kind() != reflect.Struct {\n\t\treturn \"\", fmt.Errorf(\"sendMessage() msg (%T) is not a struct\", msg)\n\t}\n\tif t.NumField() == 0 || len(t.Field(0).Tag) == 0 {\n\t\treturn \"\", fmt.Errorf(\"sendMessage() msg struct (%T) does not have tag on first field\", msg)\n\t}\n\treturn string(t.Field(0).Tag), nil\n}\n\nfunc (mc *MsgConn) newBaseMessage(msgType string, msgID int, body interface{}) (*BaseMsg, error) {\n\tbodyBytes, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := new(BaseMsg)\n\tbase.Type = msgType\n\tbase.MsgID = msgID\n\tbase.Body = bodyBytes\n\treturn base, nil\n}\n\nfunc (mc *MsgConn) sendRaw(data []byte, fds ...int) error {\n\tif len(fds) > 0 
{\n\t\treturn mc.sendWithFds(data, fds)\n\t}\n\t_, err := mc.conn.Write(data)\n\treturn err\n}\n\nfunc (mc *MsgConn) sendWithFds(data []byte, fds []int) error {\n\toob := syscall.UnixRights(fds...)\n\t_, _, err := mc.conn.WriteMsgUnix(data, oob, nil)\n\treturn err\n}\n<|endoftext|>"} {"text":"\/\/ Package irc implements IRC handlers for github.com\/go-chat-bot\/bot\npackage irc\n\nimport (\n\t\"crypto\/tls\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/go-chat-bot\/bot\"\n\tircevent \"github.com\/thoj\/go-ircevent\"\n)\n\n\/\/ Config must contain the necessary data to connect to an IRC server\ntype Config struct {\n\tServer string \/\/ IRC server:port. Ex: ircevent.freenode.org:7000\n\tChannels []string \/\/ Channels to connect. Ex: []string{\"#go-bot\", \"#channel mypassword\"}\n\tUser string \/\/ The IRC username the bot will use\n\tNick string \/\/ The nick the bot will use\n\tPassword string \/\/ Server password\n\tUseTLS bool \/\/ Should connect using TLS?\n\tTLSServerName string \/\/ Must supply if UseTLS is true\n\tDebug bool \/\/ This will log all IRC communication to standard output\n}\n\nvar (\n\tircConn *ircevent.Connection\n\tconfig *Config\n\tb *bot.Bot\n)\n\nfunc responseHandler(target string, message string, sender *bot.User) {\n\tchannel := target\n\tif ircConn.GetNick() == target {\n\t\tchannel = sender.Nick\n\t}\n\t\/\/Return multiple lines if message contains \\n\n\tif strings.Contains(message, \"\\n\") {\n\t\tstrarray := strings.Split(message, \"\\n\")\n\t\tfor _, tmpmessage := range strarray {\n\t\t\tircConn.Privmsg(channel, tmpmessage)\n\t\t}\n\t\treturn\n\t}\n\tircConn.Privmsg(channel, message)\n}\n\nfunc onPRIVMSG(e *ircevent.Event) {\n\tb.MessageReceived(\n\t\t&bot.ChannelData{\n\t\t\tProtocol: \"irc\",\n\t\t\tServer: ircConn.Server,\n\t\t\tChannel: e.Arguments[0],\n\t\t\tIsPrivate: e.Arguments[0] == ircConn.GetNick()},\n\t\te.Message(),\n\t\t&bot.User{\n\t\t\tID: e.Host,\n\t\t\tNick: e.Nick,\n\t\t\tRealName: e.User})\n}\n\nfunc getServerName(server string) string {\n\tseparatorIndex := strings.LastIndex(server, \":\")\n\tif separatorIndex != -1 {\n\t\treturn server[:separatorIndex]\n\t}\n\treturn server\n}\n\nfunc onWelcome(e *ircevent.Event) {\n\tfor _, channel := range config.Channels {\n\t\tircConn.Join(channel)\n\t}\n}\n\n\/\/ Run reads the Config, connects to the specified IRC server and starts the bot.\n\/\/ The bot will automatically join all the channels specified in the configuration\nfunc Run(c *Config) {\n\tconfig = c\n\n\tircConn = ircevent.IRC(c.User, c.Nick)\n\tircConn.Password = c.Password\n\tircConn.UseTLS = c.UseTLS\n\tircConn.TLSConfig = &tls.Config{\n\t\tServerName: getServerName(c.Server),\n\t}\n\tircConn.VerboseCallbackHandler = c.Debug\n\n\tb = bot.New(&bot.Handlers{\n\t\tResponse: responseHandler,\n\t})\n\n\tircConn.AddCallback(\"001\", onWelcome)\n\tircConn.AddCallback(\"PRIVMSG\", onPRIVMSG)\n\n\terr := ircConn.Connect(c.Server)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tircConn.Loop()\n}\nRefactors IRC message handling (#52)\/\/ Package irc implements IRC handlers for github.com\/go-chat-bot\/bot\npackage irc\n\nimport (\n\t\"crypto\/tls\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/go-chat-bot\/bot\"\n\tircevent \"github.com\/thoj\/go-ircevent\"\n)\n\n\/\/ Config must contain the necessary data to connect to an IRC server\ntype Config struct {\n\tServer string \/\/ IRC server:port. Ex: ircevent.freenode.org:7000\n\tChannels []string \/\/ Channels to connect. 
Ex: []string{\"#go-bot\", \"#channel mypassword\"}\n\tUser string \/\/ The IRC username the bot will use\n\tNick string \/\/ The nick the bot will use\n\tPassword string \/\/ Server password\n\tUseTLS bool \/\/ Should connect using TLS?\n\tTLSServerName string \/\/ Must supply if UseTLS is true\n\tDebug bool \/\/ This will log all IRC communication to standad output\n}\n\nvar (\n\tircConn *ircevent.Connection\n\tconfig *Config\n\tb *bot.Bot\n)\n\nfunc responseHandler(target string, message string, sender *bot.User) {\n\tchannel := target\n\tif ircConn.GetNick() == target {\n\t\tchannel = sender.Nick\n\t}\n\n\tfor _, line := range strings.Split(message, \"\\n\") {\n\t\tircConn.Privmsg(channel, line)\n\t}\n}\n\nfunc onPRIVMSG(e *ircevent.Event) {\n\tb.MessageReceived(\n\t\t&bot.ChannelData{\n\t\t\tProtocol: \"irc\",\n\t\t\tServer: ircConn.Server,\n\t\t\tChannel: e.Arguments[0],\n\t\t\tIsPrivate: e.Arguments[0] == ircConn.GetNick()},\n\t\te.Message(),\n\t\t&bot.User{\n\t\t\tID: e.Host,\n\t\t\tNick: e.Nick,\n\t\t\tRealName: e.User})\n}\n\nfunc getServerName(server string) string {\n\tseparatorIndex := strings.LastIndex(server, \":\")\n\tif separatorIndex != -1 {\n\t\treturn server[:separatorIndex]\n\t}\n\treturn server\n}\n\nfunc onWelcome(e *ircevent.Event) {\n\tfor _, channel := range config.Channels {\n\t\tircConn.Join(channel)\n\t}\n}\n\n\/\/ Run reads the Config, connect to the specified IRC server and starts the bot.\n\/\/ The bot will automatically join all the channels specified in the configuration\nfunc Run(c *Config) {\n\tconfig = c\n\n\tircConn = ircevent.IRC(c.User, c.Nick)\n\tircConn.Password = c.Password\n\tircConn.UseTLS = c.UseTLS\n\tircConn.TLSConfig = &tls.Config{\n\t\tServerName: getServerName(c.Server),\n\t}\n\tircConn.VerboseCallbackHandler = c.Debug\n\n\tb = bot.New(&bot.Handlers{\n\t\tResponse: responseHandler,\n\t})\n\n\tircConn.AddCallback(\"001\", onWelcome)\n\tircConn.AddCallback(\"PRIVMSG\", onPRIVMSG)\n\n\terr := ircConn.Connect(c.Server)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tircConn.Loop()\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2014 Daniele Tricoli .\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage irc \/\/ import \"eriol.xyz\/perpetua\/irc\"\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/thoj\/go-ircevent\"\n\n\t\"eriol.xyz\/perpetua\/config\"\n\t\"eriol.xyz\/perpetua\/db\"\n)\n\nconst version = \"perpetua quote bot \" + config.Version\n\nvar (\n\tconf *config.Config\n\tstore *db.Store\n)\n\n\/\/ Localizated quote and about tokens used to detect the kind of query for\n\/\/ the bot.\nvar i18n = map[string]map[string][]string{\n\t\"en\": map[string][]string{\n\t\t\"quote\": []string{\"quote\", \"what does it say\"},\n\t\t\"about\": []string{\"about\"},\n\t},\n\t\"it\": map[string][]string{\n\t\t\"quote\": []string{\n\t\t\t\"cita\",\n\t\t\t\"che dice\",\n\t\t\t\"cosa dice\",\n\t\t\t\"che cosa dice\"},\n\t\t\"about\": []string{\n\t\t\t\"su\",\n\t\t\t\"sul\",\n\t\t\t\"sulla\",\n\t\t\t\"sullo\",\n\t\t\t\"sui\",\n\t\t\t\"sugli\",\n\t\t\t\"sulle\"},\n\t},\n}\n\n\/\/ Join keys from i18n using \"|\": used inside the regex to perform an\n\/\/ OR of all keys.\nfunc i18nKeyJoin(lang, key string) string {\n\treturn strings.Join(i18n[lang][key], \"|\")\n}\n\nfunc connect() (connection *irc.Connection, err error) {\n\tconnection = irc.IRC(conf.IRC.Nickname, conf.IRC.User)\n\tconnection.Version = 
version\n\tconnection.UseTLS = conf.Server.UseTLS\n\tif conf.Server.SkipVerify == true {\n\t\tconnection.TLSConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\n\tif err := connection.Connect(fmt.Sprintf(\"%s:%d\",\n\t\tconf.Server.Hostname,\n\t\tconf.Server.Port)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn connection, nil\n}\n\nfunc doWelcome(event *irc.Event) {\n\tfor _, channel := range conf.IRC.Channels {\n\t\tevent.Connection.Join(channel)\n\t\tevent.Connection.Log.Println(\"Joined to \" + channel)\n\t}\n}\n\nfunc doJoin(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\n\tif event.Nick == conf.IRC.Nickname {\n\t\tevent.Connection.Privmsg(channel, \"Hello! I'm \"+version)\n\t} else {\n\t\tevent.Connection.Privmsg(channel,\n\t\t\tfmt.Sprintf(\"Hello %s! I'm %s. Do you want a quote?\",\n\t\t\t\tevent.Nick,\n\t\t\t\tversion))\n\t}\n}\n\nfunc doPrivmsg(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\tvar quote string\n\n\t\/\/ Don't speak in private!\n\tif channel == conf.IRC.Nickname {\n\t\treturn\n\t}\n\tcommand, person, extra, argument := parseMessage(event.Message())\n\n\tif command != \"\" && person != \"\" {\n\n\t\tquote = store.GetQuote(person, channel)\n\n\t\tif extra != \"\" && argument != \"\" {\n\t\t\tquote = store.GetQuoteAbout(person, argument, channel)\n\t\t}\n\n\t\tevent.Connection.Privmsg(channel, quote)\n\t}\n}\n\nfunc parseMessage(message string) (command, person, extra, argument string) {\n\tvar names []string\n\tlang := conf.I18N.Lang\n\n\treArgument := regexp.MustCompile(conf.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s+` +\n\t\t`(?P<command>` + i18nKeyJoin(lang, \"quote\") + `)` +\n\t\t`\\s+` +\n\t\t`(?P<person>[\\w\\s-'\\p{Latin}]+)` +\n\t\t`(?:\\s+)` +\n\t\t`(?P<extra>` + i18nKeyJoin(lang, \"about\") + `)` +\n\t\t`(?:\\s+)` +\n\t\t`(?P<argument>[\\w\\s-'\\p{Latin}]+)`)\n\n\tre := regexp.MustCompile(conf.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s+` +\n\t\t`(?P<command>` + i18nKeyJoin(lang, \"quote\") + `)` +\n\t\t`\\s+` +\n\t\t`(?P<person>[\\w\\s-'\\p{Latin}]+)`)\n\n\tres := reArgument.FindStringSubmatch(message)\n\n\tif res == nil {\n\t\tres = re.FindStringSubmatch(message)\n\t\tnames = re.SubexpNames()\n\t} else {\n\t\tnames = reArgument.SubexpNames()\n\t}\n\n\tm := map[string]string{}\n\tfor i, n := range res {\n\t\tm[names[i]] = n\n\t}\n\n\treturn m[\"command\"], m[\"person\"], m[\"extra\"], m[\"argument\"]\n}\n\nfunc Client(c *config.Config, db *db.Store) (err error) {\n\tconf = c\n\tstore = db\n\n\tconnection, err := connect()\n\tif err != nil {\n\t\treturn errors.New(\"Can't connect\")\n\t}\n\n\tconnection.AddCallback(\"001\", doWelcome)\n\tconnection.AddCallback(\"JOIN\", doJoin)\n\tconnection.AddCallback(\"PRIVMSG\", doPrivmsg)\n\n\tconnection.Loop()\n\n\treturn nil\n}\nDon't shadow error returned\/\/ Copyright © 2014 Daniele Tricoli .\n\/\/ All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage irc \/\/ import \"eriol.xyz\/perpetua\/irc\"\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/thoj\/go-ircevent\"\n\n\t\"eriol.xyz\/perpetua\/config\"\n\t\"eriol.xyz\/perpetua\/db\"\n)\n\nconst version = \"perpetua quote bot \" + config.Version\n\nvar (\n\tconf *config.Config\n\tstore *db.Store\n)\n\n\/\/ Localized quote and about tokens used to detect the kind of query for\n\/\/ the bot.\nvar i18n = map[string]map[string][]string{\n\t\"en\": map[string][]string{\n\t\t\"quote\": []string{\"quote\", \"what does it say\"},\n\t\t\"about\": []string{\"about\"},\n\t},\n\t\"it\": 
map[string][]string{\n\t\t\"quote\": []string{\n\t\t\t\"cita\",\n\t\t\t\"che dice\",\n\t\t\t\"cosa dice\",\n\t\t\t\"che cosa dice\"},\n\t\t\"about\": []string{\n\t\t\t\"su\",\n\t\t\t\"sul\",\n\t\t\t\"sulla\",\n\t\t\t\"sullo\",\n\t\t\t\"sui\",\n\t\t\t\"sugli\",\n\t\t\t\"sulle\"},\n\t},\n}\n\n\/\/ Join keys from i18n using \"|\": used inside the regex to perform an\n\/\/ OR of all keys.\nfunc i18nKeyJoin(lang, key string) string {\n\treturn strings.Join(i18n[lang][key], \"|\")\n}\n\nfunc connect() (connection *irc.Connection, err error) {\n\tconnection = irc.IRC(conf.IRC.Nickname, conf.IRC.User)\n\tconnection.Version = version\n\tconnection.UseTLS = conf.Server.UseTLS\n\tif conf.Server.SkipVerify == true {\n\t\tconnection.TLSConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\n\tif err := connection.Connect(fmt.Sprintf(\"%s:%d\",\n\t\tconf.Server.Hostname,\n\t\tconf.Server.Port)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn connection, nil\n}\n\nfunc doWelcome(event *irc.Event) {\n\tfor _, channel := range conf.IRC.Channels {\n\t\tevent.Connection.Join(channel)\n\t\tevent.Connection.Log.Println(\"Joined to \" + channel)\n\t}\n}\n\nfunc doJoin(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\n\tif event.Nick == conf.IRC.Nickname {\n\t\tevent.Connection.Privmsg(channel, \"Hello! I'm \"+version)\n\t} else {\n\t\tevent.Connection.Privmsg(channel,\n\t\t\tfmt.Sprintf(\"Hello %s! I'm %s. Do you want a quote?\",\n\t\t\t\tevent.Nick,\n\t\t\t\tversion))\n\t}\n}\n\nfunc doPrivmsg(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\tvar quote string\n\n\t\/\/ Don't speak in private!\n\tif channel == conf.IRC.Nickname {\n\t\treturn\n\t}\n\tcommand, person, extra, argument := parseMessage(event.Message())\n\n\tif command != \"\" && person != \"\" {\n\n\t\tquote = store.GetQuote(person, channel)\n\n\t\tif extra != \"\" && argument != \"\" {\n\t\t\tquote = store.GetQuoteAbout(person, argument, channel)\n\t\t}\n\n\t\tevent.Connection.Privmsg(channel, quote)\n\t}\n}\n\nfunc parseMessage(message string) (command, person, extra, argument string) {\n\tvar names []string\n\tlang := conf.I18N.Lang\n\n\treArgument := regexp.MustCompile(conf.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s+` +\n\t\t`(?P<command>` + i18nKeyJoin(lang, \"quote\") + `)` +\n\t\t`\\s+` +\n\t\t`(?P<person>[\\w\\s-'\\p{Latin}]+)` +\n\t\t`(?:\\s+)` +\n\t\t`(?P<extra>` + i18nKeyJoin(lang, \"about\") + `)` +\n\t\t`(?:\\s+)` +\n\t\t`(?P<argument>[\\w\\s-'\\p{Latin}]+)`)\n\n\tre := regexp.MustCompile(conf.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s+` +\n\t\t`(?P<command>` + i18nKeyJoin(lang, \"quote\") + `)` +\n\t\t`\\s+` +\n\t\t`(?P<person>[\\w\\s-'\\p{Latin}]+)`)\n\n\tres := reArgument.FindStringSubmatch(message)\n\n\tif res == nil {\n\t\tres = re.FindStringSubmatch(message)\n\t\tnames = re.SubexpNames()\n\t} else {\n\t\tnames = reArgument.SubexpNames()\n\t}\n\n\tm := map[string]string{}\n\tfor i, n := range res {\n\t\tm[names[i]] = n\n\t}\n\n\treturn m[\"command\"], m[\"person\"], m[\"extra\"], m[\"argument\"]\n}\n\nfunc Client(c *config.Config, db *db.Store) (err error) {\n\tconf = c\n\tstore = db\n\n\tconnection, err := connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconnection.AddCallback(\"001\", doWelcome)\n\tconnection.AddCallback(\"JOIN\", doJoin)\n\tconnection.AddCallback(\"PRIVMSG\", doPrivmsg)\n\n\tconnection.Loop()\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package serverlib\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"io\"\n\t\"time\"\n\t\"io\/ioutil\"\n)\n\nvar pathMapping = map[string]string{\n\t\"\/gas\": 
\"https:\/\/creativecommons.tankerkoenig.de\/json\/prices.php\",\n\t\"\/transport\": \"https:\/\/www.rmv.de\/hapi\/departureBoard\",\n\t\"\/weather\": \"http:\/\/api.openweathermap.org\/data\/2.5\/weather\",\n\t\"\/forecast\": \"http:\/\/api.openweathermap.org\/data\/2.5\/forecast\",\n}\n\nvar client = &http.Client{\n\tTimeout: time.Second * 10,\n}\n\nfunc NewHandler(w http.ResponseWriter, r *http.Request) {\n\tif externalUrl, ok := pathMapping[r.URL.Path]; ok {\n\t\trequest := newRequest(externalUrl, r.URL.Query())\n\t\tpassResponseBody(request, w)\n\t} else {\n\t\tw.Write([]byte(\"online\"))\n\t}\n}\n\nfunc newRequest(url string, params url.Values) (*http.Request) {\n\trequest, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\trequest.Header.Add(\"Accept\", \"application\/json\")\n\tquery := request.URL.Query()\n\n\tfor key, value := range params {\n\t\tfor i := range value {\n\t\t\tquery.Add(key, value[i])\n\t\t}\n\t}\n\trequest.URL.RawQuery = query.Encode()\n\treturn request\n}\n\nfunc passResponseBody(request *http.Request, w http.ResponseWriter) {\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\t\/\/ pass the status code\n\tw.WriteHeader(response.StatusCode)\n\tdefer response.Body.Close()\n\t\/\/ pass the body (body should be a JSON file)\n\t_, err = io.Copy(w, response.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc NewLogHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"%s\", bytes)\n}\ntransport checkCancellationpackage serverlib\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"io\"\n\t\"time\"\n\t\"io\/ioutil\"\n)\n\nvar pathMapping = map[string]string{\n\t\"\/gas\": \"https:\/\/creativecommons.tankerkoenig.de\/json\/prices.php\",\n\t\"\/transport\": \"https:\/\/www.rmv.de\/hapi\/departureBoard\",\n\t\"\/transportDetail\": \"https:\/\/www.rmv.de\/hapi\/journeyDetail\",\n\t\"\/weather\": \"http:\/\/api.openweathermap.org\/data\/2.5\/weather\",\n\t\"\/forecast\": \"http:\/\/api.openweathermap.org\/data\/2.5\/forecast\",\n}\n\nvar client = &http.Client{\n\tTimeout: time.Second * 10,\n}\n\nfunc NewHandler(w http.ResponseWriter, r *http.Request) {\n\tif externalUrl, ok := pathMapping[r.URL.Path]; ok {\n\t\trequest := newRequest(externalUrl, r.URL.Query())\n\t\tpassResponseBody(request, w)\n\t} else {\n\t\tw.Write([]byte(\"online\"))\n\t}\n}\n\nfunc newRequest(url string, params url.Values) (*http.Request) {\n\trequest, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\trequest.Header.Add(\"Accept\", \"application\/json\")\n\tquery := request.URL.Query()\n\n\tfor key, value := range params {\n\t\tfor i := range value {\n\t\t\tquery.Add(key, value[i])\n\t\t}\n\t}\n\trequest.URL.RawQuery = query.Encode()\n\treturn request\n}\n\nfunc passResponseBody(request *http.Request, w http.ResponseWriter) {\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\t\/\/ pass the status code\n\tw.WriteHeader(response.StatusCode)\n\tdefer response.Body.Close()\n\t\/\/ pass the body (body should be a JSON file)\n\t_, err = io.Copy(w, response.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc 
NewLogHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"%s\", bytes)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n)\n\n\/\/ Create a file system whose sole contents are a file named \"foo\" and a\n\/\/ directory named \"bar\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file and its status returned.\n\/\/\n\/\/ The directory cannot be modified.\nfunc NewFileSystem(\n\treportFlush func(string) error,\n\treportFsync func(string) error) (server fuse.Server, err error) {\n\tserver = &flushFS{\n\t\treportFlush: reportFlush,\n\t\treportFsync: reportFsync,\n\t}\n\n\treturn\n}\n\nconst (\n\tfooID = fuseops.RootInodeID + 1 + iota\n\tbarID\n)\n\ntype flushFS struct {\n\treportFlush func(string) error\n\treportFsync func(string) error\n\n\tmu sync.Mutex\n\tfooContents []byte \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t\tSize: uint64(len(fs.fooContents)),\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) barAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) ServeOps(c *fuse.Connection) {\n\tfor {\n\t\top, err := c.ReadOp()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tswitch typed := op.(type) {\n\t\tcase *fuseops.InitOp:\n\t\t\tfs.init(typed)\n\n\t\tcase *fuseops.LookUpInodeOp:\n\t\t\tfs.lookUpInode(typed)\n\n\t\tcase *fuseops.GetInodeAttributesOp:\n\t\t\tfs.getInodeAttributes(typed)\n\n\t\tcase *fuseops.OpenFileOp:\n\t\t\tfs.openFile(typed)\n\n\t\tcase *fuseops.ReadFileOp:\n\t\t\tfs.readFile(typed)\n\n\t\tcase *fuseops.WriteFileOp:\n\t\t\tfs.writeFile(typed)\n\n\t\tcase *fuseops.SyncFileOp:\n\t\t\tfs.syncFile(typed)\n\n\t\tcase *fuseops.FlushFileOp:\n\t\t\tfs.flushFile(typed)\n\n\t\tcase 
*fuseops.OpenDirOp:\n\t\t\tfs.openDir(typed)\n\n\t\tdefault:\n\t\t\ttyped.Respond(fuse.ENOSYS)\n\t\t}\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Op methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) init(op *fuseops.InitOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\treturn\n}\n\nfunc (fs *flushFS) lookUpInode(op *fuseops.LookUpInodeOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Parent != fuseops.RootInodeID {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Set up the entry.\n\tswitch op.Name {\n\tcase \"foo\":\n\t\top.Entry = fuseops.ChildInodeEntry{\n\t\t\tChild: fooID,\n\t\t\tAttributes: fs.fooAttributes(),\n\t\t}\n\n\tcase \"bar\":\n\t\top.Entry = fuseops.ChildInodeEntry{\n\t\t\tChild: barID,\n\t\t\tAttributes: fs.barAttributes(),\n\t\t}\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) getInodeAttributes(op *fuseops.GetInodeAttributesOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch op.Inode {\n\tcase fuseops.RootInodeID:\n\t\top.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\top.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tcase barID:\n\t\top.Attributes = fs.barAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n\nfunc (fs *flushFS) openFile(op *fuseops.OpenFileOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Inode != fooID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) readFile(op *fuseops.ReadFileOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure the offset is in range.\n\tif op.Offset > int64(len(fs.fooContents)) {\n\t\treturn\n\t}\n\n\t\/\/ Read what we can.\n\top.Data = make([]byte, op.Size)\n\tcopy(op.Data, fs.fooContents[op.Offset:])\n\n\treturn\n}\n\nfunc (fs *flushFS) writeFile(op *fuseops.WriteFileOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure that the contents slice is long enough.\n\tnewLen := int(op.Offset) + len(op.Data)\n\tif len(fs.fooContents) < newLen {\n\t\tpadding := make([]byte, newLen-len(fs.fooContents))\n\t\tfs.fooContents = append(fs.fooContents, padding...)\n\t}\n\n\t\/\/ Copy in the data.\n\tn := copy(fs.fooContents[op.Offset:], op.Data)\n\n\t\/\/ Sanity check.\n\tif n != len(op.Data) {\n\t\tpanic(fmt.Sprintf(\"Unexpected short copy: %v\", n))\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) syncFile(op *fuseops.SyncFileOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFsync(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) flushFile(op *fuseops.FlushFileOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFlush(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) openDir(op *fuseops.OpenDirOp) {\n\tvar err error\n\tdefer func() { op.Respond(err) }()\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Inode != barID {\n\t\terr 
= fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\nUse FileSystem in flushfs.\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n)\n\n\/\/ Create a file system whose sole contents are a file named \"foo\" and a\n\/\/ directory named \"bar\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file and its status returned.\n\/\/\n\/\/ The directory cannot be modified.\nfunc NewFileSystem(\n\treportFlush func(string) error,\n\treportFsync func(string) error) (server fuse.Server, err error) {\n\tfs := &flushFS{\n\t\treportFlush: reportFlush,\n\t\treportFsync: reportFsync,\n\t}\n\n\tserver = fuseutil.NewFileSystemServer(fs)\n\treturn\n}\n\nconst (\n\tfooID = fuseops.RootInodeID + 1 + iota\n\tbarID\n)\n\ntype flushFS struct {\n\tfuseutil.NotImplementedFileSystem\n\n\treportFlush func(string) error\n\treportFsync func(string) error\n\n\tmu sync.Mutex\n\tfooContents []byte \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t\tSize: uint64(len(fs.fooContents)),\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) barAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Op methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) Init(\n\top *fuseops.InitOp) (err error) {\n\treturn\n}\n\nfunc (fs *flushFS) LookUpInode(\n\top *fuseops.LookUpInodeOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Parent != fuseops.RootInodeID {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Set up the entry.\n\tswitch op.Name {\n\tcase \"foo\":\n\t\top.Entry = fuseops.ChildInodeEntry{\n\t\t\tChild: fooID,\n\t\t\tAttributes: fs.fooAttributes(),\n\t\t}\n\n\tcase \"bar\":\n\t\top.Entry = 
fuseops.ChildInodeEntry{\n\t\t\tChild: barID,\n\t\t\tAttributes: fs.barAttributes(),\n\t\t}\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) GetInodeAttributes(\n\top *fuseops.GetInodeAttributesOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch op.Inode {\n\tcase fuseops.RootInodeID:\n\t\top.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\top.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tcase barID:\n\t\top.Attributes = fs.barAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n\nfunc (fs *flushFS) OpenFile(\n\top *fuseops.OpenFileOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Inode != fooID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) ReadFile(\n\top *fuseops.ReadFileOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure the offset is in range.\n\tif op.Offset > int64(len(fs.fooContents)) {\n\t\treturn\n\t}\n\n\t\/\/ Read what we can.\n\top.Data = make([]byte, op.Size)\n\tcopy(op.Data, fs.fooContents[op.Offset:])\n\n\treturn\n}\n\nfunc (fs *flushFS) WriteFile(\n\top *fuseops.WriteFileOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure that the contents slice is long enough.\n\tnewLen := int(op.Offset) + len(op.Data)\n\tif len(fs.fooContents) < newLen {\n\t\tpadding := make([]byte, newLen-len(fs.fooContents))\n\t\tfs.fooContents = append(fs.fooContents, padding...)\n\t}\n\n\t\/\/ Copy in the data.\n\tn := copy(fs.fooContents[op.Offset:], op.Data)\n\n\t\/\/ Sanity check.\n\tif n != len(op.Data) {\n\t\tpanic(fmt.Sprintf(\"Unexpected short copy: %v\", n))\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) SyncFile(\n\top *fuseops.SyncFileOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFsync(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) FlushFile(\n\top *fuseops.FlushFileOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFlush(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) OpenDir(\n\top *fuseops.OpenDirOp) (err error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif op.Inode != barID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2018 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package inpututil provides utility functions of input like keyboard or mouse.\npackage inpututil\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/v2\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/hooks\"\n)\n\ntype inputState struct {\n\tkeyDurations []int\n\tprevKeyDurations []int\n\n\tmouseButtonDurations map[ebiten.MouseButton]int\n\tprevMouseButtonDurations map[ebiten.MouseButton]int\n\n\tgamepadIDs map[ebiten.GamepadID]struct{}\n\tprevGamepadIDs map[ebiten.GamepadID]struct{}\n\n\tgamepadButtonDurations 
map[ebiten.GamepadID][]int\n\tprevGamepadButtonDurations map[ebiten.GamepadID][]int\n\n\ttouchDurations map[ebiten.TouchID]int\n\tprevTouchDurations map[ebiten.TouchID]int\n\n\tm sync.RWMutex\n}\n\nvar theInputState = &inputState{\n\tkeyDurations: make([]int, ebiten.KeyMax+1),\n\tprevKeyDurations: make([]int, ebiten.KeyMax+1),\n\n\tmouseButtonDurations: map[ebiten.MouseButton]int{},\n\tprevMouseButtonDurations: map[ebiten.MouseButton]int{},\n\n\tgamepadIDs: map[ebiten.GamepadID]struct{}{},\n\tprevGamepadIDs: map[ebiten.GamepadID]struct{}{},\n\n\tgamepadButtonDurations: map[ebiten.GamepadID][]int{},\n\tprevGamepadButtonDurations: map[ebiten.GamepadID][]int{},\n\n\ttouchDurations: map[ebiten.TouchID]int{},\n\tprevTouchDurations: map[ebiten.TouchID]int{},\n}\n\nfunc init() {\n\thooks.AppendHookOnBeforeUpdate(func() error {\n\t\ttheInputState.update()\n\t\treturn nil\n\t})\n}\n\nfunc (i *inputState) update() {\n\ti.m.Lock()\n\tdefer i.m.Unlock()\n\n\t\/\/ Keyboard\n\tcopy(i.prevKeyDurations[:], i.keyDurations[:])\n\tfor k := ebiten.Key(0); k <= ebiten.KeyMax; k++ {\n\t\tif ebiten.IsKeyPressed(k) {\n\t\t\ti.keyDurations[k]++\n\t\t} else {\n\t\t\ti.keyDurations[k] = 0\n\t\t}\n\t}\n\n\t\/\/ Mouse\n\tfor _, b := range []ebiten.MouseButton{\n\t\tebiten.MouseButtonLeft,\n\t\tebiten.MouseButtonRight,\n\t\tebiten.MouseButtonMiddle,\n\t} {\n\t\ti.prevMouseButtonDurations[b] = i.mouseButtonDurations[b]\n\t\tif ebiten.IsMouseButtonPressed(b) {\n\t\t\ti.mouseButtonDurations[b]++\n\t\t} else {\n\t\t\ti.mouseButtonDurations[b] = 0\n\t\t}\n\t}\n\n\t\/\/ Gamepads\n\n\t\/\/ Copy the gamepad IDs.\n\ti.prevGamepadIDs = map[ebiten.GamepadID]struct{}{}\n\tfor id := range i.gamepadIDs {\n\t\ti.prevGamepadIDs[id] = struct{}{}\n\t}\n\n\t\/\/ Copy the gamepad button durations.\n\ti.prevGamepadButtonDurations = map[ebiten.GamepadID][]int{}\n\tfor id, ds := range i.gamepadButtonDurations {\n\t\ti.prevGamepadButtonDurations[id] = append([]int{}, ds...)\n\t}\n\n\ti.gamepadIDs = map[ebiten.GamepadID]struct{}{}\n\tfor _, id := range ebiten.GamepadIDs() {\n\t\ti.gamepadIDs[id] = struct{}{}\n\t\tif _, ok := i.gamepadButtonDurations[id]; !ok {\n\t\t\ti.gamepadButtonDurations[id] = make([]int, ebiten.GamepadButtonMax+1)\n\t\t}\n\t\tn := ebiten.GamepadButtonNum(id)\n\t\tfor b := ebiten.GamepadButton(0); b < ebiten.GamepadButton(n); b++ {\n\t\t\tif ebiten.IsGamepadButtonPressed(id, b) {\n\t\t\t\ti.gamepadButtonDurations[id][b]++\n\t\t\t} else {\n\t\t\t\ti.gamepadButtonDurations[id][b] = 0\n\t\t\t}\n\t\t}\n\t}\n\tgamepadIDsToDelete := []ebiten.GamepadID{}\n\tfor id := range i.gamepadButtonDurations {\n\t\tif _, ok := i.gamepadIDs[id]; !ok {\n\t\t\tgamepadIDsToDelete = append(gamepadIDsToDelete, id)\n\t\t}\n\t}\n\tfor _, id := range gamepadIDsToDelete {\n\t\tdelete(i.gamepadButtonDurations, id)\n\t}\n\n\t\/\/ Touches\n\tids := map[ebiten.TouchID]struct{}{}\n\n\t\/\/ Copy the touch durations.\n\ti.prevTouchDurations = map[ebiten.TouchID]int{}\n\tfor id := range i.touchDurations {\n\t\ti.prevTouchDurations[id] = i.touchDurations[id]\n\t}\n\n\tfor _, id := range ebiten.TouchIDs() {\n\t\tids[id] = struct{}{}\n\t\ti.touchDurations[id]++\n\t}\n\ttouchIDsToDelete := []ebiten.TouchID{}\n\tfor id := range i.touchDurations {\n\t\tif _, ok := ids[id]; !ok {\n\t\t\ttouchIDsToDelete = append(touchIDsToDelete, id)\n\t\t}\n\t}\n\tfor _, id := range touchIDsToDelete {\n\t\tdelete(i.touchDurations, id)\n\t}\n}\n\n\/\/ IsKeyJustPressed returns a boolean value indicating\n\/\/ whether the given key is pressed just in the current frame.\n\/\/\n\/\/ 
IsKeyJustPressed is concurrent safe.\nfunc IsKeyJustPressed(key ebiten.Key) bool {\n\treturn KeyPressDuration(key) == 1\n}\n\n\/\/ IsKeyJustReleased returns a boolean value indicating\n\/\/ whether the given key is released just in the current frame.\n\/\/\n\/\/ IsKeyJustReleased is concurrent safe.\nfunc IsKeyJustReleased(key ebiten.Key) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.keyDurations[key] == 0 && theInputState.prevKeyDurations[key] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ KeyPressDuration returns how long the key is pressed in frames.\n\/\/\n\/\/ KeyPressDuration is concurrent safe.\nfunc KeyPressDuration(key ebiten.Key) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.keyDurations[key]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ IsMouseButtonJustPressed returns a boolean value indicating\n\/\/ whether the given mouse button is pressed just in the current frame.\n\/\/\n\/\/ IsMouseButtonJustPressed is concurrent safe.\nfunc IsMouseButtonJustPressed(button ebiten.MouseButton) bool {\n\treturn MouseButtonPressDuration(button) == 1\n}\n\n\/\/ IsMouseButtonJustReleased returns a boolean value indicating\n\/\/ whether the given mouse button is released just in the current frame.\n\/\/\n\/\/ IsMouseButtonJustReleased is concurrent safe.\nfunc IsMouseButtonJustReleased(button ebiten.MouseButton) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.mouseButtonDurations[button] == 0 &&\n\t\ttheInputState.prevMouseButtonDurations[button] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ MouseButtonPressDuration returns how long the mouse button is pressed in frames.\n\/\/\n\/\/ MouseButtonPressDuration is concurrent safe.\nfunc MouseButtonPressDuration(button ebiten.MouseButton) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.mouseButtonDurations[button]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ JustConnectedGamepadIDs returns gamepad IDs that are connected just in the current frame.\n\/\/\n\/\/ JustConnectedGamepadIDs might return nil when there is no connected gamepad.\n\/\/\n\/\/ JustConnectedGamepadIDs is concurrent safe.\nfunc JustConnectedGamepadIDs() []ebiten.GamepadID {\n\tvar ids []ebiten.GamepadID\n\ttheInputState.m.RLock()\n\tfor id := range theInputState.gamepadIDs {\n\t\tif _, ok := theInputState.prevGamepadIDs[id]; !ok {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\ttheInputState.m.RUnlock()\n\tsort.Slice(ids, func(a, b int) bool {\n\t\treturn ids[a] < ids[b]\n\t})\n\treturn ids\n}\n\n\/\/ IsGamepadJustDisconnected returns a boolean value indicating\n\/\/ whether the gamepad of the given id is released just in the current frame.\n\/\/\n\/\/ IsGamepadJustDisconnected is concurrent safe.\nfunc IsGamepadJustDisconnected(id ebiten.GamepadID) bool {\n\ttheInputState.m.RLock()\n\t_, prev := theInputState.prevGamepadIDs[id]\n\t_, current := theInputState.gamepadIDs[id]\n\ttheInputState.m.RUnlock()\n\treturn prev && !current\n}\n\n\/\/ IsGamepadButtonJustPressed returns a boolean value indicating\n\/\/ whether the given gamepad button of the gamepad id is pressed just in the current frame.\n\/\/\n\/\/ IsGamepadButtonJustPressed is concurrent safe.\nfunc IsGamepadButtonJustPressed(id ebiten.GamepadID, button ebiten.GamepadButton) bool {\n\treturn GamepadButtonPressDuration(id, button) == 1\n}\n\n\/\/ IsGamepadButtonJustReleased returns a boolean value indicating\n\/\/ whether the given gamepad button of the gamepad id is released just in the current frame.\n\/\/\n\/\/ IsGamepadButtonJustReleased is concurrent 
safe.\nfunc IsGamepadButtonJustReleased(id ebiten.GamepadID, button ebiten.GamepadButton) bool {\n\ttheInputState.m.RLock()\n\tprev := 0\n\tif _, ok := theInputState.prevGamepadButtonDurations[id]; ok {\n\t\tprev = theInputState.prevGamepadButtonDurations[id][button]\n\t}\n\tcurrent := 0\n\tif _, ok := theInputState.gamepadButtonDurations[id]; ok {\n\t\tcurrent = theInputState.gamepadButtonDurations[id][button]\n\t}\n\ttheInputState.m.RUnlock()\n\treturn current == 0 && prev > 0\n}\n\n\/\/ GamepadButtonPressDuration returns how long the gamepad button of the gamepad id is pressed in frames.\n\/\/\n\/\/ GamepadButtonPressDuration is concurrent safe.\nfunc GamepadButtonPressDuration(id ebiten.GamepadID, button ebiten.GamepadButton) int {\n\ttheInputState.m.RLock()\n\ts := 0\n\tif _, ok := theInputState.gamepadButtonDurations[id]; ok {\n\t\ts = theInputState.gamepadButtonDurations[id][button]\n\t}\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ JustPressedTouchIDs returns touch IDs that are created just in the current frame.\n\/\/\n\/\/ JustPressedTouchIDs might return nil when there is no touch.\n\/\/\n\/\/ JustPressedTouchIDs is concurrent safe.\nfunc JustPressedTouchIDs() []ebiten.TouchID {\n\tvar ids []ebiten.TouchID\n\ttheInputState.m.RLock()\n\tfor id, s := range theInputState.touchDurations {\n\t\tif s == 1 {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\ttheInputState.m.RUnlock()\n\tsort.Slice(ids, func(a, b int) bool {\n\t\treturn ids[a] < ids[b]\n\t})\n\treturn ids\n}\n\n\/\/ IsTouchJustReleased returns a boolean value indicating\n\/\/ whether the given touch is released just in the current frame.\n\/\/\n\/\/ IsTouchJustReleased is concurrent safe.\nfunc IsTouchJustReleased(id ebiten.TouchID) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.touchDurations[id] == 0 && theInputState.prevTouchDurations[id] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ TouchPressDuration returns how long the touch remains in frames.\n\/\/\n\/\/ TouchPressDuration is concurrent safe.\nfunc TouchPressDuration(id ebiten.TouchID) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.touchDurations[id]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\ninpututil: Optimization\/\/ Copyright 2018 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package inpututil provides utility functions of input like keyboard or mouse.\npackage inpututil\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/v2\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/hooks\"\n)\n\ntype inputState struct {\n\tkeyDurations []int\n\tprevKeyDurations []int\n\n\tmouseButtonDurations map[ebiten.MouseButton]int\n\tprevMouseButtonDurations map[ebiten.MouseButton]int\n\n\tgamepadIDs map[ebiten.GamepadID]struct{}\n\tprevGamepadIDs map[ebiten.GamepadID]struct{}\n\n\tgamepadButtonDurations map[ebiten.GamepadID][]int\n\tprevGamepadButtonDurations map[ebiten.GamepadID][]int\n\n\ttouchDurations map[ebiten.TouchID]int\n\tprevTouchDurations 
map[ebiten.TouchID]int\n\n\tm sync.RWMutex\n}\n\nvar theInputState = &inputState{\n\tkeyDurations: make([]int, ebiten.KeyMax+1),\n\tprevKeyDurations: make([]int, ebiten.KeyMax+1),\n\n\tmouseButtonDurations: map[ebiten.MouseButton]int{},\n\tprevMouseButtonDurations: map[ebiten.MouseButton]int{},\n\n\tgamepadIDs: map[ebiten.GamepadID]struct{}{},\n\tprevGamepadIDs: map[ebiten.GamepadID]struct{}{},\n\n\tgamepadButtonDurations: map[ebiten.GamepadID][]int{},\n\tprevGamepadButtonDurations: map[ebiten.GamepadID][]int{},\n\n\ttouchDurations: map[ebiten.TouchID]int{},\n\tprevTouchDurations: map[ebiten.TouchID]int{},\n}\n\nfunc init() {\n\thooks.AppendHookOnBeforeUpdate(func() error {\n\t\ttheInputState.update()\n\t\treturn nil\n\t})\n}\n\nfunc (i *inputState) update() {\n\ti.m.Lock()\n\tdefer i.m.Unlock()\n\n\t\/\/ Keyboard\n\tcopy(i.prevKeyDurations[:], i.keyDurations[:])\n\tfor k := ebiten.Key(0); k <= ebiten.KeyMax; k++ {\n\t\tif ebiten.IsKeyPressed(k) {\n\t\t\ti.keyDurations[k]++\n\t\t} else {\n\t\t\ti.keyDurations[k] = 0\n\t\t}\n\t}\n\n\t\/\/ Mouse\n\tfor _, b := range []ebiten.MouseButton{\n\t\tebiten.MouseButtonLeft,\n\t\tebiten.MouseButtonRight,\n\t\tebiten.MouseButtonMiddle,\n\t} {\n\t\ti.prevMouseButtonDurations[b] = i.mouseButtonDurations[b]\n\t\tif ebiten.IsMouseButtonPressed(b) {\n\t\t\ti.mouseButtonDurations[b]++\n\t\t} else {\n\t\t\ti.mouseButtonDurations[b] = 0\n\t\t}\n\t}\n\n\t\/\/ Gamepads\n\n\t\/\/ Copy the gamepad IDs.\n\tfor id := range i.prevGamepadIDs {\n\t\tdelete(i.prevGamepadIDs, id)\n\t}\n\tfor id := range i.gamepadIDs {\n\t\ti.prevGamepadIDs[id] = struct{}{}\n\t}\n\n\t\/\/ Copy the gamepad button durations.\n\tfor id := range i.prevGamepadButtonDurations {\n\t\tdelete(i.prevGamepadButtonDurations, id)\n\t}\n\tfor id, ds := range i.gamepadButtonDurations {\n\t\ti.prevGamepadButtonDurations[id] = append([]int{}, ds...)\n\t}\n\n\tfor id := range i.gamepadIDs {\n\t\tdelete(i.gamepadIDs, id)\n\t}\n\tfor _, id := range ebiten.GamepadIDs() {\n\t\ti.gamepadIDs[id] = struct{}{}\n\t\tif _, ok := i.gamepadButtonDurations[id]; !ok {\n\t\t\ti.gamepadButtonDurations[id] = make([]int, ebiten.GamepadButtonMax+1)\n\t\t}\n\t\tn := ebiten.GamepadButtonNum(id)\n\t\tfor b := ebiten.GamepadButton(0); b < ebiten.GamepadButton(n); b++ {\n\t\t\tif ebiten.IsGamepadButtonPressed(id, b) {\n\t\t\t\ti.gamepadButtonDurations[id][b]++\n\t\t\t} else {\n\t\t\t\ti.gamepadButtonDurations[id][b] = 0\n\t\t\t}\n\t\t}\n\t}\n\tfor id := range i.gamepadButtonDurations {\n\t\tif _, ok := i.gamepadIDs[id]; !ok {\n\t\t\tdelete(i.gamepadButtonDurations, id)\n\t\t}\n\t}\n\n\t\/\/ Touches\n\tids := map[ebiten.TouchID]struct{}{}\n\n\t\/\/ Copy the touch durations.\n\ti.prevTouchDurations = map[ebiten.TouchID]int{}\n\tfor id := range i.touchDurations {\n\t\ti.prevTouchDurations[id] = i.touchDurations[id]\n\t}\n\n\tfor _, id := range ebiten.TouchIDs() {\n\t\tids[id] = struct{}{}\n\t\ti.touchDurations[id]++\n\t}\n\ttouchIDsToDelete := []ebiten.TouchID{}\n\tfor id := range i.touchDurations {\n\t\tif _, ok := ids[id]; !ok {\n\t\t\ttouchIDsToDelete = append(touchIDsToDelete, id)\n\t\t}\n\t}\n\tfor _, id := range touchIDsToDelete {\n\t\tdelete(i.touchDurations, id)\n\t}\n}\n\n\/\/ IsKeyJustPressed returns a boolean value indicating\n\/\/ whether the given key is pressed just in the current frame.\n\/\/\n\/\/ IsKeyJustPressed is concurrent safe.\nfunc IsKeyJustPressed(key ebiten.Key) bool {\n\treturn KeyPressDuration(key) == 1\n}\n\n\/\/ IsKeyJustReleased returns a boolean value indicating\n\/\/ whether the given key is 
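released just in the current frame.\n\/\/\n\/\/ Note on the scheme used throughout this package: update() advances a\n\/\/ per-key frame counter, so a duration of 1 marks the press edge and a\n\/\/ duration of 0 with a previous duration > 0 marks the release edge.\n\/\/\n\/\/ IsKeyJustReleased returns a boolean value indicating\n\/\/ whether the given key is 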
released just in the current frame.\n\/\/\n\/\/ IsKeyJustReleased is concurrent safe.\nfunc IsKeyJustReleased(key ebiten.Key) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.keyDurations[key] == 0 && theInputState.prevKeyDurations[key] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ KeyPressDuration returns how long the key is pressed in frames.\n\/\/\n\/\/ KeyPressDuration is concurrent safe.\nfunc KeyPressDuration(key ebiten.Key) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.keyDurations[key]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ IsMouseButtonJustPressed returns a boolean value indicating\n\/\/ whether the given mouse button is pressed just in the current frame.\n\/\/\n\/\/ IsMouseButtonJustPressed is concurrent safe.\nfunc IsMouseButtonJustPressed(button ebiten.MouseButton) bool {\n\treturn MouseButtonPressDuration(button) == 1\n}\n\n\/\/ IsMouseButtonJustReleased returns a boolean value indicating\n\/\/ whether the given mouse button is released just in the current frame.\n\/\/\n\/\/ IsMouseButtonJustReleased is concurrent safe.\nfunc IsMouseButtonJustReleased(button ebiten.MouseButton) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.mouseButtonDurations[button] == 0 &&\n\t\ttheInputState.prevMouseButtonDurations[button] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ MouseButtonPressDuration returns how long the mouse button is pressed in frames.\n\/\/\n\/\/ MouseButtonPressDuration is concurrent safe.\nfunc MouseButtonPressDuration(button ebiten.MouseButton) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.mouseButtonDurations[button]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ JustConnectedGamepadIDs returns gamepad IDs that are connected just in the current frame.\n\/\/\n\/\/ JustConnectedGamepadIDs might return nil when there is no connected gamepad.\n\/\/\n\/\/ JustConnectedGamepadIDs is concurrent safe.\nfunc JustConnectedGamepadIDs() []ebiten.GamepadID {\n\tvar ids []ebiten.GamepadID\n\ttheInputState.m.RLock()\n\tfor id := range theInputState.gamepadIDs {\n\t\tif _, ok := theInputState.prevGamepadIDs[id]; !ok {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\ttheInputState.m.RUnlock()\n\tsort.Slice(ids, func(a, b int) bool {\n\t\treturn ids[a] < ids[b]\n\t})\n\treturn ids\n}\n\n\/\/ IsGamepadJustDisconnected returns a boolean value indicating\n\/\/ whether the gamepad of the given id is released just in the current frame.\n\/\/\n\/\/ IsGamepadJustDisconnected is concurrent safe.\nfunc IsGamepadJustDisconnected(id ebiten.GamepadID) bool {\n\ttheInputState.m.RLock()\n\t_, prev := theInputState.prevGamepadIDs[id]\n\t_, current := theInputState.gamepadIDs[id]\n\ttheInputState.m.RUnlock()\n\treturn prev && !current\n}\n\n\/\/ IsGamepadButtonJustPressed returns a boolean value indicating\n\/\/ whether the given gamepad button of the gamepad id is pressed just in the current frame.\n\/\/\n\/\/ IsGamepadButtonJustPressed is concurrent safe.\nfunc IsGamepadButtonJustPressed(id ebiten.GamepadID, button ebiten.GamepadButton) bool {\n\treturn GamepadButtonPressDuration(id, button) == 1\n}\n\n\/\/ IsGamepadButtonJustReleased returns a boolean value indicating\n\/\/ whether the given gamepad button of the gamepad id is released just in the current frame.\n\/\/\n\/\/ IsGamepadButtonJustReleased is concurrent safe.\nfunc IsGamepadButtonJustReleased(id ebiten.GamepadID, button ebiten.GamepadButton) bool {\n\ttheInputState.m.RLock()\n\tprev := 0\n\tif _, ok := theInputState.prevGamepadButtonDurations[id]; ok {\n\t\tprev = 
theInputState.prevGamepadButtonDurations[id][button]\n\t}\n\tcurrent := 0\n\tif _, ok := theInputState.gamepadButtonDurations[id]; ok {\n\t\tcurrent = theInputState.gamepadButtonDurations[id][button]\n\t}\n\ttheInputState.m.RUnlock()\n\treturn current == 0 && prev > 0\n}\n\n\/\/ GamepadButtonPressDuration returns how long the gamepad button of the gamepad id is pressed in frames.\n\/\/\n\/\/ GamepadButtonPressDuration is concurrent safe.\nfunc GamepadButtonPressDuration(id ebiten.GamepadID, button ebiten.GamepadButton) int {\n\ttheInputState.m.RLock()\n\ts := 0\n\tif _, ok := theInputState.gamepadButtonDurations[id]; ok {\n\t\ts = theInputState.gamepadButtonDurations[id][button]\n\t}\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n\n\/\/ JustPressedTouchIDs returns touch IDs that are created just in the current frame.\n\/\/\n\/\/ JustPressedTouchIDs might return nil when there are no touches.\n\/\/\n\/\/ JustPressedTouchIDs is concurrent safe.\nfunc JustPressedTouchIDs() []ebiten.TouchID {\n\tvar ids []ebiten.TouchID\n\ttheInputState.m.RLock()\n\tfor id, s := range theInputState.touchDurations {\n\t\tif s == 1 {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\ttheInputState.m.RUnlock()\n\tsort.Slice(ids, func(a, b int) bool {\n\t\treturn ids[a] < ids[b]\n\t})\n\treturn ids\n}\n\n\/\/ IsTouchJustReleased returns a boolean value indicating\n\/\/ whether the given touch is released just in the current frame.\n\/\/\n\/\/ IsTouchJustReleased is concurrent safe.\nfunc IsTouchJustReleased(id ebiten.TouchID) bool {\n\ttheInputState.m.RLock()\n\tr := theInputState.touchDurations[id] == 0 && theInputState.prevTouchDurations[id] > 0\n\ttheInputState.m.RUnlock()\n\treturn r\n}\n\n\/\/ TouchPressDuration returns how long the touch remains in frames.\n\/\/\n\/\/ TouchPressDuration is concurrent safe.\nfunc TouchPressDuration(id ebiten.TouchID) int {\n\ttheInputState.m.RLock()\n\ts := theInputState.touchDurations[id]\n\ttheInputState.m.RUnlock()\n\treturn s\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage keyupdater\n\nimport (\n\t\"strings\"\n\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/common\"\n\t\"launchpad.net\/juju-core\/state\/watcher\"\n)\n\n\/\/ KeyUpdater defines the methods on the keyupdater API end point.\ntype KeyUpdater interface {\n\tAuthorisedKeys(args params.Entities) (params.StringsResults, error)\n\tWatchAuthorisedKeys(args params.Entities) (params.NotifyWatchResults, error)\n}\n\n\/\/ KeyUpdaterAPI implements the KeyUpdater interface and is the concrete\n\/\/ implementation of the api end point.\ntype KeyUpdaterAPI struct {\n\tstate *state.State\n\tresources *common.Resources\n\tauthorizer common.Authorizer\n\tgetCanRead common.GetAuthFunc\n}\n\nvar _ KeyUpdater = (*KeyUpdaterAPI)(nil)\n\n\/\/ NewKeyUpdaterAPI creates a new server-side keyupdater API end point.\nfunc NewKeyUpdaterAPI(\n\tst *state.State,\n\tresources *common.Resources,\n\tauthorizer common.Authorizer,\n) (*KeyUpdaterAPI, error) {\n\t\/\/ Only machine agents have access to the keyupdater service.\n\tif !authorizer.AuthMachineAgent() {\n\t\treturn nil, common.ErrPerm\n\t}\n\t\/\/ Only the machine itself can read its own credentials.\n\tgetCanRead := func() (common.AuthFunc, error) {\n\t\treturn authorizer.AuthOwner, nil\n\t}\n\treturn &KeyUpdaterAPI{state: st, resources: resources, authorizer: authorizer, getCanRead: 
getCanRead}, nil\n}\n\n\/\/ WatchAuthorisedKeys starts a watcher to track changes to the authorised ssh keys\n\/\/ for the specified machines.\n\/\/ The current implementation relies on global authorised keys being stored in the environment config.\n\/\/ This will change as new user management and authorisation functionality is added.\nfunc (api *KeyUpdaterAPI) WatchAuthorisedKeys(arg params.Entities) (params.NotifyWatchResults, error) {\n\tresults := make([]params.NotifyWatchResult, len(arg.Entities))\n\n\tgetCanRead, err := api.getCanRead()\n\tif err != nil {\n\t\treturn params.NotifyWatchResults{}, err\n\t}\n\tfor i, entity := range arg.Entities {\n\t\tif _, err := api.state.FindEntity(entity.Tag); err != nil {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\tif !getCanRead(entity.Tag) {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\tvar err error\n\t\twatch := api.state.WatchForEnvironConfigChanges()\n\t\t\/\/ Consume the initial event.\n\t\tif _, ok := <-watch.Changes(); ok {\n\t\t\tresults[i].NotifyWatcherId = api.resources.Register(watch)\n\t\t\terr = nil\n\t\t} else {\n\t\t\terr = watcher.MustErr(watch)\n\t\t}\n\t\tresults[i].Error = common.ServerError(err)\n\t}\n\treturn params.NotifyWatchResults{results}, nil\n}\n\n\/\/ AuthorisedKeys reports the authorised ssh keys for the specified machines.\n\/\/ The current implementation relies on global authorised keys being stored in the environment config.\n\/\/ This will change as new user management and authorisation functionality is added.\nfunc (api *KeyUpdaterAPI) AuthorisedKeys(arg params.Entities) (params.StringsResults, error) {\n\tif len(arg.Entities) == 0 {\n\t\treturn params.StringsResults{}, nil\n\t}\n\tresults := make([]params.StringsResult, len(arg.Entities))\n\n\t\/\/ For now, authorised keys are global, common to all machines.\n\tvar keys []string\n\tconfig, configErr := api.state.EnvironConfig()\n\tif configErr == nil {\n\t\tkeysString := config.AuthorizedKeys()\n\t\tkeys = strings.Split(keysString, \"\\n\")\n\t}\n\n\tgetCanRead, err := api.getCanRead()\n\tif err != nil {\n\t\treturn params.StringsResults{}, err\n\t}\n\tfor i, entity := range arg.Entities {\n\t\tif _, err := api.state.FindEntity(entity.Tag); err != nil {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\tif !getCanRead(entity.Tag) {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\tvar err error\n\t\tif configErr == nil {\n\t\t\tresults[i].Result = keys\n\t\t\terr = nil\n\t\t} else {\n\t\t\terr = configErr\n\t\t}\n\t\tresults[i].Error = common.ServerError(err)\n\t}\n\treturn params.StringsResults{results}, nil\n}\nTweak server logic\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage keyupdater\n\nimport (\n\t\"strings\"\n\n\t\"launchpad.net\/juju-core\/errors\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/common\"\n\t\"launchpad.net\/juju-core\/state\/watcher\"\n)\n\n\/\/ KeyUpdater defines the methods on the keyupdater API end point.\ntype KeyUpdater interface {\n\tAuthorisedKeys(args params.Entities) (params.StringsResults, error)\n\tWatchAuthorisedKeys(args params.Entities) (params.NotifyWatchResults, error)\n}\n\n\/\/ KeyUpdaterAPI implements the KeyUpdater interface and is the concrete\n\/\/ implementation of the api end point.\ntype 
KeyUpdaterAPI struct {\n\tstate *state.State\n\tresources *common.Resources\n\tauthorizer common.Authorizer\n\tgetCanRead common.GetAuthFunc\n}\n\nvar _ KeyUpdater = (*KeyUpdaterAPI)(nil)\n\n\/\/ NewKeyUpdaterAPI creates a new server-side keyupdater API end point.\nfunc NewKeyUpdaterAPI(\n\tst *state.State,\n\tresources *common.Resources,\n\tauthorizer common.Authorizer,\n) (*KeyUpdaterAPI, error) {\n\t\/\/ Only machine agents have access to the keyupdater service.\n\tif !authorizer.AuthMachineAgent() {\n\t\treturn nil, common.ErrPerm\n\t}\n\t\/\/ Only the machine itself can read its own credentials.\n\tgetCanRead := func() (common.AuthFunc, error) {\n\t\treturn authorizer.AuthOwner, nil\n\t}\n\treturn &KeyUpdaterAPI{state: st, resources: resources, authorizer: authorizer, getCanRead: getCanRead}, nil\n}\n\n\/\/ WatchAuthorisedKeys starts a watcher to track changes to the authorised ssh keys\n\/\/ for the specified machines.\n\/\/ The current implementation relies on global authorised keys being stored in the environment config.\n\/\/ This will change as new user management and authorisation functionality is added.\nfunc (api *KeyUpdaterAPI) WatchAuthorisedKeys(arg params.Entities) (params.NotifyWatchResults, error) {\n\tresults := make([]params.NotifyWatchResult, len(arg.Entities))\n\n\tcanRead, err := api.getCanRead()\n\tif err != nil {\n\t\treturn params.NotifyWatchResults{}, err\n\t}\n\tfor i, entity := range arg.Entities {\n\t\t\/\/ 1. Check permissions\n\t\tif !canRead(entity.Tag) {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ 2. Check entity exists\n\t\tif _, err := api.state.FindEntity(entity.Tag); err != nil {\n\t\t\tif errors.IsNotFoundError(err) {\n\t\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\t} else {\n\t\t\t\tresults[i].Error = common.ServerError(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ 3. Watch for changes\n\t\tvar err error\n\t\twatch := api.state.WatchForEnvironConfigChanges()\n\t\t\/\/ Consume the initial event.\n\t\tif _, ok := <-watch.Changes(); ok {\n\t\t\tresults[i].NotifyWatcherId = api.resources.Register(watch)\n\t\t} else {\n\t\t\terr = watcher.MustErr(watch)\n\t\t}\n\t\tresults[i].Error = common.ServerError(err)\n\t}\n\treturn params.NotifyWatchResults{results}, nil\n}\n\n\/\/ AuthorisedKeys reports the authorised ssh keys for the specified machines.\n\/\/ The current implementation relies on global authorised keys being stored in the environment config.\n\/\/ This will change as new user management and authorisation functionality is added.\nfunc (api *KeyUpdaterAPI) AuthorisedKeys(arg params.Entities) (params.StringsResults, error) {\n\tif len(arg.Entities) == 0 {\n\t\treturn params.StringsResults{}, nil\n\t}\n\tresults := make([]params.StringsResult, len(arg.Entities))\n\n\t\/\/ For now, authorised keys are global, common to all machines.\n\tvar keys []string\n\tconfig, configErr := api.state.EnvironConfig()\n\tif configErr == nil {\n\t\tkeysString := config.AuthorizedKeys()\n\t\tkeys = strings.Split(keysString, \"\\n\")\n\t}\n\n\tcanRead, err := api.getCanRead()\n\tif err != nil {\n\t\treturn params.StringsResults{}, err\n\t}\n\tfor i, entity := range arg.Entities {\n\t\t\/\/ 1. Check permissions\n\t\tif !canRead(entity.Tag) {\n\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ 2. 
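Check entity exists. Note: a not-found entity is reported below as\n\t\t\/\/ ErrPerm rather than a not-found error, so callers cannot use this\n\t\t\/\/ endpoint to probe which entities exist; other lookup errors are\n\t\t\/\/ returned as-is.\n\t\t\/\/ 2. 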
Check entity exists\n\t\tif _, err := api.state.FindEntity(entity.Tag); err != nil {\n\t\t\tif errors.IsNotFoundError(err) {\n\t\t\t\tresults[i].Error = common.ServerError(common.ErrPerm)\n\t\t\t} else {\n\t\t\t\tresults[i].Error = common.ServerError(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ 3. Get keys\n\t\tvar err error\n\t\tif configErr == nil {\n\t\t\tresults[i].Result = keys\n\t\t} else {\n\t\t\terr = configErr\n\t\t}\n\t\tresults[i].Error = common.ServerError(err)\n\t}\n\treturn params.StringsResults{results}, nil\n}\n<|endoftext|>"} {"text":"\/\/\n\/\/ instagram.go\n\/\/ Copyright 2017 Konstantin Dovnar\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\n\/\/ Package instagram helps you with requesting to Instagram without a key.\npackage instagram\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ GetAccountByUsername try to find account by username.\nfunc GetAccountByUsername(username string) (Account, error) {\n\turl := fmt.Sprintf(accountInfoURL, username)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Account{}, err\n\t}\n\taccount, err := getFromAccountPage(data)\n\tif err != nil {\n\t\treturn account, err\n\t}\n\treturn account, nil\n}\n\n\/\/ GetMediaByURL try to find media by url.\n\/\/ URL should be like https:\/\/www.instagram.com\/p\/12376OtT5o\/\nfunc GetMediaByURL(url string) (Media, error) {\n\tcode := strings.Split(url, \"\/\")[4]\n\treturn GetMediaByCode(code)\n}\n\n\/\/ GetMediaByCode try to find media by code.\n\/\/ Code can be find in URL to media, after p\/.\n\/\/ If URL to media is https:\/\/www.instagram.com\/p\/12376OtT5o\/,\n\/\/ then code of the media is 12376OtT5o.\nfunc GetMediaByCode(code string) (Media, error) {\n\turl := fmt.Sprintf(mediaInfoURL, code)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Media{}, err\n\t}\n\tmedia, err := getFromMediaPage(data)\n\tif err != nil {\n\t\treturn Media{}, err\n\t}\n\treturn media, nil\n}\n\n\/\/ GetAccountMedia try to get slice of user's media.\n\/\/ Limit set how much media you need.\nfunc GetAccountMedia(username string, limit uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\tavailable := true\n\tmedias := []Media{}\n\tfor available && count < limit {\n\t\turl := fmt.Sprintf(accountMediaURL, username, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tavailable, _ = jsonBody[\"more_available\"].(bool)\n\n\t\titems, _ := jsonBody[\"items\"].([]interface{})\n\t\tfor _, item := range items {\n\t\t\tif count >= limit {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\titemData, err := json.Marshal(item)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromAccountMediaList(itemData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t\tmaxID = media.ID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetAllAccountMedia try to get slice of all user's media.\n\/\/ It's function the same as GetAccountMedia,\n\/\/ except limit = count of user's media.\nfunc GetAllAccountMedia(username string) ([]Media, error) {\n\taccount, err := GetAccountByUsername(username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcount := uint16(account.MediaCount)\n\tmedias, err := GetAccountMedia(username, count)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationMedia try to get slice of last 
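location's media.\n\/\/\n\/\/ A hedged usage sketch (the location id is a made-up placeholder value):\n\/\/\n\/\/\tmedias, err := GetLocationMedia(\"213385402\", 20)\n\/\/\tif err == nil {\n\/\/\t\tfor _, m := range medias {\n\/\/\t\t\tfmt.Println(m.ID)\n\/\/\t\t}\n\/\/\t}\n\/\/\n\/\/ GetLocationMedia try to get slice of last 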
location's media.\n\/\/ The id is a facebook location id.\n\/\/ The limit set how much media you need.\nfunc GetLocationMedia(id string, limit uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\thasNext := true\n\tmedias := []Media{}\n\tfor hasNext && count < limit {\n\t\turl := fmt.Sprintf(locationURL, id, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjsonBody, _ = jsonBody[\"location\"].(map[string]interface{})\n\t\tjsonBody, _ = jsonBody[\"media\"].(map[string]interface{})\n\n\t\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\t\tfor _, node := range nodes {\n\t\t\tif count >= limit {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\tnodeData, err := json.Marshal(node)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tjsonBody, _ = jsonBody[\"page_info\"].(map[string]interface{})\n\t\thasNext, _ = jsonBody[\"has_next_page\"].(bool)\n\t\tmaxID, _ = jsonBody[\"end_cursor\"].(string)\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationTopMedia try to get array of top location's media.\n\/\/ The id is a facebook location id.\n\/\/ Length of returned array is 9.\nfunc GetLocationTopMedia(id string) ([9]Media, error) {\n\turl := fmt.Sprintf(locationURL, id, \"\")\n\tjsonBody, err := getJSONFromURL(url)\n\tif err != nil {\n\t\treturn [9]Media{}, err\n\t}\n\tjsonBody, _ = jsonBody[\"location\"].(map[string]interface{})\n\tjsonBody, _ = jsonBody[\"top_posts\"].(map[string]interface{})\n\n\tmedias := [9]Media{}\n\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\tfor i, node := range nodes {\n\t\tnodeData, err := json.Marshal(node)\n\t\tif err == nil {\n\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\tif err == nil {\n\t\t\t\tmedias[i] = media\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationByID try to find location info by id.\n\/\/ The id is a facebook location id.\nfunc GetLocationByID(id string) (Location, error) {\n\turl := fmt.Sprintf(locationURL, id, \"\")\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Location{}, err\n\t}\n\n\tlocation, err := getFromLocationPage(data)\n\tif err != nil {\n\t\treturn Location{}, err\n\t}\n\treturn location, nil\n}\n\n\/\/ GetTagMedia try to get slice of last tag's media.\n\/\/ The limit set how much media you need.\nfunc GetTagMedia(tag string, quantity uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\thasNext := true\n\tmedias := []Media{}\n\tfor hasNext && count < quantity {\n\t\turl := fmt.Sprintf(tagURL, tag, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjsonBody, _ = jsonBody[\"tag\"].(map[string]interface{})\n\t\tjsonBody, _ = jsonBody[\"media\"].(map[string]interface{})\n\n\t\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\t\tfor _, node := range nodes {\n\t\t\tif count >= quantity {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\tnodeData, err := json.Marshal(node)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tjsonBody, _ = jsonBody[\"page_info\"].(map[string]interface{})\n\t\thasNext, _ = jsonBody[\"has_next_page\"].(bool)\n\t\tmaxID, _ = jsonBody[\"end_cursor\"].(string)\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetTagTopMedia try to get array of top tag's media.\n\/\/ 
Length of returned array is 9.\nfunc GetTagTopMedia(tag string) ([9]Media, error) {\n\turl := fmt.Sprintf(tagURL, tag, \"\")\n\tjsonBody, err := getJSONFromURL(url)\n\tif err != nil {\n\t\treturn [9]Media{}, err\n\t}\n\tjsonBody, _ = jsonBody[\"tag\"].(map[string]interface{})\n\tjsonBody, _ = jsonBody[\"top_posts\"].(map[string]interface{})\n\n\tmedias := [9]Media{}\n\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\tfor i, node := range nodes {\n\t\tnodeData, err := json.Marshal(node)\n\t\tif err == nil {\n\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\tif err == nil {\n\t\t\t\tmedias[i] = media\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ SearchForUsers try to find users by given username.\n\/\/ Return slice of Account with length of 0 or more.\nfunc SearchForUsers(username string) ([]Account, error) {\n\turl := fmt.Sprintf(searchURL, username)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taccounts, err := getFromSearchPage(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn accounts, nil\n}\n\nfunc getJSONFromURL(url string) (map[string]interface{}, error) {\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar jsonBody map[string]interface{}\n\terr = json.Unmarshal(data, &jsonBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn jsonBody, nil\n}\n\nfunc getDataFromURL(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil || resp.StatusCode == 404 {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\nthrow error if http status != 200\/\/\n\/\/ instagram.go\n\/\/ Copyright 2017 Konstantin Dovnar\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\n\/\/ Package instagram helps you with requesting to Instagram without a key.\npackage instagram\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"errors\"\n)\n\n\/\/ GetAccountByUsername try to find account by username.\nfunc GetAccountByUsername(username string) (Account, error) {\n\turl := fmt.Sprintf(accountInfoURL, username)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Account{}, err\n\t}\n\taccount, err := getFromAccountPage(data)\n\tif err != nil {\n\t\treturn account, err\n\t}\n\treturn account, nil\n}\n\n\/\/ GetMediaByURL try to find media by url.\n\/\/ URL should be like https:\/\/www.instagram.com\/p\/12376OtT5o\/\nfunc GetMediaByURL(url string) (Media, error) {\n\tcode := strings.Split(url, \"\/\")[4]\n\treturn GetMediaByCode(code)\n}\n\n\/\/ GetMediaByCode try to find media by code.\n\/\/ Code can be find in URL to media, after p\/.\n\/\/ If URL to media is https:\/\/www.instagram.com\/p\/12376OtT5o\/,\n\/\/ then code of the media is 12376OtT5o.\nfunc GetMediaByCode(code string) (Media, error) {\n\turl := fmt.Sprintf(mediaInfoURL, code)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Media{}, err\n\t}\n\tmedia, err := getFromMediaPage(data)\n\tif err != nil {\n\t\treturn Media{}, err\n\t}\n\treturn media, nil\n}\n\n\/\/ GetAccountMedia try to get slice of user's media.\n\/\/ Limit set how much media you need.\nfunc GetAccountMedia(username string, limit uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\tavailable := true\n\tmedias := []Media{}\n\tfor available && count < limit {\n\t\turl := 
fmt.Sprintf(accountMediaURL, username, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tavailable, _ = jsonBody[\"more_available\"].(bool)\n\n\t\titems, _ := jsonBody[\"items\"].([]interface{})\n\t\tfor _, item := range items {\n\t\t\tif count >= limit {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\titemData, err := json.Marshal(item)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromAccountMediaList(itemData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t\tmaxID = media.ID\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetAllAccountMedia try to get slice of all user's media.\n\/\/ It's function the same as GetAccountMedia,\n\/\/ except limit = count of user's media.\nfunc GetAllAccountMedia(username string) ([]Media, error) {\n\taccount, err := GetAccountByUsername(username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcount := uint16(account.MediaCount)\n\tmedias, err := GetAccountMedia(username, count)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationMedia try to get slice of last location's media.\n\/\/ The id is a facebook location id.\n\/\/ The limit set how much media you need.\nfunc GetLocationMedia(id string, limit uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\thasNext := true\n\tmedias := []Media{}\n\tfor hasNext && count < limit {\n\t\turl := fmt.Sprintf(locationURL, id, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjsonBody, _ = jsonBody[\"location\"].(map[string]interface{})\n\t\tjsonBody, _ = jsonBody[\"media\"].(map[string]interface{})\n\n\t\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\t\tfor _, node := range nodes {\n\t\t\tif count >= limit {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\tnodeData, err := json.Marshal(node)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tjsonBody, _ = jsonBody[\"page_info\"].(map[string]interface{})\n\t\thasNext, _ = jsonBody[\"has_next_page\"].(bool)\n\t\tmaxID, _ = jsonBody[\"end_cursor\"].(string)\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationTopMedia try to get array of top location's media.\n\/\/ The id is a facebook location id.\n\/\/ Length of returned array is 9.\nfunc GetLocationTopMedia(id string) ([9]Media, error) {\n\turl := fmt.Sprintf(locationURL, id, \"\")\n\tjsonBody, err := getJSONFromURL(url)\n\tif err != nil {\n\t\treturn [9]Media{}, err\n\t}\n\tjsonBody, _ = jsonBody[\"location\"].(map[string]interface{})\n\tjsonBody, _ = jsonBody[\"top_posts\"].(map[string]interface{})\n\n\tmedias := [9]Media{}\n\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\tfor i, node := range nodes {\n\t\tnodeData, err := json.Marshal(node)\n\t\tif err == nil {\n\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\tif err == nil {\n\t\t\t\tmedias[i] = media\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetLocationByID try to find location info by id.\n\/\/ The id is a facebook location id.\nfunc GetLocationByID(id string) (Location, error) {\n\turl := fmt.Sprintf(locationURL, id, \"\")\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn Location{}, err\n\t}\n\n\tlocation, err := getFromLocationPage(data)\n\tif err != nil {\n\t\treturn Location{}, err\n\t}\n\treturn location, nil\n}\n\n\/\/ GetTagMedia try to get slice of last 
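tag's media.\n\/\/\n\/\/ A hedged usage sketch (tag and limit are example values only):\n\/\/\n\/\/\tmedias, err := GetTagMedia(\"golang\", 12)\n\/\/\tif err != nil {\n\/\/\t\t\/\/ handle the error, e.g. a non-200 response\n\/\/\t}\n\/\/\n\/\/ GetTagMedia try to get slice of last 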
tag's media.\n\/\/ The limit sets how much media you need.\nfunc GetTagMedia(tag string, quantity uint16) ([]Media, error) {\n\tvar count uint16\n\tmaxID := \"\"\n\thasNext := true\n\tmedias := []Media{}\n\tfor hasNext && count < quantity {\n\t\turl := fmt.Sprintf(tagURL, tag, maxID)\n\t\tjsonBody, err := getJSONFromURL(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tjsonBody, _ = jsonBody[\"tag\"].(map[string]interface{})\n\t\tjsonBody, _ = jsonBody[\"media\"].(map[string]interface{})\n\n\t\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\t\tfor _, node := range nodes {\n\t\t\tif count >= quantity {\n\t\t\t\treturn medias, nil\n\t\t\t}\n\t\t\tcount++\n\t\t\tnodeData, err := json.Marshal(node)\n\t\t\tif err == nil {\n\t\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\t\tif err == nil {\n\t\t\t\t\tmedias = append(medias, media)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tjsonBody, _ = jsonBody[\"page_info\"].(map[string]interface{})\n\t\thasNext, _ = jsonBody[\"has_next_page\"].(bool)\n\t\tmaxID, _ = jsonBody[\"end_cursor\"].(string)\n\t}\n\treturn medias, nil\n}\n\n\/\/ GetTagTopMedia tries to get an array of the top tag's media.\n\/\/ Length of returned array is 9.\nfunc GetTagTopMedia(tag string) ([9]Media, error) {\n\turl := fmt.Sprintf(tagURL, tag, \"\")\n\tjsonBody, err := getJSONFromURL(url)\n\tif err != nil {\n\t\treturn [9]Media{}, err\n\t}\n\tjsonBody, _ = jsonBody[\"tag\"].(map[string]interface{})\n\tjsonBody, _ = jsonBody[\"top_posts\"].(map[string]interface{})\n\n\tmedias := [9]Media{}\n\tnodes, _ := jsonBody[\"nodes\"].([]interface{})\n\tfor i, node := range nodes {\n\t\tnodeData, err := json.Marshal(node)\n\t\tif err == nil {\n\t\t\tmedia, err := getFromSearchMediaList(nodeData)\n\t\t\tif err == nil {\n\t\t\t\tmedias[i] = media\n\t\t\t}\n\t\t}\n\t}\n\treturn medias, nil\n}\n\n\/\/ SearchForUsers tries to find users by the given username.\n\/\/ Returns a slice of Account with length 0 or more.\nfunc SearchForUsers(username string) ([]Account, error) {\n\turl := fmt.Sprintf(searchURL, username)\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taccounts, err := getFromSearchPage(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn accounts, nil\n}\n\nfunc getJSONFromURL(url string) (map[string]interface{}, error) {\n\tdata, err := getDataFromURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar jsonBody map[string]interface{}\n\terr = json.Unmarshal(data, &jsonBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn jsonBody, nil\n}\n\nfunc getDataFromURL(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, errors.New(\"statusCode != 200\")\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (C) 2017 Damon Revoe. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype fileProcessor func(sourcePathname, relativePathname string,\n\tinfo os.FileInfo) error\n\n\/\/ ProcessAllFiles calls the processFile() function for every file in\n\/\/ sourceDir. 
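A hedged sketch of a fileProcessor callback and call (names are\n\/\/ illustrative):\n\/\/\n\/\/\tprintFile := func(src, rel string, info os.FileInfo) error {\n\/\/\t\tfmt.Println(rel)\n\/\/\t\treturn nil\n\/\/\t}\n\/\/\terr := processAllFiles(srcDir, dstDir, printFile)\n\/\/\n\/\/ ProcessAllFiles calls the processFile() function for every file in\n\/\/ sourceDir. 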
All hidden files and all files in hidden subdirectories\n\/\/ as well as package definition files are skipped.\nfunc processAllFiles(sourceDir, targetDir string,\n\tprocessFile fileProcessor) error {\n\n\tsourceDir = filepath.Clean(sourceDir)\n\tsourceDirWithSlash := sourceDir + string(filepath.Separator)\n\n\treturn filepath.Walk(sourceDir, func(sourcePathname string,\n\t\tinfo os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Ignore the top-level directory (sourceDir itself).\n\t\tif len(sourcePathname) <= len(sourceDirWithSlash) {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Panic if filepath.Walk() does not behave as expected.\n\t\tif !strings.HasPrefix(sourcePathname, sourceDirWithSlash) {\n\t\t\tpanic(sourcePathname + \" does not start with \" +\n\t\t\t\tsourceDirWithSlash)\n\t\t}\n\n\t\t\/\/ Relative pathname of the source file in the source\n\t\t\/\/ directory (and the target file in the target directory).\n\t\trelativePathname := sourcePathname[len(sourceDirWithSlash):]\n\n\t\t\/\/ Ignore hidden files and the package definition file.\n\t\tif filepath.Base(relativePathname)[0] == '.' {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\treturn nil\n\t\t} else if info.IsDir() {\n\t\t\treturn nil\n\t\t} else if relativePathname == packageDefinitionFilename {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn processFile(sourcePathname, relativePathname, info)\n\t})\n}\n\ntype filesFromSourceDir map[string]struct{}\n\nfunc linkFilesFromSourceDir(pd *packageDefinition,\n\tprojectDir string) (filesFromSourceDir, error) {\n\tsourceFiles := make(filesFromSourceDir)\n\tsourceDir := filepath.Dir(pd.pathname)\n\n\tlinkFile := func(sourcePathname, relativePathname string,\n\t\tsourceFileInfo os.FileInfo) error {\n\t\tsourceFiles[relativePathname] = struct{}{}\n\t\ttargetPathname := filepath.Join(projectDir, relativePathname)\n\t\ttargetFileInfo, err := os.Lstat(targetPathname)\n\t\tif err == nil {\n\t\t\tif (targetFileInfo.Mode() & os.ModeSymlink) != 0 {\n\t\t\t\toriginalLink, err := os.Readlink(targetPathname)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif originalLink == sourcePathname {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = os.Remove(targetPathname); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Println(\"L\", targetPathname)\n\n\t\tif err = os.MkdirAll(filepath.Dir(targetPathname),\n\t\t\tos.ModePerm); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn os.Symlink(sourcePathname, targetPathname)\n\t}\n\n\terr := processAllFiles(sourceDir, projectDir, linkFile)\n\n\treturn sourceFiles, err\n}\n\n\/\/ For each source file in 'templateDir', generateBuildFilesFromProjectTemplate\n\/\/ generates an output file with the same relative pathname inside 'projectDir'.\nfunc generateBuildFilesFromProjectTemplate(templateDir,\n\tprojectDir string, pd *packageDefinition) error {\n\n\tsourceFiles, err := linkFilesFromSourceDir(pd, projectDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgenerateFile := func(sourcePathname, relativePathname string,\n\t\tsourceFileInfo os.FileInfo) error {\n\t\tif _, sourceFile := sourceFiles[relativePathname]; sourceFile {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Read the contents of the template file. 
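The call order required by\n\t\t\/\/ text\/template is, roughly (a hedged sketch; funcMap stands for the\n\t\t\/\/ helper map this project registers):\n\t\t\/\/\n\t\t\/\/\tt, err := template.New(name).Funcs(funcMap).Parse(string(contents))\n\t\t\/\/\n\t\t\/\/ Read the contents of the template file. 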
Cannot use\n\t\t\/\/ template.ParseFiles() because a Funcs() call must be\n\t\t\/\/ made between New() and Parse().\n\t\ttemplateContents, err := ioutil.ReadFile(sourcePathname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn generateFilesFromFileTemplate(projectDir,\n\t\t\trelativePathname, templateContents,\n\t\t\tsourceFileInfo.Mode(),\n\t\t\tpd, sourceFiles)\n\t}\n\n\treturn processAllFiles(templateDir, projectDir, generateFile)\n}\n\n\/\/ EmbeddedTemplateFile defines the file mode and the contents\n\/\/ of a single file that is a part of an embedded project template.\ntype embeddedTemplateFile struct {\n\tpathname string\n\tmode os.FileMode\n\tcontents []byte\n}\n\n\/\/ GenerateBuildFilesFromEmbeddedTemplate generates project build\n\/\/ files from a built-in template pointed to by the 't' parameter.\nfunc generateBuildFilesFromEmbeddedTemplate(t *[]embeddedTemplateFile,\n\tprojectDir string, pd *packageDefinition) error {\n\n\tsourceFiles, err := linkFilesFromSourceDir(pd, projectDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fileInfo := range append(*t, commonTemplateFiles...) {\n\t\tif _, exists := sourceFiles[fileInfo.pathname]; exists {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := generateFilesFromFileTemplate(projectDir,\n\t\t\tfileInfo.pathname, fileInfo.contents, fileInfo.mode,\n\t\t\tpd, sourceFiles); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (pd *packageDefinition) getPackageGeneratorFunc(\n\tpackageDir string) (func() error, error) {\n\tswitch pd.packageType {\n\tcase \"app\", \"application\":\n\t\treturn func() error {\n\t\t\treturn generateBuildFilesFromEmbeddedTemplate(\n\t\t\t\t&appTemplate, packageDir, pd)\n\t\t}, nil\n\n\tcase \"lib\", \"library\":\n\t\treturn func() error {\n\t\t\treturn generateBuildFilesFromEmbeddedTemplate(\n\t\t\t\t&libTemplate, packageDir, pd)\n\t\t}, nil\n\n\tdefault:\n\t\treturn nil, errors.New(pd.packageName +\n\t\t\t\": unknown package type '\" + pd.packageType + \"'\")\n\t}\n}\nSimplify generateBuildFilesFromEmbeddedTemplate\/\/ Copyright (C) 2017 Damon Revoe. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype fileProcessor func(sourcePathname, relativePathname string,\n\tinfo os.FileInfo) error\n\n\/\/ ProcessAllFiles calls the processFile() function for every file in\n\/\/ sourceDir. 
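A hedged sketch of a fileProcessor callback and call (names are\n\/\/ illustrative):\n\/\/\n\/\/\tprintFile := func(src, rel string, info os.FileInfo) error {\n\/\/\t\tfmt.Println(rel)\n\/\/\t\treturn nil\n\/\/\t}\n\/\/\terr := processAllFiles(srcDir, dstDir, printFile)\n\/\/\n\/\/ ProcessAllFiles calls the processFile() function for every file in\n\/\/ sourceDir. 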
All hidden files and all files in hidden subdirectories\n\/\/ as well as package definition files are skipped.\nfunc processAllFiles(sourceDir, targetDir string,\n\tprocessFile fileProcessor) error {\n\n\tsourceDir = filepath.Clean(sourceDir)\n\tsourceDirWithSlash := sourceDir + string(filepath.Separator)\n\n\treturn filepath.Walk(sourceDir, func(sourcePathname string,\n\t\tinfo os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Ignore the top-level directory (sourceDir itself).\n\t\tif len(sourcePathname) <= len(sourceDirWithSlash) {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Panic if filepath.Walk() does not behave as expected.\n\t\tif !strings.HasPrefix(sourcePathname, sourceDirWithSlash) {\n\t\t\tpanic(sourcePathname + \" does not start with \" +\n\t\t\t\tsourceDirWithSlash)\n\t\t}\n\n\t\t\/\/ Relative pathname of the source file in the source\n\t\t\/\/ directory (and the target file in the target directory).\n\t\trelativePathname := sourcePathname[len(sourceDirWithSlash):]\n\n\t\t\/\/ Ignore hidden files and the package definition file.\n\t\tif filepath.Base(relativePathname)[0] == '.' {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\treturn nil\n\t\t} else if info.IsDir() {\n\t\t\treturn nil\n\t\t} else if relativePathname == packageDefinitionFilename {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn processFile(sourcePathname, relativePathname, info)\n\t})\n}\n\ntype filesFromSourceDir map[string]struct{}\n\nfunc linkFilesFromSourceDir(pd *packageDefinition,\n\tprojectDir string) (filesFromSourceDir, error) {\n\tsourceFiles := make(filesFromSourceDir)\n\tsourceDir := filepath.Dir(pd.pathname)\n\n\tlinkFile := func(sourcePathname, relativePathname string,\n\t\tsourceFileInfo os.FileInfo) error {\n\t\tsourceFiles[relativePathname] = struct{}{}\n\t\ttargetPathname := filepath.Join(projectDir, relativePathname)\n\t\ttargetFileInfo, err := os.Lstat(targetPathname)\n\t\tif err == nil {\n\t\t\tif (targetFileInfo.Mode() & os.ModeSymlink) != 0 {\n\t\t\t\toriginalLink, err := os.Readlink(targetPathname)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif originalLink == sourcePathname {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = os.Remove(targetPathname); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfmt.Println(\"L\", targetPathname)\n\n\t\tif err = os.MkdirAll(filepath.Dir(targetPathname),\n\t\t\tos.ModePerm); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn os.Symlink(sourcePathname, targetPathname)\n\t}\n\n\terr := processAllFiles(sourceDir, projectDir, linkFile)\n\n\treturn sourceFiles, err\n}\n\n\/\/ For each source file in 'templateDir', generateBuildFilesFromProjectTemplate\n\/\/ generates an output file with the same relative pathname inside 'projectDir'.\nfunc generateBuildFilesFromProjectTemplate(templateDir,\n\tprojectDir string, pd *packageDefinition) error {\n\n\tsourceFiles, err := linkFilesFromSourceDir(pd, projectDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgenerateFile := func(sourcePathname, relativePathname string,\n\t\tsourceFileInfo os.FileInfo) error {\n\t\tif _, sourceFile := sourceFiles[relativePathname]; sourceFile {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Read the contents of the template file. 
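The call order required by\n\t\t\/\/ text\/template is, roughly (a hedged sketch; funcMap stands for the\n\t\t\/\/ helper map this project registers):\n\t\t\/\/\n\t\t\/\/\tt, err := template.New(name).Funcs(funcMap).Parse(string(contents))\n\t\t\/\/\n\t\t\/\/ Read the contents of the template file. 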
Cannot use\n\t\t\/\/ template.ParseFiles() because a Funcs() call must be\n\t\t\/\/ made between New() and Parse().\n\t\ttemplateContents, err := ioutil.ReadFile(sourcePathname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn generateFilesFromFileTemplate(projectDir,\n\t\t\trelativePathname, templateContents,\n\t\t\tsourceFileInfo.Mode(),\n\t\t\tpd, sourceFiles)\n\t}\n\n\treturn processAllFiles(templateDir, projectDir, generateFile)\n}\n\n\/\/ EmbeddedTemplateFile defines the file mode and the contents\n\/\/ of a single file that is a part of an embedded project template.\ntype embeddedTemplateFile struct {\n\tpathname string\n\tmode os.FileMode\n\tcontents []byte\n}\n\n\/\/ GenerateBuildFilesFromEmbeddedTemplate generates project build\n\/\/ files from a built-in template pointed to by the 't' parameter.\nfunc generateBuildFilesFromEmbeddedTemplate(t []embeddedTemplateFile,\n\tprojectDir string, pd *packageDefinition) error {\n\n\tsourceFiles, err := linkFilesFromSourceDir(pd, projectDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fileInfo := range append(t, commonTemplateFiles...) {\n\t\tif _, exists := sourceFiles[fileInfo.pathname]; exists {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := generateFilesFromFileTemplate(projectDir,\n\t\t\tfileInfo.pathname, fileInfo.contents, fileInfo.mode,\n\t\t\tpd, sourceFiles); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (pd *packageDefinition) getPackageGeneratorFunc(\n\tpackageDir string) (func() error, error) {\n\tswitch pd.packageType {\n\tcase \"app\", \"application\":\n\t\treturn func() error {\n\t\t\treturn generateBuildFilesFromEmbeddedTemplate(\n\t\t\t\tappTemplate, packageDir, pd)\n\t\t}, nil\n\n\tcase \"lib\", \"library\":\n\t\treturn func() error {\n\t\t\treturn generateBuildFilesFromEmbeddedTemplate(\n\t\t\t\tlibTemplate, packageDir, pd)\n\t\t}, nil\n\n\tdefault:\n\t\treturn nil, errors.New(pd.packageName +\n\t\t\t\": unknown package type '\" + pd.packageType + \"'\")\n\t}\n}\n<|endoftext|>"} {"text":"package exposer\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/inconshreveable\/muxado\"\n)\n\ntype HandshakeHandleFunc func(proto *Protocal, cmd string, details []byte) error\ntype Protocal struct {\n\tconn net.Conn\n\tisHandshakeDone bool\n\thandshakeDecoder *json.Decoder\n\teventbus chan HandshakeIncoming\n\n\t\/\/ handle handshake\n\tmutex_On *sync.Mutex\n\tOn HandshakeHandleFunc\n}\n\nfunc NewProtocal(conn net.Conn) *Protocal {\n\treturn &Protocal{\n\t\tconn: conn,\n\t\tisHandshakeDone: false,\n\t\thandshakeDecoder: json.NewDecoder(conn),\n\t\teventbus: make(chan HandshakeIncoming),\n\t\tmutex_On: new(sync.Mutex),\n\t}\n}\n\nfunc (proto *Protocal) Reply(cmd string, details interface{}) error {\n\tif proto.isHandshakeDone {\n\t\tpanic(\"protoport handshake is done, unexpect Reply call\")\n\t}\n\n\tdata, err := json.Marshal(&HandshakeOutgoing{\n\t\tCommand: cmd,\n\t\tDetails: details,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = proto.conn.Write(data)\n\treturn err\n}\n\nfunc newReadWriteCloser(buffered io.Reader, conn net.Conn) io.ReadWriteCloser {\n\ttype readWriteCloser struct {\n\t\tio.Reader\n\t\tio.Writer\n\t\tio.Closer\n\t}\n\n\treturn &readWriteCloser{\n\t\tReader: io.MultiReader(buffered, conn),\n\t\tWriter: conn,\n\t\tCloser: conn,\n\t}\n}\n\nfunc (proto *Protocal) Multiplex(isClient bool) muxado.Session {\n\tproto.isHandshakeDone = true\n\n\tif isClient {\n\t\treturn 
muxado.Client(newReadWriteCloser(proto.handshakeDecoder.Buffered(), proto.conn), nil)\n\t}\n\n\treturn muxado.Server(newReadWriteCloser(proto.handshakeDecoder.Buffered(), proto.conn), nil)\n}\n\nfunc (proto *Protocal) Forward(conn net.Conn) {\n\tdefer proto.conn.Close()\n\tdefer conn.Close()\n\n\tproto.isHandshakeDone = true\n\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer conn.Close()\n\t\tio.Copy(conn, io.MultiReader(proto.handshakeDecoder.Buffered(), proto.conn))\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer proto.conn.Close()\n\t\tio.Copy(proto.conn, conn)\n\t}()\n\twg.Wait()\n}\n\nfunc (proto *Protocal) Request(cmd string, details interface{}) {\n\terr := proto.Reply(cmd, details)\n\tif err != nil {\n\t\tproto.conn.Close()\n\t\treturn\n\t}\n\n\tproto.Handle()\n}\n\nfunc (proto *Protocal) Emit(event string, details interface{}) (err error) {\n\tvar data []byte\n\tdata, err = json.Marshal(&HandshakeOutgoing{\n\t\tCommand: event,\n\t\tDetails: details,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar handshake HandshakeIncoming\n\terr = json.Unmarshal(data, &handshake)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.New(\"conn closed\")\n\t\t}\n\t}()\n\tproto.eventbus <- handshake\n\treturn nil\n}\n\nfunc (proto *Protocal) Handle() {\n\tdefer proto.conn.Close()\n\tdefer close(proto.eventbus)\n\n\tif proto.On == nil {\n\t\tpanic(\"not set Protocal.On\")\n\t}\n\n\tgo func() {\n\t\tdefer proto.conn.Close()\n\n\t\tfor handshake := range proto.eventbus {\n\t\t\terr := func() error {\n\t\t\t\tproto.mutex_On.Lock()\n\t\t\t\tdefer proto.mutex_On.Unlock()\n\t\t\t\treturn proto.On(proto, handshake.Command, handshake.Details)\n\t\t\t}()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar handshake HandshakeIncoming\n\tfor !proto.isHandshakeDone {\n\t\terr := proto.handshakeDecoder.Decode(&handshake)\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle error\n\t\t\treturn\n\t\t}\n\n\t\terr = func() error {\n\t\t\tproto.mutex_On.Lock()\n\t\t\tdefer proto.mutex_On.Unlock()\n\t\t\treturn proto.On(proto, handshake.Command, handshake.Details)\n\t\t}()\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle error\n\t\t\treturn\n\t\t}\n\t}\n}\nfix: race at muxado.Server() .Client()package exposer\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/inconshreveable\/muxado\"\n)\n\ntype HandshakeHandleFunc func(proto *Protocal, cmd string, details []byte) error\ntype Protocal struct {\n\tconn net.Conn\n\tisHandshakeDone bool\n\thandshakeDecoder *json.Decoder\n\teventbus chan HandshakeIncoming\n\n\t\/\/ handle handshake\n\tmutex_On *sync.Mutex\n\tOn HandshakeHandleFunc\n}\n\nfunc NewProtocal(conn net.Conn) *Protocal {\n\treturn &Protocal{\n\t\tconn: conn,\n\t\tisHandshakeDone: false,\n\t\thandshakeDecoder: json.NewDecoder(conn),\n\t\teventbus: make(chan HandshakeIncoming),\n\t\tmutex_On: new(sync.Mutex),\n\t}\n}\n\nfunc (proto *Protocal) Reply(cmd string, details interface{}) error {\n\tif proto.isHandshakeDone {\n\t\tpanic(\"protoport handshake is done, unexpect Reply call\")\n\t}\n\n\tdata, err := json.Marshal(&HandshakeOutgoing{\n\t\tCommand: cmd,\n\t\tDetails: details,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = proto.conn.Write(data)\n\treturn err\n}\n\nfunc newReadWriteCloser(buffered io.Reader, conn net.Conn) io.ReadWriteCloser {\n\ttype readWriteCloser struct 
{\n\t\tio.Reader\n\t\tio.Writer\n\t\tio.Closer\n\t}\n\n\treturn &readWriteCloser{\n\t\tReader: io.MultiReader(buffered, conn),\n\t\tWriter: conn,\n\t\tCloser: conn,\n\t}\n}\n\nvar (\n\tmuxadoMutex = new(sync.Mutex)\n)\n\nfunc (proto *Protocal) Multiplex(isClient bool) muxado.Session {\n\tproto.isHandshakeDone = true\n\n\tmuxadoMutex.Lock()\n\tdefer muxadoMutex.Unlock()\n\n\tif isClient {\n\t\treturn muxado.Client(newReadWriteCloser(proto.handshakeDecoder.Buffered(), proto.conn), nil)\n\t}\n\n\treturn muxado.Server(newReadWriteCloser(proto.handshakeDecoder.Buffered(), proto.conn), nil)\n}\n\nfunc (proto *Protocal) Forward(conn net.Conn) {\n\tdefer proto.conn.Close()\n\tdefer conn.Close()\n\n\tproto.isHandshakeDone = true\n\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer conn.Close()\n\t\tio.Copy(conn, io.MultiReader(proto.handshakeDecoder.Buffered(), proto.conn))\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer proto.conn.Close()\n\t\tio.Copy(proto.conn, conn)\n\t}()\n\twg.Wait()\n}\n\nfunc (proto *Protocal) Request(cmd string, details interface{}) {\n\terr := proto.Reply(cmd, details)\n\tif err != nil {\n\t\tproto.conn.Close()\n\t\treturn\n\t}\n\n\tproto.Handle()\n}\n\nfunc (proto *Protocal) Emit(event string, details interface{}) (err error) {\n\tvar data []byte\n\tdata, err = json.Marshal(&HandshakeOutgoing{\n\t\tCommand: event,\n\t\tDetails: details,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar handshake HandshakeIncoming\n\terr = json.Unmarshal(data, &handshake)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.New(\"conn closed\")\n\t\t}\n\t}()\n\tproto.eventbus <- handshake\n\treturn nil\n}\n\nfunc (proto *Protocal) Handle() {\n\tdefer proto.conn.Close()\n\tdefer close(proto.eventbus)\n\n\tif proto.On == nil {\n\t\tpanic(\"not set Protocal.On\")\n\t}\n\n\tgo func() {\n\t\tdefer proto.conn.Close()\n\n\t\tfor handshake := range proto.eventbus {\n\t\t\terr := func() error {\n\t\t\t\tproto.mutex_On.Lock()\n\t\t\t\tdefer proto.mutex_On.Unlock()\n\t\t\t\treturn proto.On(proto, handshake.Command, handshake.Details)\n\t\t\t}()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar handshake HandshakeIncoming\n\tfor !proto.isHandshakeDone {\n\t\terr := proto.handshakeDecoder.Decode(&handshake)\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle error\n\t\t\treturn\n\t\t}\n\n\t\terr = func() error {\n\t\t\tproto.mutex_On.Lock()\n\t\t\tdefer proto.mutex_On.Unlock()\n\t\t\treturn proto.On(proto, handshake.Command, handshake.Details)\n\t\t}()\n\t\tif err != nil {\n\t\t\t\/\/ TODO: handle error\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package dispatcher\n\nimport \"strings\"\n\nimport \"github.com\/cnf\/go-claw\/listeners\"\nimport \"github.com\/cnf\/go-claw\/targets\"\nimport \"github.com\/cnf\/go-claw\/clog\"\n\ntype Dispatcher struct {\n Configfile string\n config Config\n listenermap map[string]*listeners.Listener\n targetmap map[string]targets.Target\n modemap map[string]*Mode\n activemode string\n cs *listeners.CommandStream\n}\n\nfunc (self *Dispatcher) Start() {\n defer self.cs.Close()\n self.activemode = \"default\"\n self.activemode = \"plex\"\n self.readConfig()\n self.setupListeners()\n self.setupModes()\n self.setupTargets()\n\n var out listeners.RemoteCommand\n\n for self.cs.Next(&out) {\n if self.cs.HasError() {\n clog.Warn(\"An error occured somewhere: %v\", self.cs.GetError())\n self.cs.ClearError()\n }\n \/\/ clog.Debug(\"repeat: %2d - 
key: %s - source: %s\", out.Repeat, out.Key, out.Source)\n        self.dispatch(&out)\n    }\n}\n\nfunc (self *Dispatcher) setupListeners() {\n    self.listenermap = make(map[string]*listeners.Listener)\n    self.cs = listeners.NewCommandStream()\n\n    for k, v := range self.config.Listeners {\n        l, ok := listeners.GetListener(v.Module, v.Params)\n        if ok {\n            clog.Debug(\"Setting up listener `%s`\", k)\n            self.listenermap[k] = &l\n            self.cs.AddListener(l)\n        }\n    }\n\n}\n\nfunc (self *Dispatcher) setupModes() {\n    self.modemap = make(map[string]*Mode)\n    for k, v := range self.config.Modes {\n        self.modemap[k] = &Mode{Keys: make(map[string][]string)}\n        for kk, kv := range v {\n            self.modemap[k].Keys[kk] = make([]string, len(kv))\n            i := 0\n            for _, av := range kv {\n                self.modemap[k].Keys[kk][i] = av\n                i++\n            }\n        }\n    }\n}\n\nfunc (self *Dispatcher) setupTargets() {\n    self.targetmap = make(map[string]targets.Target)\n    for k, v := range self.config.Targets {\n        t, ok := targets.GetTarget(v.Module, k, v.Params)\n        if ok {\n            self.targetmap[k] = t\n            println(k)\n        }\n    }\n}\n\nfunc (self *Dispatcher) dispatch(rc *listeners.RemoteCommand) bool {\n    clog.Debug(\"repeat: %2d - key: %s - source: %s\", rc.Repeat, rc.Key, rc.Source)\n    var mod string\n    var cmd string\n    var args string\n    var rok bool\n    if val, ok := self.modemap[self.activemode].Keys[rc.Key]; ok {\n        clog.Debug(\"FOUND in %s\", self.activemode)\n        for _, v := range val {\n            clog.Debug(v)\n            mod, cmd, args, rok = self.resolve(v)\n            self.sender(mod, cmd, args)\n        }\n        return true\n    } else if val, ok := self.modemap[\"default\"].Keys[rc.Key]; ok {\n        clog.Debug(\"FOUND in default!\")\n        for _, v := range val {\n            clog.Debug(v)\n            mod, cmd, args, rok = self.resolve(v)\n            self.sender(mod, cmd, args)\n        }\n        return true\n    } else {\n        clog.Debug(\"Not found\")\n        return false\n    }\n    if !rok {\n        return false\n    }\n\n    return true\n}\n\nfunc (self *Dispatcher) resolve(input string) (mod string, cmd string, args string, ok bool) {\n    clog.Debug(\"Resolving input for %s\", input)\n    foo := strings.SplitN(input, \"::\", 2)\n    if len(foo) < 2 {\n        clog.Warn(\"%s is not a well formed command\", input)\n        return \"\", \"\", \"\", false\n    }\n    bar := strings.SplitN(foo[1], \" \", 2)\n    baz := \"\"\n    if len(bar) > 1 {\n        baz = bar[1]\n    }\n\n    return foo[0], bar[0], baz, true\n}\n\nfunc (self *Dispatcher) sender(mod string, cmd string, args string) bool {\n    if t, ok := self.targetmap[mod]; ok {\n        sok := t.SendCommand(cmd, args)\n        if sok {\n            clog.Debug(\"Sent command %# v\", sok)\n        }\n        return true\n    }\n    return false\n}\n\nmsg cleanup and modes!package dispatcher\n\nimport \"strings\"\n\nimport \"github.com\/cnf\/go-claw\/listeners\"\nimport \"github.com\/cnf\/go-claw\/targets\"\nimport \"github.com\/cnf\/go-claw\/clog\"\n\ntype Dispatcher struct {\n    Configfile string\n    config Config\n    listenermap map[string]*listeners.Listener\n    targetmap map[string]targets.Target\n    modemap map[string]*Mode\n    activemode string\n    cs *listeners.CommandStream\n}\n\nfunc (self *Dispatcher) Start() {\n    defer self.cs.Close()\n    self.activemode = \"default\"\n    self.activemode = \"plex\"\n    self.readConfig()\n    self.setupListeners()\n    self.setupModes()\n    self.setupTargets()\n\n    var out listeners.RemoteCommand\n\n    for self.cs.Next(&out) {\n        if self.cs.HasError() {\n            clog.Warn(\"An error occurred somewhere: %v\", self.cs.GetError())\n            self.cs.ClearError()\n        }\n        \/\/ clog.Debug(\"repeat: %2d - key: %s - source: %s\", out.Repeat, out.Key, out.Source)\n        self.dispatch(&out)\n    }\n}\n\nfunc (self *Dispatcher) setupListeners() {\n    self.listenermap = 
make(map[string]*listeners.Listener)\n    self.cs = listeners.NewCommandStream()\n\n    for k, v := range self.config.Listeners {\n        l, ok := listeners.GetListener(v.Module, v.Params)\n        if ok {\n            clog.Debug(\"Setting up listener `%s`\", k)\n            self.listenermap[k] = &l\n            self.cs.AddListener(l)\n        }\n    }\n\n}\n\nfunc (self *Dispatcher) setupModes() {\n    self.modemap = make(map[string]*Mode)\n    for k, v := range self.config.Modes {\n        self.modemap[k] = &Mode{Keys: make(map[string][]string)}\n        for kk, kv := range v {\n            self.modemap[k].Keys[kk] = make([]string, len(kv))\n            i := 0\n            for _, av := range kv {\n                self.modemap[k].Keys[kk][i] = av\n                i++\n            }\n        }\n    }\n}\n\nfunc (self *Dispatcher) setupTargets() {\n    self.targetmap = make(map[string]targets.Target)\n    for k, v := range self.config.Targets {\n        t, ok := targets.GetTarget(v.Module, k, v.Params)\n        if ok {\n            self.targetmap[k] = t\n            println(k)\n        }\n    }\n}\n\nfunc (self *Dispatcher) dispatch(rc *listeners.RemoteCommand) bool {\n    clog.Debug(\"repeat: %2d - key: %s - source: %s\", rc.Repeat, rc.Key, rc.Source)\n    var mod string\n    var cmd string\n    var args string\n    var rok bool\n    if val, ok := self.modemap[self.activemode].Keys[rc.Key]; ok {\n        clog.Debug(\"+ Found `%s` in %s\", rc.Key, self.activemode)\n        for _, v := range val {\n            clog.Debug(v)\n            mod, cmd, args, rok = self.resolve(v)\n            self.sender(mod, cmd, args)\n        }\n        return true\n    } else if val, ok := self.modemap[\"default\"].Keys[rc.Key]; ok {\n        clog.Debug(\"+ Found `%s` in default!\", rc.Key)\n        for _, v := range val {\n            mod, cmd, args, rok = self.resolve(v)\n            self.sender(mod, cmd, args)\n        }\n        return true\n    } else {\n        clog.Debug(\"+ `%s` Not found.\", rc.Key)\n        return false\n    }\n    if !rok {\n        return false\n    }\n\n    return true\n}\n\nfunc (self *Dispatcher) resolve(input string) (mod string, cmd string, args string, ok bool) {\n    clog.Debug(\"++ Resolving input for %s\", input)\n    foo := strings.SplitN(input, \"::\", 2)\n    if len(foo) < 2 {\n        clog.Warn(\"%s is not a well formed command\", input)\n        return \"\", \"\", \"\", false\n    }\n    bar := strings.SplitN(foo[1], \" \", 2)\n    baz := \"\"\n    if len(bar) > 1 {\n        baz = bar[1]\n    }\n\n    return foo[0], bar[0], baz, true\n}\n\nfunc (self *Dispatcher) sender(mod string, cmd string, args string) bool {\n    if mod == \"mode\" {\n        clog.Debug(\"++++ %s - %s\", mod, cmd)\n        return self.setMode(cmd)\n    }\n    if t, ok := self.targetmap[mod]; ok {\n        sok := t.SendCommand(cmd, args)\n        if !sok {\n            clog.Debug(\"- Failed to send command `%s` for `%s`\", cmd, mod)\n        }\n        return true\n    }\n    return false\n}\n\nfunc (self *Dispatcher) setMode(mode string) bool {\n    if _, ok := self.modemap[mode]; ok {\n        clog.Debug(\"+ Mode changed to `%s`\", mode)\n        self.activemode = mode\n    } else {\n        for k := range self.modemap {\n            clog.Debug(\"---- %s\", k)\n        }\n        return false\n    }\n    return true\n}\n<|endoftext|>"}
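The dispatcher above maps remote keys to strings of the form target::command args; resolve() splits on "::" and the first space, and sender() intercepts the special target "mode" to switch the active mode (e.g. mode::plex). A standalone sketch of that parsing rule, with the splitting logic copied from resolve() minus logging (the target names plex and denon are illustrative):

package main

import (
	"fmt"
	"strings"
)

// resolve mirrors Dispatcher.resolve above: "target::command args".
func resolve(input string) (mod, cmd, args string, ok bool) {
	parts := strings.SplitN(input, "::", 2)
	if len(parts) < 2 {
		return "", "", "", false
	}
	rest := strings.SplitN(parts[1], " ", 2)
	if len(rest) > 1 {
		args = rest[1]
	}
	return parts[0], rest[0], args, true
}

func main() {
	for _, in := range []string{"plex::play", "denon::volume up 5", "mode::plex", "garbage"} {
		mod, cmd, args, ok := resolve(in)
		fmt.Printf("%-22q -> mod=%q cmd=%q args=%q ok=%v\n", in, mod, cmd, args, ok)
	}
}

A string without "::" (like "garbage" above) is rejected, which is why resolve also returns an ok flag.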
{"text":"package shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"sort\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/super_block\"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandVolumeFixReplication{})\n}\n\ntype commandVolumeFixReplication struct {\n}\n\nfunc (c *commandVolumeFixReplication) Name() string {\n\treturn \"volume.fix.replication\"\n}\n\nfunc (c *commandVolumeFixReplication) Help() string {\n\treturn `add replicas to volumes that are missing replicas\n\n\tThis command finds all under-replicated volumes, and finds volume servers with free slots.\n\tIf the free slots satisfy the replication requirement, the volume content is copied over and mounted.\n\n\tvolume.fix.replication -n # do not take action\n\tvolume.fix.replication # actually copying the volume files and mount the volume\n\n\tNote:\n\t\t* each time this will only add back one replica for one volume id. If there are multiple replicas\n\t\t are missing, e.g. multiple volume servers are new, you may need to run this multiple times.\n\t\t* do not run this too quick within seconds, since the new volume replica may take a few seconds \n\t\t to register itself to the master.\n\n`\n}\n\nfunc (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\n\tif err = commandEnv.confirmIsLocked(); err != nil {\n\t\treturn\n\t}\n\n\ttakeAction := true\n\tif len(args) > 0 && args[0] == \"-n\" {\n\t\ttakeAction = false\n\t}\n\n\tvar resp *master_pb.VolumeListResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ find all volumes that needs replication\n\t\/\/ collect all data nodes\n\treplicatedVolumeLocations := make(map[uint32][]location)\n\treplicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage)\n\tvar allLocations []location\n\teachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tloc := newLocation(dc, string(rack), dn)\n\t\tfor _, v := range dn.VolumeInfos {\n\t\t\tif v.ReplicaPlacement > 0 {\n\t\t\t\treplicatedVolumeLocations[v.Id] = append(replicatedVolumeLocations[v.Id], loc)\n\t\t\t\treplicatedVolumeInfo[v.Id] = v\n\t\t\t}\n\t\t}\n\t\tallLocations = append(allLocations, loc)\n\t})\n\n\t\/\/ find all under replicated volumes\n\tunderReplicatedVolumeLocations := make(map[uint32][]location)\n\tfor vid, locations := range replicatedVolumeLocations {\n\t\tvolumeInfo := replicatedVolumeInfo[vid]\n\t\treplicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))\n\t\tif replicaPlacement.GetCopyCount() > len(locations) {\n\t\t\tunderReplicatedVolumeLocations[vid] = locations\n\t\t}\n\t}\n\n\tif len(underReplicatedVolumeLocations) == 0 {\n\t\treturn fmt.Errorf(\"no under replicated volumes\")\n\t}\n\n\tif len(allLocations) == 0 {\n\t\treturn fmt.Errorf(\"no data nodes at all\")\n\t}\n\n\t\/\/ find the most under populated data nodes\n\tkeepDataNodesSorted(allLocations)\n\n\tfor vid, locations := range underReplicatedVolumeLocations {\n\t\tvolumeInfo := replicatedVolumeInfo[vid]\n\t\treplicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))\n\t\tfoundNewLocation := false\n\t\tfor _, dst := range allLocations {\n\t\t\t\/\/ check whether data nodes satisfy the constraints\n\t\t\tif dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) {\n\t\t\t\t\/\/ ask the volume server to replicate the volume\n\t\t\t\tsourceNodes := underReplicatedVolumeLocations[vid]\n\t\t\t\tsourceNode := sourceNodes[rand.Intn(len(sourceNodes))]\n\t\t\t\tfoundNewLocation = true\n\t\t\t\tfmt.Fprintf(writer, \"replicating volume %d %s from %s to dataNode %s ...\\n\", volumeInfo.Id, replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id)\n\n\t\t\t\tif !takeAction {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\terr := 
operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t\t\t\t_, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{\n\t\t\t\t\t\tVolumeId: volumeInfo.Id,\n\t\t\t\t\t\tSourceDataNode: sourceNode.dataNode.Id,\n\t\t\t\t\t})\n\t\t\t\t\tif replicateErr != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"copying from %s => %s : %v\", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ adjust free volume count\n\t\t\t\tdst.dataNode.FreeVolumeCount--\n\t\t\t\tkeepDataNodesSorted(allLocations)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundNewLocation {\n\t\t\tfmt.Fprintf(writer, \"failed to place volume %d replica as %s, existing:%+v\\n\", volumeInfo.Id, replicaPlacement, locations)\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\nfunc keepDataNodesSorted(dataNodes []location) {\n\tsort.Slice(dataNodes, func(i, j int) bool {\n\t\treturn dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount\n\t})\n}\n\n\/*\n if on an existing data node {\n return false\n }\n if different from existing dcs {\n if lack on different dcs {\n return true\n }else{\n return false\n }\n }\n if not on primary dc {\n return false\n }\n if different from existing racks {\n if lack on different racks {\n return true\n }else{\n return false\n }\n }\n if not on primary rack {\n return false\n }\n if lacks on same rack {\n return true\n } else {\n return false\n }\n*\/\nfunc satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool {\n\n\texistingDataNodes := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\texistingDataNodes[loc.String()] += 1\n\t}\n\tsameDataNodeCount := existingDataNodes[possibleLocation.String()]\n\t\/\/ avoid duplicated volume on the same data node\n\tif sameDataNodeCount > 0 {\n\t\treturn false\n\t}\n\n\texistingDataCenters := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\texistingDataCenters[loc.DataCenter()] += 1\n\t}\n\tprimaryDataCenters, _ := findTopKeys(existingDataCenters)\n\n\t\/\/ ensure data center count is within limit\n\tif _, found := existingDataCenters[possibleLocation.DataCenter()]; !found {\n\t\t\/\/ different from existing dcs\n\t\tif len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 {\n\t\t\t\/\/ lack on different dcs\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ adding this would go over the different dcs limit\n\t\t\treturn false\n\t\t}\n\t}\n\t\/\/ now this is same as one of the existing data center\n\tif !isAmong(possibleLocation.DataCenter(), primaryDataCenters) {\n\t\t\/\/ not on one of the primary dcs\n\t\treturn false\n\t}\n\n\t\/\/ now this is one of the primary dcs\n\texistingRacks := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\tif loc.DataCenter() != possibleLocation.DataCenter() {\n\t\t\tcontinue\n\t\t}\n\t\texistingRacks[loc.Rack()] += 1\n\t}\n\tprimaryRacks, _ := findTopKeys(existingRacks)\n\tsameRackCount := existingRacks[possibleLocation.Rack()]\n\n\t\/\/ ensure rack count is within limit\n\tif _, found := existingRacks[possibleLocation.Rack()]; !found {\n\t\t\/\/ different from existing racks\n\t\tif len(existingRacks) < replicaPlacement.DiffRackCount+1 {\n\t\t\t\/\/ lack on different racks\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ adding 
this would go over the different racks limit\n\t\t\treturn false\n\t\t}\n\t}\n\t\/\/ now this is same as one of the existing racks\n\tif !isAmong(possibleLocation.Rack(), primaryRacks) {\n\t\t\/\/ not on the primary rack\n\t\treturn false\n\t}\n\n\t\/\/ now this is on the primary rack\n\n\t\/\/ different from existing data nodes\n\tif sameRackCount < replicaPlacement.SameRackCount+1 {\n\t\t\/\/ lack on same rack\n\t\treturn true\n\t} else {\n\t\t\/\/ adding this would go over the same data node limit\n\t\treturn false\n\t}\n\n}\n\nfunc findTopKeys(m map[string]int) (topKeys []string, max int) {\n\tfor k, c := range m {\n\t\tif max < c {\n\t\t\ttopKeys = topKeys[:0]\n\t\t\ttopKeys = append(topKeys, k)\n\t\t\tmax = c\n\t\t} else if max == c {\n\t\t\ttopKeys = append(topKeys, k)\n\t\t}\n\t}\n\treturn\n}\n\nfunc isAmong(key string, keys []string) bool {\n\tfor _, k := range keys {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype location struct {\n\tdc string\n\track string\n\tdataNode *master_pb.DataNodeInfo\n}\n\nfunc newLocation(dc, rack string, dataNode *master_pb.DataNodeInfo) location {\n\treturn location{\n\t\tdc: dc,\n\t\track: rack,\n\t\tdataNode: dataNode,\n\t}\n}\n\nfunc (l location) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", l.dc, l.rack, l.dataNode.Id)\n}\n\nfunc (l location) Rack() string {\n\treturn fmt.Sprintf(\"%s %s\", l.dc, l.rack)\n}\n\nfunc (l location) DataCenter() string {\n\treturn l.dc\n}\nprintout over replicated locationspackage shell\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"sort\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/volume_server_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/super_block\"\n)\n\nfunc init() {\n\tCommands = append(Commands, &commandVolumeFixReplication{})\n}\n\ntype commandVolumeFixReplication struct {\n}\n\nfunc (c *commandVolumeFixReplication) Name() string {\n\treturn \"volume.fix.replication\"\n}\n\nfunc (c *commandVolumeFixReplication) Help() string {\n\treturn `add replicas to volumes that are missing replicas\n\n\tThis command finds all under-replicated volumes, and finds volume servers with free slots.\n\tIf the free slots satisfy the replication requirement, the volume content is copied over and mounted.\n\n\tvolume.fix.replication -n # do not take action\n\tvolume.fix.replication # actually copying the volume files and mount the volume\n\n\tNote:\n\t\t* each time this will only add back one replica for one volume id. If there are multiple replicas\n\t\t are missing, e.g. 
multiple volume servers are new, you may need to run this multiple times.\n\t\t* do not run this too quickly within seconds, since the new volume replica may take a few seconds \n\t\t to register itself to the master.\n\n`\n}\n\nfunc (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {\n\n\tif err = commandEnv.confirmIsLocked(); err != nil {\n\t\treturn\n\t}\n\n\ttakeAction := true\n\tif len(args) > 0 && args[0] == \"-n\" {\n\t\ttakeAction = false\n\t}\n\n\tvar resp *master_pb.VolumeListResponse\n\terr = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {\n\t\tresp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ find all volumes that need replication\n\t\/\/ collect all data nodes\n\treplicatedVolumeLocations := make(map[uint32][]location)\n\treplicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage)\n\tvar allLocations []location\n\teachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {\n\t\tloc := newLocation(dc, string(rack), dn)\n\t\tfor _, v := range dn.VolumeInfos {\n\t\t\tif v.ReplicaPlacement > 0 {\n\t\t\t\treplicatedVolumeLocations[v.Id] = append(replicatedVolumeLocations[v.Id], loc)\n\t\t\t\treplicatedVolumeInfo[v.Id] = v\n\t\t\t}\n\t\t}\n\t\tallLocations = append(allLocations, loc)\n\t})\n\n\t\/\/ find all under replicated volumes\n\tunderReplicatedVolumeLocations := make(map[uint32][]location)\n\tfor vid, locations := range replicatedVolumeLocations {\n\t\tvolumeInfo := replicatedVolumeInfo[vid]\n\t\treplicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))\n\t\tif replicaPlacement.GetCopyCount() > len(locations) {\n\t\t\tunderReplicatedVolumeLocations[vid] = locations\n\t\t} else if replicaPlacement.GetCopyCount() < len(locations) {\n\t\t\tfmt.Fprintf(writer, \"volume %d replication %s, but over replicated:%+v\\n\", volumeInfo.Id, replicaPlacement, locations)\n\t\t}\n\t}\n\n\tif len(underReplicatedVolumeLocations) == 0 {\n\t\treturn fmt.Errorf(\"no under replicated volumes\")\n\t}\n\n\tif len(allLocations) == 0 {\n\t\treturn fmt.Errorf(\"no data nodes at all\")\n\t}\n\n\t\/\/ find the most under populated data nodes\n\tkeepDataNodesSorted(allLocations)\n\n\tfor vid, locations := range underReplicatedVolumeLocations {\n\t\tvolumeInfo := replicatedVolumeInfo[vid]\n\t\treplicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement))\n\t\tfoundNewLocation := false\n\t\tfor _, dst := range allLocations {\n\t\t\t\/\/ check whether data nodes satisfy the constraints\n\t\t\tif dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) {\n\t\t\t\t\/\/ ask the volume server to replicate the volume\n\t\t\t\tsourceNodes := underReplicatedVolumeLocations[vid]\n\t\t\t\tsourceNode := sourceNodes[rand.Intn(len(sourceNodes))]\n\t\t\t\tfoundNewLocation = true\n\t\t\t\tfmt.Fprintf(writer, \"replicating volume %d %s from %s to dataNode %s ...\\n\", volumeInfo.Id, replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id)\n\n\t\t\t\tif !takeAction {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\terr := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {\n\t\t\t\t\t_, replicateErr := volumeServerClient.VolumeCopy(context.Background(), 
&volume_server_pb.VolumeCopyRequest{\n\t\t\t\t\t\tVolumeId: volumeInfo.Id,\n\t\t\t\t\t\tSourceDataNode: sourceNode.dataNode.Id,\n\t\t\t\t\t})\n\t\t\t\t\tif replicateErr != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"copying from %s => %s : %v\", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ adjust free volume count\n\t\t\t\tdst.dataNode.FreeVolumeCount--\n\t\t\t\tkeepDataNodesSorted(allLocations)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundNewLocation {\n\t\t\tfmt.Fprintf(writer, \"failed to place volume %d replica as %s, existing:%+v\\n\", volumeInfo.Id, replicaPlacement, locations)\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\nfunc keepDataNodesSorted(dataNodes []location) {\n\tsort.Slice(dataNodes, func(i, j int) bool {\n\t\treturn dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount\n\t})\n}\n\n\/*\n if on an existing data node {\n return false\n }\n if different from existing dcs {\n if lack on different dcs {\n return true\n }else{\n return false\n }\n }\n if not on primary dc {\n return false\n }\n if different from existing racks {\n if lack on different racks {\n return true\n }else{\n return false\n }\n }\n if not on primary rack {\n return false\n }\n if lacks on same rack {\n return true\n } else {\n return false\n }\n*\/\nfunc satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool {\n\n\texistingDataNodes := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\texistingDataNodes[loc.String()] += 1\n\t}\n\tsameDataNodeCount := existingDataNodes[possibleLocation.String()]\n\t\/\/ avoid duplicated volume on the same data node\n\tif sameDataNodeCount > 0 {\n\t\treturn false\n\t}\n\n\texistingDataCenters := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\texistingDataCenters[loc.DataCenter()] += 1\n\t}\n\tprimaryDataCenters, _ := findTopKeys(existingDataCenters)\n\n\t\/\/ ensure data center count is within limit\n\tif _, found := existingDataCenters[possibleLocation.DataCenter()]; !found {\n\t\t\/\/ different from existing dcs\n\t\tif len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 {\n\t\t\t\/\/ lack on different dcs\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ adding this would go over the different dcs limit\n\t\t\treturn false\n\t\t}\n\t}\n\t\/\/ now this is same as one of the existing data center\n\tif !isAmong(possibleLocation.DataCenter(), primaryDataCenters) {\n\t\t\/\/ not on one of the primary dcs\n\t\treturn false\n\t}\n\n\t\/\/ now this is one of the primary dcs\n\texistingRacks := make(map[string]int)\n\tfor _, loc := range existingLocations {\n\t\tif loc.DataCenter() != possibleLocation.DataCenter() {\n\t\t\tcontinue\n\t\t}\n\t\texistingRacks[loc.Rack()] += 1\n\t}\n\tprimaryRacks, _ := findTopKeys(existingRacks)\n\tsameRackCount := existingRacks[possibleLocation.Rack()]\n\n\t\/\/ ensure rack count is within limit\n\tif _, found := existingRacks[possibleLocation.Rack()]; !found {\n\t\t\/\/ different from existing racks\n\t\tif len(existingRacks) < replicaPlacement.DiffRackCount+1 {\n\t\t\t\/\/ lack on different racks\n\t\t\treturn true\n\t\t} else {\n\t\t\t\/\/ adding this would go over the different racks limit\n\t\t\treturn false\n\t\t}\n\t}\n\t\/\/ now this is same as one of the existing racks\n\tif !isAmong(possibleLocation.Rack(), primaryRacks) {\n\t\t\/\/ not on the primary rack\n\t\treturn 
false\n\t}\n\n\t\/\/ now this is on the primary rack\n\n\t\/\/ different from existing data nodes\n\tif sameRackCount < replicaPlacement.SameRackCount+1 {\n\t\t\/\/ lack on same rack\n\t\treturn true\n\t} else {\n\t\t\/\/ adding this would go over the same rack limit\n\t\treturn false\n\t}\n\n}\n\nfunc findTopKeys(m map[string]int) (topKeys []string, max int) {\n\tfor k, c := range m {\n\t\tif max < c {\n\t\t\ttopKeys = topKeys[:0]\n\t\t\ttopKeys = append(topKeys, k)\n\t\t\tmax = c\n\t\t} else if max == c {\n\t\t\ttopKeys = append(topKeys, k)\n\t\t}\n\t}\n\treturn\n}\n\nfunc isAmong(key string, keys []string) bool {\n\tfor _, k := range keys {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype location struct {\n\tdc string\n\track string\n\tdataNode *master_pb.DataNodeInfo\n}\n\nfunc newLocation(dc, rack string, dataNode *master_pb.DataNodeInfo) location {\n\treturn location{\n\t\tdc: dc,\n\t\track: rack,\n\t\tdataNode: dataNode,\n\t}\n}\n\nfunc (l location) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", l.dc, l.rack, l.dataNode.Id)\n}\n\nfunc (l location) Rack() string {\n\treturn fmt.Sprintf(\"%s %s\", l.dc, l.rack)\n}\n\nfunc (l location) DataCenter() string {\n\treturn l.dc\n}\n<|endoftext|>"}
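satisfyReplicaPlacement above picks the "primary" data centers and racks with findTopKeys: every key whose count ties for the maximum. A small standalone demo of that helper, with the function body copied verbatim from the record above (the rack names and counts are illustrative):

package main

import "fmt"

// findTopKeys (copied from above) returns all keys tied for the maximum count.
func findTopKeys(m map[string]int) (topKeys []string, max int) {
	for k, c := range m {
		if max < c {
			topKeys = topKeys[:0]
			topKeys = append(topKeys, k)
			max = c
		} else if max == c {
			topKeys = append(topKeys, k)
		}
	}
	return
}

func main() {
	// Rack() keys look like "dc rack"; counts are existing replicas per rack.
	existingRacks := map[string]int{"dc1 rackA": 2, "dc1 rackB": 2, "dc2 rackC": 1}
	primary, max := findTopKeys(existingRacks)
	fmt.Println(primary, max) // e.g. [dc1 rackA dc1 rackB] 2 (map order varies)
}

Because ties are kept, a candidate location only has to be among the top group (isAmong) to count as being on a primary data center or rack.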
{"text":"\/\/ Copyright 2015 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/api\/alertmanager\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ AcceptanceTest provides declarative definition of given inputs and expected\n\/\/ output of an Alertmanager setup.\ntype AcceptanceTest struct {\n\t*testing.T\n\n\topts *AcceptanceOpts\n\n\tams []*Alertmanager\n\tcollectors []*Collector\n\n\tactions map[float64][]func()\n}\n\n\/\/ AcceptanceOpts defines configuration paramters for an acceptance test.\ntype AcceptanceOpts struct {\n\tTolerance time.Duration\n\tbaseTime time.Time\n}\n\nfunc (opts *AcceptanceOpts) alertString(a *model.Alert) string {\n\tif a.EndsAt.IsZero() {\n\t\treturn fmt.Sprintf(\"%s[%v:]\", a, opts.relativeTime(a.StartsAt))\n\t}\n\treturn fmt.Sprintf(\"%s[%v:%v]\", a, opts.relativeTime(a.StartsAt), opts.relativeTime(a.EndsAt))\n}\n\n\/\/ expandTime returns the absolute time for the relative time\n\/\/ calculated from the test's base time.\nfunc (opts *AcceptanceOpts) expandTime(rel float64) time.Time {\n\treturn opts.baseTime.Add(time.Duration(rel * float64(time.Second)))\n}\n\n\/\/ expandTime returns the relative time for the given time\n\/\/ calculated from the test's base time.\nfunc (opts *AcceptanceOpts) relativeTime(act time.Time) float64 {\n\treturn float64(act.Sub(opts.baseTime)) \/ float64(time.Second)\n}\n\n\/\/ NewAcceptanceTest returns a new acceptance test with the base time\n\/\/ set to the current time.\nfunc NewAcceptanceTest(t *testing.T, opts *AcceptanceOpts) *AcceptanceTest {\n\ttest := &AcceptanceTest{\n\t\tT: t,\n\t\topts: opts,\n\t\tactions: map[float64][]func(){},\n\t}\n\topts.baseTime = time.Now()\n\n\treturn test\n}\n\n\/\/ freeAddress returns a new listen address not currently in use.\nfunc freeAddress() string {\n\t\/\/ Let the OS allocate a free address, close it and hope\n\t\/\/ it is still free when starting Alertmanager.\n\tl, err := net.Listen(\"tcp4\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().String()\n}\n\n\/\/ Do sets the given function to be executed at the given time.\nfunc (t *AcceptanceTest) Do(at float64, f func()) {\n\tt.actions[at] = append(t.actions[at], f)\n}\n\n\/\/ Alertmanager returns a new structure that allows starting an instance\n\/\/ of Alertmanager on a random port.\nfunc (t *AcceptanceTest) Alertmanager(conf string) *Alertmanager {\n\tam := &Alertmanager{\n\t\tt: t,\n\t\topts: t.opts,\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"am_test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.dir = dir\n\n\tcf, err := os.Create(filepath.Join(dir, \"config.yml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.confFile = cf\n\tam.UpdateConfig(conf)\n\n\tam.apiAddr = freeAddress()\n\tam.clusterAddr = freeAddress()\n\n\tt.Logf(\"AM on %s\", am.apiAddr)\n\n\tclient, err := alertmanager.New(alertmanager.Config{\n\t\tAddress: fmt.Sprintf(\"http:\/\/%s\", am.apiAddr),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.client = client\n\n\tt.ams = append(t.ams, am)\n\n\treturn am\n}\n\n\/\/ Collector returns a new collector bound to the test instance.\nfunc (t *AcceptanceTest) Collector(name string) *Collector {\n\tco := &Collector{\n\t\tt: t.T,\n\t\tname: name,\n\t\topts: t.opts,\n\t\tcollected: map[float64][]model.Alerts{},\n\t\texpected: map[Interval][]model.Alerts{},\n\t}\n\tt.collectors = append(t.collectors, co)\n\n\treturn co\n}\n\n\/\/ Run starts all Alertmanagers and runs queries against them. 
It then checks\n\/\/ whether all expected notifications have arrived at the expected receiver.\nfunc (t *AcceptanceTest) Run() {\n\terrc := make(chan error)\n\n\tfor _, am := range t.ams {\n\t\tam.errc = errc\n\n\t\tam.Start()\n\t\tdefer func(am *Alertmanager) {\n\t\t\tam.Terminate()\n\t\t\tam.cleanup()\n\t\t}(am)\n\t}\n\n\tgo t.runActions()\n\n\tvar latest float64\n\tfor _, coll := range t.collectors {\n\t\tif l := coll.latest(); l > latest {\n\t\t\tlatest = l\n\t\t}\n\t}\n\n\tdeadline := t.opts.expandTime(latest)\n\n\tselect {\n\tcase <-time.After(deadline.Sub(time.Now())):\n\t\t\/\/ continue\n\tcase err := <-errc:\n\t\tt.Error(err)\n\t}\n\n\tfor _, coll := range t.collectors {\n\t\treport := coll.check()\n\t\tt.Log(report)\n\t}\n\n\tfor _, am := range t.ams {\n\t\tt.Logf(\"stdout:\\n%v\", am.cmd.Stdout)\n\t\tt.Logf(\"stderr:\\n%v\", am.cmd.Stderr)\n\t}\n}\n\n\/\/ runActions performs the stored actions at the defined times.\nfunc (t *AcceptanceTest) runActions() {\n\tvar wg sync.WaitGroup\n\n\tfor at, fs := range t.actions {\n\t\tts := t.opts.expandTime(at)\n\t\twg.Add(len(fs))\n\n\t\tfor _, f := range fs {\n\t\t\tgo func(f func()) {\n\t\t\t\ttime.Sleep(ts.Sub(time.Now()))\n\t\t\t\tf()\n\t\t\t\twg.Done()\n\t\t\t}(f)\n\t\t}\n\t}\n\n\twg.Wait()\n}\n\n\/\/ Alertmanager encapsulates an Alertmanager process and allows\n\/\/ declaring alerts being pushed to it at fixed points in time.\ntype Alertmanager struct {\n\tt *AcceptanceTest\n\topts *AcceptanceOpts\n\n\tapiAddr string\n\tclusterAddr string\n\tclient alertmanager.Client\n\tcmd *exec.Cmd\n\tconfFile *os.File\n\tdir string\n\n\terrc chan<- error\n}\n\n\/\/ Start the alertmanager and wait until it is ready to receive.\nfunc (am *Alertmanager) Start() {\n\tcmd := exec.Command(\"..\/..\/alertmanager\",\n\t\t\"--config.file\", am.confFile.Name(),\n\t\t\"--log.level\", \"debug\",\n\t\t\"--web.listen-address\", am.apiAddr,\n\t\t\"--storage.path\", am.dir,\n\t\t\"--cluster.address\", am.clusterAddr,\n\t)\n\n\tif am.cmd == nil {\n\t\tvar outb, errb bytes.Buffer\n\t\tcmd.Stdout = &outb\n\t\tcmd.Stderr = &errb\n\t} else {\n\t\tcmd.Stdout = am.cmd.Stdout\n\t\tcmd.Stderr = am.cmd.Stderr\n\t}\n\tam.cmd = cmd\n\n\tif err := am.cmd.Start(); err != nil {\n\t\tam.t.Fatalf(\"Starting alertmanager failed: %s\", err)\n\t}\n\n\tgo func() {\n\t\tif err := am.cmd.Wait(); err != nil {\n\t\t\tam.errc <- err\n\t\t}\n\t}()\n\n\ttime.Sleep(50 * time.Millisecond)\n\tfor i := 0; i < 10; i++ {\n\t\tresp, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/status\", am.apiAddr))\n\t\tif err == nil {\n\t\t\t_, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tam.t.Fatalf(\"Starting alertmanager failed: %s\", err)\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\tam.t.Fatalf(\"Starting alertmanager failed: timeout\")\n}\n\n\/\/ Terminate kills the underlying Alertmanager process and remove intermediate\n\/\/ data.\nfunc (am *Alertmanager) Terminate() {\n\tsyscall.Kill(am.cmd.Process.Pid, syscall.SIGTERM)\n}\n\n\/\/ Reload sends the reloading signal to the Alertmanager process.\nfunc (am *Alertmanager) Reload() {\n\tsyscall.Kill(am.cmd.Process.Pid, syscall.SIGHUP)\n}\n\nfunc (am *Alertmanager) cleanup() {\n\tos.RemoveAll(am.confFile.Name())\n}\n\n\/\/ Push declares alerts that are to be pushed to the Alertmanager\n\/\/ server at a relative point in time.\nfunc (am *Alertmanager) Push(at float64, alerts ...*TestAlert) {\n\tvar nas model.Alerts\n\tfor _, a := range alerts {\n\t\tnas = append(nas, 
a.nativeAlert(am.opts))\n\t}\n\n\talertAPI := alertmanager.NewAlertAPI(am.client)\n\n\tam.t.Do(at, func() {\n\t\tif err := alertAPI.Push(context.Background(), nas...); err != nil {\n\t\t\tam.t.Errorf(\"Error pushing %v: %s\", nas, err)\n\t\t}\n\t})\n}\n\n\/\/ SetSilence updates or creates the given Silence.\nfunc (am *Alertmanager) SetSilence(at float64, sil *TestSilence) {\n\tam.t.Do(at, func() {\n\t\tvar buf bytes.Buffer\n\t\tif err := json.NewEncoder(&buf).Encode(sil.nativeSilence(am.opts)); err != nil {\n\t\t\tam.t.Errorf(\"Error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/api\/v1\/silences\", am.apiAddr), \"application\/json\", &buf)\n\t\tif err != nil {\n\t\t\tam.t.Errorf(\"Error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvar v struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tData struct {\n\t\t\t\tSilenceID string `json:\"silenceId\"`\n\t\t\t} `json:\"data\"`\n\t\t}\n\t\tif err := json.Unmarshal(b, &v); err != nil || resp.StatusCode\/100 != 2 {\n\t\t\tam.t.Errorf(\"error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t\tsil.ID = v.Data.SilenceID\n\t})\n}\n\n\/\/ DelSilence deletes the silence with the sid at the given time.\nfunc (am *Alertmanager) DelSilence(at float64, sil *TestSilence) {\n\tam.t.Do(at, func() {\n\t\treq, err := http.NewRequest(\"DELETE\", fmt.Sprintf(\"http:\/\/%s\/api\/v1\/silence\/%s\", am.apiAddr, sil.ID), nil)\n\t\tif err != nil {\n\t\t\tam.t.Errorf(\"Error deleting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil || resp.StatusCode\/100 != 2 {\n\t\t\tam.t.Errorf(\"Error deleting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t})\n}\n\n\/\/ UpdateConfig rewrites the configuration file for the Alertmanager. 
It does not\n\/\/ initiate config reloading.\nfunc (am *Alertmanager) UpdateConfig(conf string) {\n\tif _, err := am.confFile.WriteString(conf); err != nil {\n\t\tam.t.Fatal(err)\n\t\treturn\n\t}\n\tif err := am.confFile.Sync(); err != nil {\n\t\tam.t.Fatal(err)\n\t\treturn\n\t}\n}\nAdapt cluster listen address flag in tests\/\/ Copyright 2015 Prometheus Team\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/api\/alertmanager\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ AcceptanceTest provides declarative definition of given inputs and expected\n\/\/ output of an Alertmanager setup.\ntype AcceptanceTest struct {\n\t*testing.T\n\n\topts *AcceptanceOpts\n\n\tams []*Alertmanager\n\tcollectors []*Collector\n\n\tactions map[float64][]func()\n}\n\n\/\/ AcceptanceOpts defines configuration paramters for an acceptance test.\ntype AcceptanceOpts struct {\n\tTolerance time.Duration\n\tbaseTime time.Time\n}\n\nfunc (opts *AcceptanceOpts) alertString(a *model.Alert) string {\n\tif a.EndsAt.IsZero() {\n\t\treturn fmt.Sprintf(\"%s[%v:]\", a, opts.relativeTime(a.StartsAt))\n\t}\n\treturn fmt.Sprintf(\"%s[%v:%v]\", a, opts.relativeTime(a.StartsAt), opts.relativeTime(a.EndsAt))\n}\n\n\/\/ expandTime returns the absolute time for the relative time\n\/\/ calculated from the test's base time.\nfunc (opts *AcceptanceOpts) expandTime(rel float64) time.Time {\n\treturn opts.baseTime.Add(time.Duration(rel * float64(time.Second)))\n}\n\n\/\/ expandTime returns the relative time for the given time\n\/\/ calculated from the test's base time.\nfunc (opts *AcceptanceOpts) relativeTime(act time.Time) float64 {\n\treturn float64(act.Sub(opts.baseTime)) \/ float64(time.Second)\n}\n\n\/\/ NewAcceptanceTest returns a new acceptance test with the base time\n\/\/ set to the current time.\nfunc NewAcceptanceTest(t *testing.T, opts *AcceptanceOpts) *AcceptanceTest {\n\ttest := &AcceptanceTest{\n\t\tT: t,\n\t\topts: opts,\n\t\tactions: map[float64][]func(){},\n\t}\n\topts.baseTime = time.Now()\n\n\treturn test\n}\n\n\/\/ freeAddress returns a new listen address not currently in use.\nfunc freeAddress() string {\n\t\/\/ Let the OS allocate a free address, close it and hope\n\t\/\/ it is still free when starting Alertmanager.\n\tl, err := net.Listen(\"tcp4\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().String()\n}\n\n\/\/ Do sets the given function to be executed at the given time.\nfunc (t *AcceptanceTest) Do(at float64, f func()) {\n\tt.actions[at] = append(t.actions[at], f)\n}\n\n\/\/ Alertmanager returns a new structure that allows starting an instance\n\/\/ of Alertmanager on a random port.\nfunc (t *AcceptanceTest) 
Alertmanager(conf string) *Alertmanager {\n\tam := &Alertmanager{\n\t\tt: t,\n\t\topts: t.opts,\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"am_test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.dir = dir\n\n\tcf, err := os.Create(filepath.Join(dir, \"config.yml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.confFile = cf\n\tam.UpdateConfig(conf)\n\n\tam.apiAddr = freeAddress()\n\tam.clusterAddr = freeAddress()\n\n\tt.Logf(\"AM on %s\", am.apiAddr)\n\n\tclient, err := alertmanager.New(alertmanager.Config{\n\t\tAddress: fmt.Sprintf(\"http:\/\/%s\", am.apiAddr),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tam.client = client\n\n\tt.ams = append(t.ams, am)\n\n\treturn am\n}\n\n\/\/ Collector returns a new collector bound to the test instance.\nfunc (t *AcceptanceTest) Collector(name string) *Collector {\n\tco := &Collector{\n\t\tt: t.T,\n\t\tname: name,\n\t\topts: t.opts,\n\t\tcollected: map[float64][]model.Alerts{},\n\t\texpected: map[Interval][]model.Alerts{},\n\t}\n\tt.collectors = append(t.collectors, co)\n\n\treturn co\n}\n\n\/\/ Run starts all Alertmanagers and runs queries against them. It then checks\n\/\/ whether all expected notifications have arrived at the expected receiver.\nfunc (t *AcceptanceTest) Run() {\n\terrc := make(chan error)\n\n\tfor _, am := range t.ams {\n\t\tam.errc = errc\n\n\t\tam.Start()\n\t\tdefer func(am *Alertmanager) {\n\t\t\tam.Terminate()\n\t\t\tam.cleanup()\n\t\t}(am)\n\t}\n\n\tgo t.runActions()\n\n\tvar latest float64\n\tfor _, coll := range t.collectors {\n\t\tif l := coll.latest(); l > latest {\n\t\t\tlatest = l\n\t\t}\n\t}\n\n\tdeadline := t.opts.expandTime(latest)\n\n\tselect {\n\tcase <-time.After(deadline.Sub(time.Now())):\n\t\t\/\/ continue\n\tcase err := <-errc:\n\t\tt.Error(err)\n\t}\n\n\tfor _, coll := range t.collectors {\n\t\treport := coll.check()\n\t\tt.Log(report)\n\t}\n\n\tfor _, am := range t.ams {\n\t\tt.Logf(\"stdout:\\n%v\", am.cmd.Stdout)\n\t\tt.Logf(\"stderr:\\n%v\", am.cmd.Stderr)\n\t}\n}\n\n\/\/ runActions performs the stored actions at the defined times.\nfunc (t *AcceptanceTest) runActions() {\n\tvar wg sync.WaitGroup\n\n\tfor at, fs := range t.actions {\n\t\tts := t.opts.expandTime(at)\n\t\twg.Add(len(fs))\n\n\t\tfor _, f := range fs {\n\t\t\tgo func(f func()) {\n\t\t\t\ttime.Sleep(ts.Sub(time.Now()))\n\t\t\t\tf()\n\t\t\t\twg.Done()\n\t\t\t}(f)\n\t\t}\n\t}\n\n\twg.Wait()\n}\n\n\/\/ Alertmanager encapsulates an Alertmanager process and allows\n\/\/ declaring alerts being pushed to it at fixed points in time.\ntype Alertmanager struct {\n\tt *AcceptanceTest\n\topts *AcceptanceOpts\n\n\tapiAddr string\n\tclusterAddr string\n\tclient alertmanager.Client\n\tcmd *exec.Cmd\n\tconfFile *os.File\n\tdir string\n\n\terrc chan<- error\n}\n\n\/\/ Start the alertmanager and wait until it is ready to receive.\nfunc (am *Alertmanager) Start() {\n\tcmd := exec.Command(\"..\/..\/alertmanager\",\n\t\t\"--config.file\", am.confFile.Name(),\n\t\t\"--log.level\", \"debug\",\n\t\t\"--web.listen-address\", am.apiAddr,\n\t\t\"--storage.path\", am.dir,\n\t\t\"--cluster.listen-address\", am.clusterAddr,\n\t)\n\n\tif am.cmd == nil {\n\t\tvar outb, errb bytes.Buffer\n\t\tcmd.Stdout = &outb\n\t\tcmd.Stderr = &errb\n\t} else {\n\t\tcmd.Stdout = am.cmd.Stdout\n\t\tcmd.Stderr = am.cmd.Stderr\n\t}\n\tam.cmd = cmd\n\n\tif err := am.cmd.Start(); err != nil {\n\t\tam.t.Fatalf(\"Starting alertmanager failed: %s\", err)\n\t}\n\n\tgo func() {\n\t\tif err := am.cmd.Wait(); err != nil {\n\t\t\tam.errc <- err\n\t\t}\n\t}()\n\n\ttime.Sleep(50 * 
time.Millisecond)\n\tfor i := 0; i < 10; i++ {\n\t\tresp, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/status\", am.apiAddr))\n\t\tif err == nil {\n\t\t\t_, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tam.t.Fatalf(\"Starting alertmanager failed: %s\", err)\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\tam.t.Fatalf(\"Starting alertmanager failed: timeout\")\n}\n\n\/\/ Terminate kills the underlying Alertmanager process and remove intermediate\n\/\/ data.\nfunc (am *Alertmanager) Terminate() {\n\tsyscall.Kill(am.cmd.Process.Pid, syscall.SIGTERM)\n}\n\n\/\/ Reload sends the reloading signal to the Alertmanager process.\nfunc (am *Alertmanager) Reload() {\n\tsyscall.Kill(am.cmd.Process.Pid, syscall.SIGHUP)\n}\n\nfunc (am *Alertmanager) cleanup() {\n\tos.RemoveAll(am.confFile.Name())\n}\n\n\/\/ Push declares alerts that are to be pushed to the Alertmanager\n\/\/ server at a relative point in time.\nfunc (am *Alertmanager) Push(at float64, alerts ...*TestAlert) {\n\tvar nas model.Alerts\n\tfor _, a := range alerts {\n\t\tnas = append(nas, a.nativeAlert(am.opts))\n\t}\n\n\talertAPI := alertmanager.NewAlertAPI(am.client)\n\n\tam.t.Do(at, func() {\n\t\tif err := alertAPI.Push(context.Background(), nas...); err != nil {\n\t\t\tam.t.Errorf(\"Error pushing %v: %s\", nas, err)\n\t\t}\n\t})\n}\n\n\/\/ SetSilence updates or creates the given Silence.\nfunc (am *Alertmanager) SetSilence(at float64, sil *TestSilence) {\n\tam.t.Do(at, func() {\n\t\tvar buf bytes.Buffer\n\t\tif err := json.NewEncoder(&buf).Encode(sil.nativeSilence(am.opts)); err != nil {\n\t\t\tam.t.Errorf(\"Error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/api\/v1\/silences\", am.apiAddr), \"application\/json\", &buf)\n\t\tif err != nil {\n\t\t\tam.t.Errorf(\"Error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tvar v struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tData struct {\n\t\t\t\tSilenceID string `json:\"silenceId\"`\n\t\t\t} `json:\"data\"`\n\t\t}\n\t\tif err := json.Unmarshal(b, &v); err != nil || resp.StatusCode\/100 != 2 {\n\t\t\tam.t.Errorf(\"error setting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t\tsil.ID = v.Data.SilenceID\n\t})\n}\n\n\/\/ DelSilence deletes the silence with the sid at the given time.\nfunc (am *Alertmanager) DelSilence(at float64, sil *TestSilence) {\n\tam.t.Do(at, func() {\n\t\treq, err := http.NewRequest(\"DELETE\", fmt.Sprintf(\"http:\/\/%s\/api\/v1\/silence\/%s\", am.apiAddr, sil.ID), nil)\n\t\tif err != nil {\n\t\t\tam.t.Errorf(\"Error deleting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil || resp.StatusCode\/100 != 2 {\n\t\t\tam.t.Errorf(\"Error deleting silence %v: %s\", sil, err)\n\t\t\treturn\n\t\t}\n\t})\n}\n\n\/\/ UpdateConfig rewrites the configuration file for the Alertmanager. 
It does not\n\/\/ initiate config reloading.\nfunc (am *Alertmanager) UpdateConfig(conf string) {\n\tif _, err := am.confFile.WriteString(conf); err != nil {\n\t\tam.t.Fatal(err)\n\t\treturn\n\t}\n\tif err := am.confFile.Sync(); err != nil {\n\t\tam.t.Fatal(err)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"os\"\n\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/whitepages\/go-stingray\"\n)\n\n\/\/ Provider returns a terraform.ResourceProvider.\nfunc Provider() terraform.ResourceProvider {\n\treturn &schema.Provider{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_URL\", nil),\n\t\t\t},\n\n\t\t\t\"username\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_USERNAME\", nil),\n\t\t\t},\n\n\t\t\t\"password\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_PASSWORD\", nil),\n\t\t\t},\n\n\t\t\t\"verify_ssl\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_VERIFY_SSL\", true),\n\t\t\t},\n\t\t},\n\n\t\tResourcesMap: map[string]*schema.Resource{\n\t\t\t\"stingray_action_program\": resourceActionProgram(),\n\t\t\t\"stingray_extra_file\": resourceExtraFile(),\n\t\t\t\"stingray_license_key\": resourceLicenseKey(),\n\t\t\t\"stingray_monitor_script\": resourceMonitorScript(),\n\t\t\t\"stingray_monitor\": resourceMonitor(),\n\t\t\t\"stingray_pool\": resourcePool(),\n\t\t\t\"stingray_rate\": resourceRate(),\n\t\t\t\"stingray_rule\": resourceRule(),\n\t\t\t\"stingray_service_level_monitor\": resourceServiceLevelMonitor(),\n\t\t\t\"stingray_ssl_cas\": resourceSSLCAs(),\n\t\t\t\"stingray_ssl_server_key\": resourceSSLServerKey(),\n\t\t\t\"stingray_traffic_ip_group\": resourceTrafficIPGroup(),\n\t\t\t\"stingray_virtual_server\": resourceVirtualServer(),\n\t\t},\n\n\t\tConfigureFunc: providerConfigure,\n\t}\n}\n\ntype providerConfig struct {\n\tclient *stingray.Client\n}\n\nfunc providerConfigure(d *schema.ResourceData) (interface{}, error) {\n\tconfig := Config{\n\t\tURL: d.Get(\"url\").(string),\n\t\tUsername: d.Get(\"username\").(string),\n\t\tPassword: d.Get(\"password\").(string),\n\t\tVerifySSL: d.Get(\"verify_ssl\").(bool),\n\t}\n\tclient, err := config.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &providerConfig{client: client}, nil\n}\n\n\/\/ Takes the result of flatmap.Expand for an array of strings\n\/\/ and returns a []string\nfunc expandStringList(configured []interface{}) []string {\n\tvs := make([]string, 0, len(configured))\n\tfor _, v := range configured {\n\t\tvs = append(vs, v.(string))\n\t}\n\treturn vs\n}\n\n\/\/ hashString returns a hash of the input for use as a StateFunc\nfunc hashString(v interface{}) string {\n\tswitch v.(type) {\n\tcase string:\n\t\thash := sha1.Sum([]byte(v.(string)))\n\t\treturn hex.EncodeToString(hash[:])\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ setBool sets the target if the key is set in the schema config\nfunc 
setBool(target **bool, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.Bool(v.(bool))\n\t}\n}\n\n\/\/ setInt sets the target if the key is set in the schema config\nfunc setInt(target **int, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.Int(v.(int))\n\t}\n}\n\n\/\/ setString sets the target if the key is set in the schema config\nfunc setString(target **string, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.String(v.(string))\n\t}\n}\n\n\/\/ setStringList sets the target if the key is set in the schema config\nfunc setStringList(target **[]string, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\tlist := expandStringList(v.([]interface{}))\n\t\t*target = &list\n\t}\n}\n\n\/\/ setStringSet sets the target if the key is set in the schema config\nfunc setStringSet(target **[]string, d *schema.ResourceData, key string) {\n\tif _, ok := d.GetOk(key); ok {\n\t\tlist := expandStringList(d.Get(key).(*schema.Set).List())\n\t\t*target = &list\n\t}\n}\n\nfunc envDefaultFunc(k string, alt interface{}) schema.SchemaDefaultFunc {\n\treturn func() (interface{}, error) {\n\t\tif v := os.Getenv(k); v != \"\" {\n\t\t\treturn v, nil\n\t\t}\n\n\t\treturn alt, nil\n\t}\n}\nAdd valid_networks provider configurationpackage main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/whitepages\/terraform-provider-stingray\/Godeps\/_workspace\/src\/github.com\/whitepages\/go-stingray\"\n)\n\n\/\/ Provider returns a terraform.ResourceProvider.\nfunc Provider() terraform.ResourceProvider {\n\treturn &schema.Provider{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"url\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_URL\", nil),\n\t\t\t},\n\n\t\t\t\"username\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_USERNAME\", nil),\n\t\t\t},\n\n\t\t\t\"password\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_PASSWORD\", nil),\n\t\t\t},\n\n\t\t\t\"valid_networks\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_VALID_NETWORKS\", \"\"),\n\t\t\t},\n\n\t\t\t\"verify_ssl\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefaultFunc: envDefaultFunc(\"STINGRAY_VERIFY_SSL\", true),\n\t\t\t},\n\t\t},\n\n\t\tResourcesMap: map[string]*schema.Resource{\n\t\t\t\"stingray_action_program\": resourceActionProgram(),\n\t\t\t\"stingray_extra_file\": resourceExtraFile(),\n\t\t\t\"stingray_license_key\": resourceLicenseKey(),\n\t\t\t\"stingray_monitor_script\": resourceMonitorScript(),\n\t\t\t\"stingray_monitor\": resourceMonitor(),\n\t\t\t\"stingray_pool\": resourcePool(),\n\t\t\t\"stingray_rate\": resourceRate(),\n\t\t\t\"stingray_rule\": resourceRule(),\n\t\t\t\"stingray_service_level_monitor\": resourceServiceLevelMonitor(),\n\t\t\t\"stingray_ssl_cas\": resourceSSLCAs(),\n\t\t\t\"stingray_ssl_server_key\": 
resourceSSLServerKey(),\n\t\t\t\"stingray_traffic_ip_group\": resourceTrafficIPGroup(),\n\t\t\t\"stingray_virtual_server\": resourceVirtualServer(),\n\t\t},\n\n\t\tConfigureFunc: providerConfigure,\n\t}\n}\n\ntype providerConfig struct {\n\tclient *stingray.Client\n\tvalidNetworks netList\n}\n\nfunc providerConfigure(d *schema.ResourceData) (interface{}, error) {\n\tconfig := Config{\n\t\tURL: d.Get(\"url\").(string),\n\t\tUsername: d.Get(\"username\").(string),\n\t\tPassword: d.Get(\"password\").(string),\n\t\tVerifySSL: d.Get(\"verify_ssl\").(bool),\n\t}\n\tclient, err := config.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalidNetworks := d.Get(\"valid_networks\").(string)\n\tns := netList{}\n\n\tif len(validNetworks) > 0 {\n\t\tcidrList := strings.Split(validNetworks, \",\")\n\t\tns, err = parseCIDRList(cidrList)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &providerConfig{client: client, validNetworks: ns}, nil\n}\n\n\/\/ Takes the result of flatmap.Expand for an array of strings\n\/\/ and returns a []string\nfunc expandStringList(configured []interface{}) []string {\n\tvs := make([]string, 0, len(configured))\n\tfor _, v := range configured {\n\t\tvs = append(vs, v.(string))\n\t}\n\treturn vs\n}\n\n\/\/ hashString returns a hash of the input for use as a StateFunc\nfunc hashString(v interface{}) string {\n\tswitch v.(type) {\n\tcase string:\n\t\thash := sha1.Sum([]byte(v.(string)))\n\t\treturn hex.EncodeToString(hash[:])\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ setBool sets the target if the key is set in the schema config\nfunc setBool(target **bool, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.Bool(v.(bool))\n\t}\n}\n\n\/\/ setInt sets the target if the key is set in the schema config\nfunc setInt(target **int, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.Int(v.(int))\n\t}\n}\n\n\/\/ setString sets the target if the key is set in the schema config\nfunc setString(target **string, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\t*target = stingray.String(v.(string))\n\t}\n}\n\n\/\/ setStringList sets the target if the key is set in the schema config\nfunc setStringList(target **[]string, d *schema.ResourceData, key string) {\n\tif v, ok := d.GetOk(key); ok {\n\t\tlist := expandStringList(v.([]interface{}))\n\t\t*target = &list\n\t}\n}\n\n\/\/ setStringSet sets the target if the key is set in the schema config\nfunc setStringSet(target **[]string, d *schema.ResourceData, key string) {\n\tif _, ok := d.GetOk(key); ok {\n\t\tlist := expandStringList(d.Get(key).(*schema.Set).List())\n\t\t*target = &list\n\t}\n}\n\nfunc envDefaultFunc(k string, alt interface{}) schema.SchemaDefaultFunc {\n\treturn func() (interface{}, error) {\n\t\tif v := os.Getenv(k); v != \"\" {\n\t\t\treturn v, nil\n\t\t}\n\n\t\treturn alt, nil\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/franela\/goreq\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar errTaskNotFresh = errors.New(\"This task has been running too long to request a token.\")\nvar errAlreadyGivenKey = errors.New(\"This task has already been given a token.\")\nvar usedTaskIds = NewTtlSet()\n\nfunc createToken(token string, opts interface{}) (string, error) {\n\tr, err := VaultRequest{goreq.Request{\n\t\tUri: vaultPath(\"\/v1\/auth\/token\/create\", 
\"\"),\n\t\tMethod: \"POST\",\n\t\tBody: opts,\n\t\tMaxRedirects: 10,\n\t\tRedirectHeaders: true,\n\t}.WithHeader(\"X-Vault-Token\", token)}.Do()\n\tif err == nil {\n\t\tdefer r.Body.Close()\n\t\tswitch r.StatusCode {\n\t\tcase 200:\n\t\t\tvar t vaultTokenResp\n\t\t\tif err := r.Body.FromJsonTo(&t); err == nil {\n\t\t\t\treturn t.Auth.ClientToken, nil\n\t\t\t} else {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tdefault:\n\t\t\tvar e vaultError\n\t\t\te.Code = r.StatusCode\n\t\t\tif err := r.Body.FromJsonTo(&e); err == nil {\n\t\t\t\treturn \"\", e\n\t\t\t} else {\n\t\t\t\te.Errors = []string{\"communication error.\"}\n\t\t\t\treturn \"\", e\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc createWrappedToken(token string, opts interface{}, wrapTTL time.Duration) (string, error) {\n\twrapTTLSeconds := strconv.Itoa(int(wrapTTL.Seconds()))\n\n\tr, err := VaultRequest{\n\t\tgoreq.Request{\n\t\t\tUri: vaultPath(\"\/v1\/auth\/token\/create\", \"\"),\n\t\t\tMethod: \"POST\",\n\t\t\tBody: opts,\n\t\t\tMaxRedirects: 10,\n\t\t\tRedirectHeaders: true,\n\t\t}.WithHeader(\"X-Vault-Token\", token).WithHeader(\"X-Vault-Wrap-TTL\", wrapTTLSeconds),\n\t}.Do()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer r.Body.Close()\n\n\tif r.StatusCode != 200 {\n\t\tvar e vaultError\n\t\te.Code = r.StatusCode\n\t\tif err := r.Body.FromJsonTo(&e); err == nil {\n\t\t\treturn \"\", e\n\t\t} else {\n\t\t\te.Errors = []string{\"communication error.\"}\n\t\t\treturn \"\", e\n\t\t}\n\t}\n\n\tt := &vaultTokenResp{}\n\tif err := r.Body.FromJsonTo(t); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif t.WrapInfo.Token == \"\" {\n\t\treturn \"\", errors.New(\"Request for wrapped token did not return wrapped response\")\n\t}\n\n\treturn t.WrapInfo.Token, nil\n}\n\nfunc createTokenPair(token string, p *policy) (string, error) {\n\tpol := p.Policies\n\tif len(pol) == 0 { \/\/ explicitly set the policy, else the token will inherit ours\n\t\tpol = []string{\"default\"}\n\t}\n\n\tpermTokenOpts := struct {\n\t\tTtl string `json:\"ttl,omitempty\"`\n\t\tPolicies []string `json:\"policies\"`\n\t\tMeta map[string]string `json:\"meta,omitempty\"`\n\t\tNumUses int `json:\"num_uses\"`\n\t\tNoParent bool `json:\"no_parent\"`\n\t\tRenewable bool `json:\"renewable\"`\n\t}{time.Duration(time.Duration(p.Ttl) * time.Second).String(), pol, p.Meta, p.NumUses, true, true}\n\n\treturn createWrappedToken(token, permTokenOpts, 10*time.Minute)\n}\n\nfunc Provide(c *gin.Context) {\n\trequestStartTime := time.Now()\n\tstate.RLock()\n\tstatus := state.Status\n\ttoken := state.Token\n\tstate.RUnlock()\n\n\tremoteIp := c.Request.RemoteAddr\n\n\tatomic.AddInt32(&state.Stats.Requests, 1)\n\n\tif status == StatusSealed {\n\t\tlog.Printf(\"Rejected token request from %s. Reason: sealed.\", remoteIp)\n\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\tc.JSON(503, struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tOk bool `json:\"ok\"`\n\t\t\tError string `json:\"error\"`\n\t\t}{string(state.Status), false, \"Gatekeeper is sealed.\"})\n\t\treturn\n\t}\n\n\tvar reqParams struct {\n\t\tTaskId string `json:\"task_id\"`\n\t}\n\tdecoder := json.NewDecoder(c.Request.Body)\n\tif err := decoder.Decode(&reqParams); err == nil {\n\t\tif usedTaskIds.Has(reqParams.TaskId) {\n\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). 
Reason: %v\", remoteIp, reqParams.TaskId, errAlreadyGivenKey)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(403, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, errAlreadyGivenKey.Error()})\n\t\t\treturn\n\t\t}\n\t\t\/*\n\t\t\tThe task can start, but the task's framework may have not reported\n\t\t\tthat it is RUNNING back to mesos. In this case, the task will still\n\t\t\tbe STAGING and have a statuses length of 0.\n\n\t\t\tThis is a network race, so we just sleep and try again.\n\t\t*\/\n\t\tgMT := func(taskId string) (mesosTask, error) {\n\t\t\ttask, err := getMesosTask(taskId)\n\t\t\tfor i := time.Duration(0); i < 3 && err == nil && len(task.Statuses) == 0; i++ {\n\t\t\t\ttime.Sleep((500 + 250*i) * time.Millisecond)\n\t\t\t\ttask, err = getMesosTask(taskId)\n\t\t\t}\n\t\t\treturn task, err\n\t\t}\n\n\t\t\/\/ TODO: Remove this when we can incorporate Mesos in testing environment\n\t\tif reqParams.TaskId == state.testingTaskId && state.testingTaskId != \"\" {\n\t\t\tgMT = func(taskId string) (mesosTask, error) {\n\t\t\t\treturn mesosTask{\n\t\t\t\t\tStatuses: []struct {\n\t\t\t\t\t\tState string `json:\"state\"`\n\t\t\t\t\t\tTimestamp float64 `json:\"timestamp\"`\n\t\t\t\t\t}{{\"RUNNING\", float64(time.Now().UnixNano()) \/ float64(1000000000)}},\n\t\t\t\t\tId: reqParams.TaskId,\n\t\t\t\t\tName: \"Test\",\n\t\t\t\t}, nil\n\t\t\t}\n\t\t}\n\t\tif task, err := gMT(reqParams.TaskId); err == nil {\n\t\t\tif len(task.Statuses) == 0 {\n\t\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v (no status)\", remoteIp, reqParams.TaskId, errTaskNotFresh)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(403, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, errTaskNotFresh.Error()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ https:\/\/github.com\/apache\/mesos\/blob\/a61074586d778d432ba991701c9c4de9459db897\/src\/webui\/master\/static\/js\/controllers.js#L148\n\t\t\tstartTime := time.Unix(0, int64(task.Statuses[0].Timestamp*1000000000))\n\t\t\ttaskLife := time.Now().Sub(startTime)\n\t\t\tif taskLife > config.MaxTaskLife {\n\t\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v (no status) Task Life: %s\", remoteIp, reqParams.TaskId, errTaskNotFresh, taskLife)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(403, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, errTaskNotFresh.Error()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstate.RLock()\n\t\t\tpolicy := activePolicies.Get(task.Name)\n\t\t\tstate.RUnlock()\n\t\t\tif tempToken, err := createTokenPair(token, policy); err == nil {\n\t\t\t\tlog.Printf(\"Provided token pair for %s in %v. (Task Id: %s) (Task Name: %s). Policies: %v\", remoteIp, time.Now().Sub(requestStartTime), reqParams.TaskId, task.Name, policy.Policies)\n\t\t\t\tatomic.AddInt32(&state.Stats.Successful, 1)\n\t\t\t\tusedTaskIds.Put(reqParams.TaskId, config.MaxTaskLife+1*time.Minute)\n\t\t\t\tc.JSON(200, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t}{string(state.Status), true, tempToken})\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Failed to create token pair for %s (Task Id: %s). 
Error: %v\", remoteIp, reqParams.TaskId, err)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(500, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, err.Error()})\n\t\t\t}\n\t\t} else if err == errNoSuchTask {\n\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v\", remoteIp, reqParams.TaskId, errNoSuchTask)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(403, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, err.Error()})\n\t\t} else {\n\t\t\tlog.Printf(\"Failed to retrieve task information for %s (Task Id: %s). Reason: %v\", remoteIp, reqParams.TaskId, err)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(500, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, err.Error()})\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Rejected token request from %s. Reason: %v\", remoteIp, err)\n\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\tc.JSON(400, struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tOk bool `json:\"ok\"`\n\t\t\tError string `json:\"error\"`\n\t\t}{string(state.Status), false, err.Error()})\n\t}\n}\nClarify token rejection message when no statuses are returnedpackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/franela\/goreq\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar errTaskNotFresh = errors.New(\"This task has been running too long to request a token.\")\nvar errTaskEmptyStatuses = errors.New(\"This task does not have any statuses.\")\nvar errAlreadyGivenKey = errors.New(\"This task has already been given a token.\")\nvar usedTaskIds = NewTtlSet()\n\nfunc createToken(token string, opts interface{}) (string, error) {\n\tr, err := VaultRequest{goreq.Request{\n\t\tUri: vaultPath(\"\/v1\/auth\/token\/create\", \"\"),\n\t\tMethod: \"POST\",\n\t\tBody: opts,\n\t\tMaxRedirects: 10,\n\t\tRedirectHeaders: true,\n\t}.WithHeader(\"X-Vault-Token\", token)}.Do()\n\tif err == nil {\n\t\tdefer r.Body.Close()\n\t\tswitch r.StatusCode {\n\t\tcase 200:\n\t\t\tvar t vaultTokenResp\n\t\t\tif err := r.Body.FromJsonTo(&t); err == nil {\n\t\t\t\treturn t.Auth.ClientToken, nil\n\t\t\t} else {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tdefault:\n\t\t\tvar e vaultError\n\t\t\te.Code = r.StatusCode\n\t\t\tif err := r.Body.FromJsonTo(&e); err == nil {\n\t\t\t\treturn \"\", e\n\t\t\t} else {\n\t\t\t\te.Errors = []string{\"communication error.\"}\n\t\t\t\treturn \"\", e\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc createWrappedToken(token string, opts interface{}, wrapTTL time.Duration) (string, error) {\n\twrapTTLSeconds := strconv.Itoa(int(wrapTTL.Seconds()))\n\n\tr, err := VaultRequest{\n\t\tgoreq.Request{\n\t\t\tUri: vaultPath(\"\/v1\/auth\/token\/create\", \"\"),\n\t\t\tMethod: \"POST\",\n\t\t\tBody: opts,\n\t\t\tMaxRedirects: 10,\n\t\t\tRedirectHeaders: true,\n\t\t}.WithHeader(\"X-Vault-Token\", token).WithHeader(\"X-Vault-Wrap-TTL\", wrapTTLSeconds),\n\t}.Do()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer r.Body.Close()\n\n\tif r.StatusCode != 200 {\n\t\tvar e vaultError\n\t\te.Code = r.StatusCode\n\t\tif err := r.Body.FromJsonTo(&e); err == nil {\n\t\t\treturn \"\", e\n\t\t} else {\n\t\t\te.Errors = 
[]string{\"communication error.\"}\n\t\t\treturn \"\", e\n\t\t}\n\t}\n\n\tt := &vaultTokenResp{}\n\tif err := r.Body.FromJsonTo(t); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif t.WrapInfo.Token == \"\" {\n\t\treturn \"\", errors.New(\"Request for wrapped token did not return wrapped response\")\n\t}\n\n\treturn t.WrapInfo.Token, nil\n}\n\nfunc createTokenPair(token string, p *policy) (string, error) {\n\tpol := p.Policies\n\tif len(pol) == 0 { \/\/ explicitly set the policy, else the token will inherit ours\n\t\tpol = []string{\"default\"}\n\t}\n\n\tpermTokenOpts := struct {\n\t\tTtl string `json:\"ttl,omitempty\"`\n\t\tPolicies []string `json:\"policies\"`\n\t\tMeta map[string]string `json:\"meta,omitempty\"`\n\t\tNumUses int `json:\"num_uses\"`\n\t\tNoParent bool `json:\"no_parent\"`\n\t\tRenewable bool `json:\"renewable\"`\n\t}{time.Duration(time.Duration(p.Ttl) * time.Second).String(), pol, p.Meta, p.NumUses, true, true}\n\n\treturn createWrappedToken(token, permTokenOpts, 10*time.Minute)\n}\n\nfunc Provide(c *gin.Context) {\n\trequestStartTime := time.Now()\n\tstate.RLock()\n\tstatus := state.Status\n\ttoken := state.Token\n\tstate.RUnlock()\n\n\tremoteIp := c.Request.RemoteAddr\n\n\tatomic.AddInt32(&state.Stats.Requests, 1)\n\n\tif status == StatusSealed {\n\t\tlog.Printf(\"Rejected token request from %s. Reason: sealed.\", remoteIp)\n\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\tc.JSON(503, struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tOk bool `json:\"ok\"`\n\t\t\tError string `json:\"error\"`\n\t\t}{string(state.Status), false, \"Gatekeeper is sealed.\"})\n\t\treturn\n\t}\n\n\tvar reqParams struct {\n\t\tTaskId string `json:\"task_id\"`\n\t}\n\tdecoder := json.NewDecoder(c.Request.Body)\n\tif err := decoder.Decode(&reqParams); err == nil {\n\t\tif usedTaskIds.Has(reqParams.TaskId) {\n\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v\", remoteIp, reqParams.TaskId, errAlreadyGivenKey)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(403, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, errAlreadyGivenKey.Error()})\n\t\t\treturn\n\t\t}\n\t\t\/*\n\t\t\tThe task can start, but the task's framework may have not reported\n\t\t\tthat it is RUNNING back to mesos. In this case, the task will still\n\t\t\tbe STAGING and have a statuses length of 0.\n\n\t\t\tThis is a network race, so we just sleep and try again.\n\t\t*\/\n\t\tgMT := func(taskId string) (mesosTask, error) {\n\t\t\ttask, err := getMesosTask(taskId)\n\t\t\tfor i := time.Duration(0); i < 3 && err == nil && len(task.Statuses) == 0; i++ {\n\t\t\t\ttime.Sleep((500 + 250*i) * time.Millisecond)\n\t\t\t\ttask, err = getMesosTask(taskId)\n\t\t\t}\n\t\t\treturn task, err\n\t\t}\n\n\t\t\/\/ TODO: Remove this when we can incorporate Mesos in testing environment\n\t\tif reqParams.TaskId == state.testingTaskId && state.testingTaskId != \"\" {\n\t\t\tgMT = func(taskId string) (mesosTask, error) {\n\t\t\t\treturn mesosTask{\n\t\t\t\t\tStatuses: []struct {\n\t\t\t\t\t\tState string `json:\"state\"`\n\t\t\t\t\t\tTimestamp float64 `json:\"timestamp\"`\n\t\t\t\t\t}{{\"RUNNING\", float64(time.Now().UnixNano()) \/ float64(1000000000)}},\n\t\t\t\t\tId: reqParams.TaskId,\n\t\t\t\t\tName: \"Test\",\n\t\t\t\t}, nil\n\t\t\t}\n\t\t}\n\t\tif task, err := gMT(reqParams.TaskId); err == nil {\n\t\t\tif len(task.Statuses) == 0 {\n\t\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). 
Reason: %v (no status)\", remoteIp, reqParams.TaskId, errTaskEmptyStatuses)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(403, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, errTaskEmptyStatuses.Error()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ https:\/\/github.com\/apache\/mesos\/blob\/a61074586d778d432ba991701c9c4de9459db897\/src\/webui\/master\/static\/js\/controllers.js#L148\n\t\t\tstartTime := time.Unix(0, int64(task.Statuses[0].Timestamp*1000000000))\n\t\t\ttaskLife := time.Now().Sub(startTime)\n\t\t\tif taskLife > config.MaxTaskLife {\n\t\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v (no status) Task Life: %s\", remoteIp, reqParams.TaskId, errTaskNotFresh, taskLife)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(403, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, errTaskNotFresh.Error()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstate.RLock()\n\t\t\tpolicy := activePolicies.Get(task.Name)\n\t\t\tstate.RUnlock()\n\t\t\tif tempToken, err := createTokenPair(token, policy); err == nil {\n\t\t\t\tlog.Printf(\"Provided token pair for %s in %v. (Task Id: %s) (Task Name: %s). Policies: %v\", remoteIp, time.Now().Sub(requestStartTime), reqParams.TaskId, task.Name, policy.Policies)\n\t\t\t\tatomic.AddInt32(&state.Stats.Successful, 1)\n\t\t\t\tusedTaskIds.Put(reqParams.TaskId, config.MaxTaskLife+1*time.Minute)\n\t\t\t\tc.JSON(200, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t}{string(state.Status), true, tempToken})\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Failed to create token pair for %s (Task Id: %s). Error: %v\", remoteIp, reqParams.TaskId, err)\n\t\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\t\tc.JSON(500, struct {\n\t\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\t\tError string `json:\"error\"`\n\t\t\t\t}{string(state.Status), false, err.Error()})\n\t\t\t}\n\t\t} else if err == errNoSuchTask {\n\t\t\tlog.Printf(\"Rejected token request from %s (Task Id: %s). Reason: %v\", remoteIp, reqParams.TaskId, errNoSuchTask)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(403, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, err.Error()})\n\t\t} else {\n\t\t\tlog.Printf(\"Failed to retrieve task information for %s (Task Id: %s). Reason: %v\", remoteIp, reqParams.TaskId, err)\n\t\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\t\tc.JSON(500, struct {\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t\tOk bool `json:\"ok\"`\n\t\t\t\tError string `json:\"error\"`\n\t\t\t}{string(state.Status), false, err.Error()})\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Rejected token request from %s. 
Reason: %v\", remoteIp, err)\n\t\tatomic.AddInt32(&state.Stats.Denied, 1)\n\t\tc.JSON(400, struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tOk bool `json:\"ok\"`\n\t\t\tError string `json:\"error\"`\n\t\t}{string(state.Status), false, err.Error()})\n\t}\n}\n<|endoftext|>"} {"text":"package docker\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tCommit bool\n\tExportPath string `mapstructure:\"export_path\"`\n\tImage string\n\tPull bool\n\tRunCommand []string `mapstructure:\"run_command\"`\n\tVolumes map[string]string\n\n\tLogin bool\n\tLoginEmail string `mapstructure:\"login_email\"`\n\tLoginUsername string `mapstructure:\"login_username\"`\n\tLoginPassword string `mapstructure:\"login_password\"`\n\tLoginServer string `mapstructure:\"login_server\"`\n\n\tctx interpolate.Context\n}\n\nfunc NewConfig(raws ...interface{}) (*Config, []string, error) {\n\tc := new(Config)\n\n\tvar md mapstructure.Metadata\n\terr := config.Decode(&c, &config.DecodeOpts{\n\t\tMetadata: &md,\n\t\tInterpolate: true,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"run_command\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ Defaults\n\tif len(c.RunCommand) == 0 {\n\t\tc.RunCommand = []string{\n\t\t\t\"-d\", \"-i\", \"-t\",\n\t\t\t\"{{.Image}}\",\n\t\t\t\"\/bin\/bash\",\n\t\t}\n\t}\n\n\t\/\/ Default Pull if it wasn't set\n\thasPull := false\n\tfor _, k := range md.Keys {\n\t\tif k == \"Pull\" {\n\t\t\thasPull = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !hasPull {\n\t\tc.Pull = true\n\t}\n\n\tvar errs *packer.MultiError\n\tif c.Image == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\tfmt.Errorf(\"image must be specified\"))\n\t}\n\n\tif c.ExportPath != \"\" && c.Commit {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\tfmt.Errorf(\"both commit and export_path cannot be set\"))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, nil, errs\n\t}\n\n\treturn c, nil, nil\n}\nbuilder\/docker: fix config parsingpackage docker\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tCommit bool\n\tExportPath string `mapstructure:\"export_path\"`\n\tImage string\n\tPull bool\n\tRunCommand []string `mapstructure:\"run_command\"`\n\tVolumes map[string]string\n\n\tLogin bool\n\tLoginEmail string `mapstructure:\"login_email\"`\n\tLoginUsername string `mapstructure:\"login_username\"`\n\tLoginPassword string `mapstructure:\"login_password\"`\n\tLoginServer string `mapstructure:\"login_server\"`\n\n\tctx interpolate.Context\n}\n\nfunc NewConfig(raws ...interface{}) (*Config, []string, error) {\n\tvar c Config\n\n\tvar md mapstructure.Metadata\n\terr := config.Decode(&c, &config.DecodeOpts{\n\t\tMetadata: &md,\n\t\tInterpolate: true,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"run_command\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t\/\/ 
Defaults\n\tif len(c.RunCommand) == 0 {\n\t\tc.RunCommand = []string{\n\t\t\t\"-d\", \"-i\", \"-t\",\n\t\t\t\"{{.Image}}\",\n\t\t\t\"\/bin\/bash\",\n\t\t}\n\t}\n\n\t\/\/ Default Pull if it wasn't set\n\thasPull := false\n\tfor _, k := range md.Keys {\n\t\tif k == \"Pull\" {\n\t\t\thasPull = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !hasPull {\n\t\tc.Pull = true\n\t}\n\n\tvar errs *packer.MultiError\n\tif c.Image == \"\" {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\tfmt.Errorf(\"image must be specified\"))\n\t}\n\n\tif c.ExportPath != \"\" && c.Commit {\n\t\terrs = packer.MultiErrorAppend(errs,\n\t\t\tfmt.Errorf(\"both commit and export_path cannot be set\"))\n\t}\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, nil, errs\n\t}\n\n\treturn &c, nil, nil\n}\n<|endoftext|>"} {"text":"package periodicproc\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tgp \"github.com\/jbenet\/goprocess\"\n)\n\nvar (\n\tgrace = time.Millisecond * 5\n\tinterval = time.Millisecond * 10\n\ttimeout = time.Second * 5\n)\n\nfunc between(min, diff, max time.Duration) bool {\n\treturn min <= diff && diff <= max\n}\n\nfunc testBetween(t *testing.T, min, diff, max time.Duration) {\n\tif !between(min, diff, max) {\n\t\tt.Error(\"time diff incorrect:\", min, diff, max)\n\t}\n}\n\ntype intervalFunc func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process)\n\nfunc testSeq(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\tp := toTest(times, nil)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\t\tlast = next\n\t}\n\n\tgo p.Close()\n\tselect {\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testSeqWait(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\tlast = time.Now() \/\/ make it now (sequential)\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\n\tselect {\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testSeqNoWait(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, 0, next.Sub(last), interval+grace) \/\/ min of 0\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\tlast = time.Now() \/\/ make it now (sequential)\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\nend:\n\tselect {\n\tcase wait <- struct{}{}: \/\/ drain any extras.\n\t\tgoto end\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testParallel(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\t\tlast = next\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\nend:\n\tselect {\n\tcase wait <- struct{}{}: 
\/\/ drain any extras.\n\t\tgoto end\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc TestEverySeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Every(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestEverySeqWait(t *testing.T) {\n\ttestSeqWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Every(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestEveryGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn EveryGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestEveryGoSeqParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn EveryGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Tick(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickSeqNoWait(t *testing.T) {\n\ttestSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Tick(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickGoSeqParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickerSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Ticker(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickerSeqNoWait(t *testing.T) {\n\ttestSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Ticker(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickerGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickerGo(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickerGoParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickerGo(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\nadded ci timing fixpackage periodicproc\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tci \"github.com\/jbenet\/go-cienv\"\n\tgp \"github.com\/jbenet\/goprocess\"\n)\n\nvar (\n\tgrace = time.Millisecond * 
5\n\tinterval = time.Millisecond * 10\n\ttimeout = time.Second * 5\n)\n\nfunc init() {\n\tif ci.IsRunning() {\n\t\tgrace = time.Millisecond * 500\n\t\tinterval = time.Millisecond * 1000\n\t\ttimeout = time.Second * 15\n\t}\n}\n\nfunc between(min, diff, max time.Duration) bool {\n\treturn min <= diff && diff <= max\n}\n\nfunc testBetween(t *testing.T, min, diff, max time.Duration) {\n\tif !between(min, diff, max) {\n\t\tt.Error(\"time diff incorrect:\", min, diff, max)\n\t}\n}\n\ntype intervalFunc func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process)\n\nfunc testSeq(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\tp := toTest(times, nil)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\t\tlast = next\n\t}\n\n\tgo p.Close()\n\tselect {\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testSeqWait(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\tlast = time.Now() \/\/ make it now (sequential)\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\n\tselect {\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testSeqNoWait(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, 0, next.Sub(last), interval+grace) \/\/ min of 0\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\tlast = time.Now() \/\/ make it now (sequential)\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\nend:\n\tselect {\n\tcase wait <- struct{}{}: \/\/ drain any extras.\n\t\tgoto end\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc testParallel(t *testing.T, toTest intervalFunc) {\n\tt.Parallel()\n\n\tlast := time.Now()\n\ttimes := make(chan time.Time, 10)\n\twait := make(chan struct{})\n\tp := toTest(times, wait)\n\n\tfor i := 0; i < 5; i++ {\n\t\tnext := <-times\n\t\ttestBetween(t, interval-grace, next.Sub(last), interval+grace)\n\t\tlast = next\n\n\t\t<-time.After(interval * 2) \/\/ make it wait.\n\t\twait <- struct{}{} \/\/ release it.\n\t}\n\n\tgo p.Close()\n\nend:\n\tselect {\n\tcase wait <- struct{}{}: \/\/ drain any extras.\n\t\tgoto end\n\tcase <-p.Closed():\n\tcase <-time.After(timeout):\n\t\tt.Error(\"proc failed to close\")\n\t}\n}\n\nfunc TestEverySeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Every(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestEverySeqWait(t *testing.T) {\n\ttestSeqWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Every(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestEveryGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn 
EveryGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestEveryGoSeqParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn EveryGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Tick(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickSeqNoWait(t *testing.T) {\n\ttestSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Tick(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickGoSeqParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickGo(interval, func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickerSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Ticker(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickerSeqNoWait(t *testing.T) {\n\ttestSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn Ticker(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestTickerGoSeq(t *testing.T) {\n\ttestSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickerGo(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t})\n\t})\n}\n\nfunc TestTickerGoParallel(t *testing.T) {\n\ttestParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) {\n\t\treturn TickerGo(time.Tick(interval), func(proc gp.Process) {\n\t\t\ttimes <- time.Now()\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\tcase <-proc.Closing():\n\t\t\t}\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version contains the LXD version number\nvar Version = \"4.22\"\nRelease LXD 4.23package version\n\n\/\/ Version contains the LXD version number\nvar Version = \"4.23\"\n<|endoftext|>"} {"text":"package job\n\nimport (\n\t\"gonzbee\/config\"\n\t\"gonzbee\/nntp\"\n\t\"gonzbee\/nzb\"\n\t\"gonzbee\/yenc\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype job struct {\n\tdir string\n\tn *nzb.Nzb\n}\n\ntype messagejob struct {\n\tgroup string\n\tmsgId string\n\tch chan io.ReadCloser\n}\n\nfunc init() {\n\tgo poolHandler()\n}\n\nvar download = make(chan *messagejob)\nvar downloadMux = make(chan *messagejob)\nvar reaper = make(chan int)\n\nfunc newConnection() error {\n\ts := config.C.Server.GetAddressStr()\n\tvar err error\n\tvar n *nntp.Conn\n\tif config.C.Server.TLS {\n\t\tn, err = nntp.DialTLS(s)\n\t} else {\n\t\tn, err = nntp.Dial(s)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = 
n.Authenticate(config.C.Server.Username, config.C.Server.Password)\n\tif err != nil {\n\t\tn.Close()\n\t\treturn err\n\t}\n\tlog.Println(\"spun up nntp connection\")\n\tgo func() {\n\t\tdefer n.Close()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-downloadMux:\n\t\t\t\terr = n.SwitchGroup(m.group)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tb, _ := n.GetMessageReader(m.msgId)\n\t\t\t\tm.ch <- b\n\t\t\tcase <-(after(10 * time.Second)):\n\t\t\t\treaper <- 1\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc after(d time.Duration) <-chan time.Time {\n\tt := time.NewTimer(d)\n\treturn t.C\n}\n\nfunc poolHandler() {\n\tvar number int\n\tfor {\n\t\tselect {\n\t\tcase msg := <-download:\n\t\t\tif number < 10 {\n\t\t\t\terr := newConnection()\n\t\t\t\tif err == nil {\n\t\t\t\t\tnumber++\n\t\t\t\t}\n\t\t\t}\n\t\t\tdownloadMux <- msg\n\t\tcase <-reaper:\n\t\t\tnumber--\n\t\t}\n\t}\n}\n\nfunc (j *job) handle() {\n\twg := new(sync.WaitGroup)\n\tfor _, f := range j.n.File {\n\t\tch := make(chan io.ReadCloser)\n\t\tgo func(ret chan io.ReadCloser) {\n\t\t\twg.Add(1)\n\t\t\tm := <-ret\n\t\t\tpart, _ := yenc.NewPart(m)\n\t\t\tfile, _ := os.Create(filepath.Join(j.dir, part.Name))\n\t\t\tpartsLeft := part.Parts\n\t\t\tfile.Seek(part.Begin, os.SEEK_SET)\n\t\t\tpart.Decode(file)\n\t\t\tm.Close()\n\t\t\tpartsLeft--\n\t\t\tfor partsLeft > 0 {\n\t\t\t\tm = <-ret\n\t\t\t\tpart, _ := yenc.NewPart(m)\n\t\t\t\tfile.Seek(part.Begin, os.SEEK_SET)\n\t\t\t\tpart.Decode(file)\n\t\t\t\tm.Close()\n\t\t\t\tpartsLeft--\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\twg.Done()\n\t\t}(ch)\n\t\tfor _, seg := range f.Segments {\n\t\t\tmsg := &messagejob{\n\t\t\t\tmsgId: seg.MsgId,\n\t\t\t\tgroup: f.Groups[0],\n\t\t\t\tch: ch,\n\t\t\t}\n\t\t\tdownload <- msg\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc Start(n *nzb.Nzb, name string) {\n\tincDir := config.C.GetIncompleteDir()\n\tworkDir := filepath.Join(incDir, name)\n\tos.Mkdir(workDir, 0777)\n\tj := &job{\n\t\tdir: workDir,\n\t\tn: n,\n\t}\n\tj.handle()\n}\nRemove possible race condition in jobpackage job\n\nimport (\n\t\"gonzbee\/config\"\n\t\"gonzbee\/nntp\"\n\t\"gonzbee\/nzb\"\n\t\"gonzbee\/yenc\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype job struct {\n\tdir string\n\tn *nzb.Nzb\n}\n\ntype messagejob struct {\n\tgroup string\n\tmsgId string\n\tch chan io.ReadCloser\n}\n\nfunc init() {\n\tgo poolHandler()\n}\n\nvar download = make(chan *messagejob)\nvar downloadMux = make(chan *messagejob)\nvar reaper = make(chan int)\n\nfunc newConnection() error {\n\ts := config.C.Server.GetAddressStr()\n\tvar err error\n\tvar n *nntp.Conn\n\tif config.C.Server.TLS {\n\t\tn, err = nntp.DialTLS(s)\n\t} else {\n\t\tn, err = nntp.Dial(s)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = n.Authenticate(config.C.Server.Username, config.C.Server.Password)\n\tif err != nil {\n\t\tn.Close()\n\t\treturn err\n\t}\n\tlog.Println(\"spun up nntp connection\")\n\tgo func() {\n\t\tdefer n.Close()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-downloadMux:\n\t\t\t\terr = n.SwitchGroup(m.group)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tb, _ := n.GetMessageReader(m.msgId)\n\t\t\t\tm.ch <- b\n\t\t\tcase <-(after(10 * time.Second)):\n\t\t\t\treaper <- 1\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc after(d time.Duration) <-chan time.Time {\n\tt := time.NewTimer(d)\n\treturn t.C\n}\n\nfunc poolHandler() {\n\tvar number int\n\tfor {\n\t\tselect {\n\t\tcase msg := <-download:\n\t\t\tif number < 10 
{\n\t\t\t\terr := newConnection()\n\t\t\t\tif err == nil {\n\t\t\t\t\tnumber++\n\t\t\t\t}\n\t\t\t}\n\t\t\tdownloadMux <- msg\n\t\tcase <-reaper:\n\t\t\tnumber--\n\t\t}\n\t}\n}\n\nfunc (j *job) handle() {\n\twg := new(sync.WaitGroup)\n\tfor _, f := range j.n.File {\n\t\tch := make(chan io.ReadCloser)\n\t\twg.Add(1)\n\t\tgo func(ret chan io.ReadCloser) {\n\t\t\tm := <-ret\n\t\t\tpart, _ := yenc.NewPart(m)\n\t\t\tfile, _ := os.Create(filepath.Join(j.dir, part.Name))\n\t\t\tpartsLeft := part.Parts\n\t\t\tfile.Seek(part.Begin, os.SEEK_SET)\n\t\t\tpart.Decode(file)\n\t\t\tm.Close()\n\t\t\tpartsLeft--\n\t\t\tfor partsLeft > 0 {\n\t\t\t\tm = <-ret\n\t\t\t\tpart, _ := yenc.NewPart(m)\n\t\t\t\tfile.Seek(part.Begin, os.SEEK_SET)\n\t\t\t\tpart.Decode(file)\n\t\t\t\tm.Close()\n\t\t\t\tpartsLeft--\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\twg.Done()\n\t\t}(ch)\n\t\tfor _, seg := range f.Segments {\n\t\t\tmsg := &messagejob{\n\t\t\t\tmsgId: seg.MsgId,\n\t\t\t\tgroup: f.Groups[0],\n\t\t\t\tch: ch,\n\t\t\t}\n\t\t\tdownload <- msg\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc Start(n *nzb.Nzb, name string) {\n\tincDir := config.C.GetIncompleteDir()\n\tworkDir := filepath.Join(incDir, name)\n\tos.Mkdir(workDir, 0777)\n\tj := &job{\n\t\tdir: workDir,\n\t\tn: n,\n\t}\n\tj.handle()\n}\n<|endoftext|>"} {"text":"package graphql\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\n\t\"github.com\/sprucehealth\/graphql\/language\/ast\"\n)\n\nfunc coerceInt(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase int:\n\t\treturn value\n\tcase int8:\n\t\treturn int(value)\n\tcase int16:\n\t\treturn int(value)\n\tcase int32:\n\t\treturn int(value)\n\tcase int64:\n\t\tif value < int64(math.MinInt32) || value > int64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase uint:\n\t\treturn int(value)\n\tcase uint8:\n\t\treturn int(value)\n\tcase uint16:\n\t\treturn int(value)\n\tcase uint32:\n\t\tif value > uint32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase uint64:\n\t\tif value > uint64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase float32:\n\t\tif value < float32(math.MinInt32) || value > float32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase float64:\n\t\tif value < float64(math.MinInt64) || value > float64(math.MaxInt64) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 0)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn coerceInt(val)\n\t}\n\n\t\/\/ If the value cannot be transformed into an int, return nil instead of '0'\n\t\/\/ to denote 'no integer found'\n\treturn nil\n}\n\n\/\/ Int is the GraphQL Integer type definition.\nvar Int *Scalar = NewScalar(ScalarConfig{\n\tName: \"Int\",\n\tDescription: \"The `Int` scalar type represents non-fractional signed whole numeric \" +\n\t\t\"values. 
Int can represent values between -(2^53 - 1) and 2^53 - 1 since \" +\n\t\t\"represented in JSON as double-precision floating point numbers specified\" +\n\t\t\"by [IEEE 754](http:\/\/en.wikipedia.org\/wiki\/IEEE_floating_point).\",\n\tSerialize: coerceInt,\n\tParseValue: coerceInt,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.IntValue:\n\t\t\tif intValue, err := strconv.Atoi(valueAST.Value); err == nil {\n\t\t\t\treturn intValue\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceFloat64(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn float64(1)\n\t\t}\n\t\treturn float64(0)\n\tcase int:\n\t\treturn float64(value)\n\tcase float32:\n\t\treturn float64(value)\n\tcase float64:\n\t\treturn value\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 64)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn val\n\t}\n\treturn float64(0)\n}\n\n\/\/ Float is the GraphQL float type definition.\nvar Float *Scalar = NewScalar(ScalarConfig{\n\tName: \"Float\",\n\tDescription: \"The `Float` scalar type represents signed double-precision fractional \" +\n\t\t\"values as specified by \" +\n\t\t\"[IEEE 754](http:\/\/en.wikipedia.org\/wiki\/IEEE_floating_point). \",\n\tSerialize: coerceFloat64,\n\tParseValue: coerceFloat64,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.FloatValue:\n\t\t\tif floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil {\n\t\t\t\treturn floatValue\n\t\t\t}\n\t\tcase *ast.IntValue:\n\t\t\tif floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil {\n\t\t\t\treturn floatValue\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceString(value interface{}) interface{} {\n\treturn fmt.Sprintf(\"%v\", value)\n}\n\n\/\/ String is the GraphQL string type definition\nvar String *Scalar = NewScalar(ScalarConfig{\n\tName: \"String\",\n\tDescription: \"The `String` scalar type represents textual data, represented as UTF-8 \" +\n\t\t\"character sequences. 
The String type is most often used by GraphQL to \" +\n\t\t\"represent free-form human-readable text.\",\n\tSerialize: coerceString,\n\tParseValue: coerceString,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.StringValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceBool(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\treturn value\n\tcase string:\n\t\tswitch value {\n\t\tcase \"\", \"false\":\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase float64:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase float32:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase int:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn false\n}\n\n\/\/ Boolean is the GraphQL boolean type definition\nvar Boolean *Scalar = NewScalar(ScalarConfig{\n\tName: \"Boolean\",\n\tDescription: \"The `Boolean` scalar type represents `true` or `false`.\",\n\tSerialize: coerceBool,\n\tParseValue: coerceBool,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.BooleanValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\n\n\/\/ ID is the GraphQL id type definition\nvar ID *Scalar = NewScalar(ScalarConfig{\n\tName: \"ID\",\n\tDescription: \"The `ID` scalar type represents a unique identifier, often used to \" +\n\t\t\"refetch an object or as key for a cache. The ID type appears in a JSON \" +\n\t\t\"response as a String; however, it is not intended to be human-readable. \" +\n\t\t\"When expected as an input type, any string (such as `\\\"4\\\"`) or integer \" +\n\t\t\"(such as `4`) input value will be accepted as an ID.\",\n\tSerialize: coerceString,\n\tParseValue: coerceString,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.IntValue:\n\t\t\treturn valueAST.Value\n\t\tcase *ast.StringValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\nUpdate Int type description to match implementationpackage graphql\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\n\t\"github.com\/sprucehealth\/graphql\/language\/ast\"\n)\n\nfunc coerceInt(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase int:\n\t\treturn value\n\tcase int8:\n\t\treturn int(value)\n\tcase int16:\n\t\treturn int(value)\n\tcase int32:\n\t\treturn int(value)\n\tcase int64:\n\t\tif value < int64(math.MinInt32) || value > int64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase uint:\n\t\treturn int(value)\n\tcase uint8:\n\t\treturn int(value)\n\tcase uint16:\n\t\treturn int(value)\n\tcase uint32:\n\t\tif value > uint32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase uint64:\n\t\tif value > uint64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase float32:\n\t\tif value < float32(math.MinInt32) || value > float32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase float64:\n\t\tif value < float64(math.MinInt64) || value > float64(math.MaxInt64) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 0)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn coerceInt(val)\n\t}\n\n\t\/\/ If the value cannot be transformed into an int, return nil instead of '0'\n\t\/\/ to denote 'no 
integer found'\n\treturn nil\n}\n\n\/\/ Int is the GraphQL Integer type definition.\nvar Int *Scalar = NewScalar(ScalarConfig{\n\tName: \"Int\",\n\tDescription: \"The `Int` scalar type represents non-fractional signed whole numeric \" +\n\t\t\"values. Int can represent values between -(2^31) and 2^31 - 1. \",\n\tSerialize: coerceInt,\n\tParseValue: coerceInt,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.IntValue:\n\t\t\tif intValue, err := strconv.Atoi(valueAST.Value); err == nil {\n\t\t\t\treturn intValue\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceFloat64(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn float64(1)\n\t\t}\n\t\treturn float64(0)\n\tcase int:\n\t\treturn float64(value)\n\tcase float32:\n\t\treturn float64(value)\n\tcase float64:\n\t\treturn value\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 64)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn val\n\t}\n\treturn float64(0)\n}\n\n\/\/ Float is the GraphQL float type definition.\nvar Float *Scalar = NewScalar(ScalarConfig{\n\tName: \"Float\",\n\tDescription: \"The `Float` scalar type represents signed double-precision fractional \" +\n\t\t\"values as specified by \" +\n\t\t\"[IEEE 754](http:\/\/en.wikipedia.org\/wiki\/IEEE_floating_point). \",\n\tSerialize: coerceFloat64,\n\tParseValue: coerceFloat64,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.FloatValue:\n\t\t\tif floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil {\n\t\t\t\treturn floatValue\n\t\t\t}\n\t\tcase *ast.IntValue:\n\t\t\tif floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil {\n\t\t\t\treturn floatValue\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceString(value interface{}) interface{} {\n\treturn fmt.Sprintf(\"%v\", value)\n}\n\n\/\/ String is the GraphQL string type definition\nvar String *Scalar = NewScalar(ScalarConfig{\n\tName: \"String\",\n\tDescription: \"The `String` scalar type represents textual data, represented as UTF-8 \" +\n\t\t\"character sequences. 
The String type is most often used by GraphQL to \" +\n\t\t\"represent free-form human-readable text.\",\n\tSerialize: coerceString,\n\tParseValue: coerceString,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.StringValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\n\nfunc coerceBool(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\treturn value\n\tcase string:\n\t\tswitch value {\n\t\tcase \"\", \"false\":\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase float64:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase float32:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase int:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn false\n}\n\n\/\/ Boolean is the GraphQL boolean type definition\nvar Boolean *Scalar = NewScalar(ScalarConfig{\n\tName: \"Boolean\",\n\tDescription: \"The `Boolean` scalar type represents `true` or `false`.\",\n\tSerialize: coerceBool,\n\tParseValue: coerceBool,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.BooleanValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\n\n\/\/ ID is the GraphQL id type definition\nvar ID *Scalar = NewScalar(ScalarConfig{\n\tName: \"ID\",\n\tDescription: \"The `ID` scalar type represents a unique identifier, often used to \" +\n\t\t\"refetch an object or as key for a cache. The ID type appears in a JSON \" +\n\t\t\"response as a String; however, it is not intended to be human-readable. \" +\n\t\t\"When expected as an input type, any string (such as `\\\"4\\\"`) or integer \" +\n\t\t\"(such as `4`) input value will be accepted as an ID.\",\n\tSerialize: coerceString,\n\tParseValue: coerceString,\n\tParseLiteral: func(valueAST ast.Value) interface{} {\n\t\tswitch valueAST := valueAST.(type) {\n\t\tcase *ast.IntValue:\n\t\t\treturn valueAST.Value\n\t\tcase *ast.StringValue:\n\t\t\treturn valueAST.Value\n\t\t}\n\t\treturn nil\n\t},\n})\n<|endoftext|>"} {"text":"\/\/ The quantile package implements the algorithm in the paper Effective\n\/\/ Computation of Biased Quantiles over Data Streams with both invariants.\n\/\/\n\/\/ This package is useful for calculating high-biased and targeted quantiles\n\/\/ for large datasets within low memory and CPU bounds. You trade a small\n\/\/ amount of accuracy in rank selection for efficiency.\n\/\/\n\/\/ Multiple Stream's can be merged before a Query, allowing clients to be\n\/\/ distributed across threads. See Stream.Merge and Stream.Samples.\n\/\/\n\/\/ For more detailed information about the algorithm, see:\n\/\/ http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf\npackage quantile\n\nimport (\n\t\"container\/list\"\n\t\"math\"\n\t\"sort\"\n)\n\n\/\/ Sample holds an observed value and meta information for compression. JSON\n\/\/ tags have been added for convenience.\ntype Sample struct {\n\tValue float64 `json:\",string\"`\n\tWidth float64 `json:\",string\"`\n\tDelta float64 `json:\",string\"`\n}\n\n\/\/ Samples represents a slice of samples. 
It implements sort.Interface.\ntype Samples []Sample\n\nfunc (a Samples) Len() int {\n\treturn len(a)\n}\n\nfunc (a Samples) Less(i, j int) bool {\n\treturn a[i].Value < a[j].Value\n}\n\nfunc (a Samples) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n\ntype invariant func(s *stream, r float64) float64\n\n\/\/ NewBiased returns an initialized Stream for high-biased quantiles (e.g.\n\/\/ 50th, 90th, 99th) not known a priori with finer error guarantees for the\n\/\/ higher ranks of the data distribution.\n\/\/ See http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf for time, space, and error properties.\nfunc NewBiased() *Stream {\n\tf := func(s *stream, r float64) float64 {\n\t\treturn 2 * s.epsilon * r\n\t}\n\treturn newStream(0.01, f)\n}\n\n\/\/ NewTargeted returns an initialized Stream concerned with a particular set of\n\/\/ quantile values that are supplied a priori. Knowing these a priori reduces\n\/\/ space and computation time.\n\/\/ See http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf for time, space, and error properties.\nfunc NewTargeted(quantiles ...float64) *Stream {\n\tf := func(s *stream, r float64) float64 {\n\t\tvar m float64 = math.MaxFloat64\n\t\tvar f float64\n\t\tfor _, q := range quantiles {\n\t\t\tif q*s.n <= r {\n\t\t\t\tf = (2 * s.epsilon * r) \/ q\n\t\t\t} else {\n\t\t\t\tf = (2 * s.epsilon * (s.n - r)) \/ (1 - q)\n\t\t\t}\n\t\t\tm = math.Min(m, f)\n\t\t}\n\t\treturn m\n\t}\n\treturn newStream(0.01, f)\n}\n\n\/\/ Stream calculates quantiles for a stream of float64s.\ntype Stream struct {\n\t*stream\n\tb Samples\n}\n\nfunc newStream(epsilon float64, ƒ invariant) *Stream {\n\tx := &stream{epsilon: epsilon, ƒ: ƒ, l: list.New()}\n\treturn &Stream{x, make(Samples, 0, 500)}\n}\n\n\/\/ Insert inserts v into the stream.\nfunc (s *Stream) Insert(v float64) {\n\ts.insert(Sample{Value: v, Width: 1})\n}\n\nfunc (s *Stream) insert(sample Sample) {\n\ts.b = append(s.b, sample)\n\tif len(s.b) == cap(s.b) {\n\t\ts.flush()\n\t\ts.compress()\n\t}\n}\n\n\/\/ Query returns the calculated qth percentile value. If s was created with\n\/\/ NewTargeted, and q is not in the set of quantiles provided a priori, Query\n\/\/ will return an unspecified result.\nfunc (s *Stream) Query(q float64) float64 {\n\tif s.flushed() {\n\t\t\/\/ Fast path when there hasn't been enough data for a flush;\n\t\t\/\/ this also yields better accuracy for small sets of data.\n\t\ti := float64(len(s.b)) * q\n\t\treturn s.b[int(i)].Value\n\t}\n\ts.flush()\n\treturn s.stream.query(q)\n}\n\n\/\/ Merge merges samples into the underlying stream's samples. This is handy when\n\/\/ merging multiple streams from separate threads, database shards, etc.\nfunc (s *Stream) Merge(samples Samples) {\n\ts.stream.merge(samples)\n}\n\n\/\/ Reset reinitializes and clears the list, reusing the samples buffer memory.\nfunc (s *Stream) Reset() {\n\ts.stream.reset()\n\ts.b = s.b[:0]\n}\n\n\/\/ Samples returns stream samples held by s.\nfunc (s *Stream) Samples() Samples {\n\tif !s.flushed() {\n\t\treturn s.b\n\t}\n\treturn s.stream.samples()\n}\n\nfunc (s *Stream) flush() {\n\tsort.Sort(s.b)\n\ts.stream.merge(s.b)\n\ts.b = s.b[:0]\n}\n\nfunc (s *Stream) flushed() bool {\n\treturn s.stream.l.Len() == 0\n}\n\ntype stream struct {\n\tepsilon float64\n\tn float64\n\tl *list.List\n\tƒ invariant\n}\n\n\/\/ SetEpsilon sets the error epsilon for the Stream. 
The default epsilon is\n\/\/ 0.01 and is usually satisfactory.\n\/\/ To learn more, see: http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf\nfunc (s *stream) SetEpsilon(epsilon float64) {\n\ts.epsilon = epsilon\n}\n\nfunc (s *stream) reset() {\n\ts.l.Init()\n\ts.n = 0\n}\n\nfunc (s *stream) insert(v float64) {\n\tfn := s.mergeFunc()\n\tfn(v, 1)\n}\n\nfunc (s *stream) merge(samples Samples) {\n\tfn := s.mergeFunc()\n\tfor _, s := range samples {\n\t\tfn(s.Value, s.Width)\n\t}\n}\n\nfunc (s *stream) mergeFunc() func(v, w float64) {\n\t\/\/ NOTE: I used a goto over defer because it bought me a few extra\n\t\/\/ nanoseconds. I know. I know.\n\tvar r float64\n\te := s.l.Front()\n\treturn func(v, w float64) {\n\t\tfor ; e != nil; e = e.Next() {\n\t\t\tc := e.Value.(*Sample)\n\t\t\tif c.Value > v {\n\t\t\t\tsm := &Sample{v, w, math.Floor(s.ƒ(s, r)) - 1}\n\t\t\t\ts.l.InsertBefore(sm, e)\n\t\t\t\tgoto inserted\n\t\t\t}\n\t\t\tr += c.Width\n\t\t}\n\t\ts.l.PushBack(&Sample{v, w, 0})\n\tinserted:\n\t\ts.n += w\n\t}\n}\n\n\/\/ Count returns the total number of samples observed in the stream\n\/\/ since initialization.\nfunc (s *stream) Count() int {\n\treturn int(s.n)\n}\n\nfunc (s *stream) query(q float64) float64 {\n\te := s.l.Front()\n\tt := math.Ceil(q * s.n)\n\tt += math.Ceil(s.ƒ(s, t) \/ 2)\n\tp := e.Value.(*Sample)\n\te = e.Next()\n\tr := float64(0)\n\tfor e != nil {\n\t\tc := e.Value.(*Sample)\n\t\tif r+c.Width+c.Delta > t {\n\t\t\treturn p.Value\n\t\t}\n\t\tr += p.Width\n\t\tp = c\n\t\te = e.Next()\n\t}\n\treturn p.Value\n}\n\nfunc (s *stream) compress() {\n\tif s.l.Len() < 2 {\n\t\treturn\n\t}\n\te := s.l.Back()\n\tx := e.Value.(*Sample)\n\tr := s.n - 1 - x.Width\n\te = e.Prev()\n\tfor e != nil {\n\t\tc := e.Value.(*Sample)\n\t\tif c.Width+x.Width+x.Delta <= s.ƒ(s, r) {\n\t\t\tx.Width += c.Width\n\t\t\to := e\n\t\t\te = e.Prev()\n\t\t\ts.l.Remove(o)\n\t\t} else {\n\t\t\tx = c\n\t\t\te = e.Prev()\n\t\t}\n\t\tr -= c.Width\n\t}\n}\n\nfunc (s *stream) samples() Samples {\n\tsamples := make(Samples, 0, s.l.Len())\n\tfor e := s.l.Front(); e != nil; e = e.Next() {\n\t\tsamples = append(samples, *e.Value.(*Sample))\n\t}\n\treturn samples\n}\nbetter words\/\/ Package quantile computes approximate quantiles over an unbounded data\n\/\/ stream within low memory and CPU bounds.\n\/\/\n\/\/ A small amount of accuracy is traded to achieve the above properties.\n\/\/\n\/\/ Multiple streams can be merged before calling Query to generate a single set\n\/\/ of results. This is meaningful when the streams represent the same type of\n\/\/ data. See Merge and Samples.\n\/\/\n\/\/ For more detailed information about the algorithm, see:\n\/\/ http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf\n\/\/ The quantile package implements the algorithm in the paper Effective\n\/\/ Computation of Biased Quantiles over Data Streams with both invariants.\npackage quantile\n\nimport (\n\t\"container\/list\"\n\t\"math\"\n\t\"sort\"\n)\n\n\/\/ Sample holds an observed value and meta information for compression. JSON\n\/\/ tags have been added for convenience.\ntype Sample struct {\n\tValue float64 `json:\",string\"`\n\tWidth float64 `json:\",string\"`\n\tDelta float64 `json:\",string\"`\n}\n\n\/\/ Samples represents a slice of samples. 
It implements sort.Interface.\ntype Samples []Sample\n\nfunc (a Samples) Len() int {\n\treturn len(a)\n}\n\nfunc (a Samples) Less(i, j int) bool {\n\treturn a[i].Value < a[j].Value\n}\n\nfunc (a Samples) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n\ntype invariant func(s *stream, r float64) float64\n\n\/\/ NewBiased returns an initialized Stream for high-biased quantiles (e.g.\n\/\/ 50th, 90th, 99th) not known a priori with finer error guarantees for the\n\/\/ higher ranks of the data distribution.\n\/\/ See http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf for time, space, and error properties.\nfunc NewBiased() *Stream {\n\tf := func(s *stream, r float64) float64 {\n\t\treturn 2 * s.epsilon * r\n\t}\n\treturn newStream(0.01, f)\n}\n\n\/\/ NewTargeted returns an initialized Stream concerned with a particular set of\n\/\/ quantile values that are supplied a priori. Knowing these a priori reduces\n\/\/ space and computation time.\n\/\/ See http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf for time, space, and error properties.\nfunc NewTargeted(quantiles ...float64) *Stream {\n\tf := func(s *stream, r float64) float64 {\n\t\tvar m float64 = math.MaxFloat64\n\t\tvar f float64\n\t\tfor _, q := range quantiles {\n\t\t\tif q*s.n <= r {\n\t\t\t\tf = (2 * s.epsilon * r) \/ q\n\t\t\t} else {\n\t\t\t\tf = (2 * s.epsilon * (s.n - r)) \/ (1 - q)\n\t\t\t}\n\t\t\tm = math.Min(m, f)\n\t\t}\n\t\treturn m\n\t}\n\treturn newStream(0.01, f)\n}\n\n\/\/ Stream calculates quantiles for a stream of float64s.\ntype Stream struct {\n\t*stream\n\tb Samples\n}\n\nfunc newStream(epsilon float64, ƒ invariant) *Stream {\n\tx := &stream{epsilon: epsilon, ƒ: ƒ, l: list.New()}\n\treturn &Stream{x, make(Samples, 0, 500)}\n}\n\n\/\/ Insert inserts v into the stream.\nfunc (s *Stream) Insert(v float64) {\n\ts.insert(Sample{Value: v, Width: 1})\n}\n\nfunc (s *Stream) insert(sample Sample) {\n\ts.b = append(s.b, sample)\n\tif len(s.b) == cap(s.b) {\n\t\ts.flush()\n\t\ts.compress()\n\t}\n}\n\n\/\/ Query returns the calculated qth percentile value. If s was created with\n\/\/ NewTargeted, and q is not in the set of quantiles provided a priori, Query\n\/\/ will return an unspecified result.\nfunc (s *Stream) Query(q float64) float64 {\n\tif s.flushed() {\n\t\t\/\/ Fast path when there hasn't been enough data for a flush;\n\t\t\/\/ this also yields better accuracy for small sets of data.\n\t\ti := float64(len(s.b)) * q\n\t\treturn s.b[int(i)].Value\n\t}\n\ts.flush()\n\treturn s.stream.query(q)\n}\n\n\/\/ Merge merges samples into the underlying stream's samples. This is handy when\n\/\/ merging multiple streams from separate threads, database shards, etc.\nfunc (s *Stream) Merge(samples Samples) {\n\ts.stream.merge(samples)\n}\n\n\/\/ Reset reinitializes and clears the list, reusing the samples buffer memory.\nfunc (s *Stream) Reset() {\n\ts.stream.reset()\n\ts.b = s.b[:0]\n}\n\n\/\/ Samples returns stream samples held by s.\nfunc (s *Stream) Samples() Samples {\n\tif !s.flushed() {\n\t\treturn s.b\n\t}\n\treturn s.stream.samples()\n}\n\nfunc (s *Stream) flush() {\n\tsort.Sort(s.b)\n\ts.stream.merge(s.b)\n\ts.b = s.b[:0]\n}\n\nfunc (s *Stream) flushed() bool {\n\treturn s.stream.l.Len() == 0\n}\n\ntype stream struct {\n\tepsilon float64\n\tn float64\n\tl *list.List\n\tƒ invariant\n}\n\n\/\/ SetEpsilon sets the error epsilon for the Stream. 
The default epsilon is\n\/\/ 0.01 and is usually satisfactory.\n\/\/ To learn more, see: http:\/\/www.cs.rutgers.edu\/~muthu\/bquant.pdf\nfunc (s *stream) SetEpsilon(epsilon float64) {\n\ts.epsilon = epsilon\n}\n\nfunc (s *stream) reset() {\n\ts.l.Init()\n\ts.n = 0\n}\n\nfunc (s *stream) insert(v float64) {\n\tfn := s.mergeFunc()\n\tfn(v, 1)\n}\n\nfunc (s *stream) merge(samples Samples) {\n\tfn := s.mergeFunc()\n\tfor _, s := range samples {\n\t\tfn(s.Value, s.Width)\n\t}\n}\n\nfunc (s *stream) mergeFunc() func(v, w float64) {\n\t\/\/ NOTE: I used a goto over defer because it bought me a few extra\n\t\/\/ nanoseconds. I know. I know.\n\tvar r float64\n\te := s.l.Front()\n\treturn func(v, w float64) {\n\t\tfor ; e != nil; e = e.Next() {\n\t\t\tc := e.Value.(*Sample)\n\t\t\tif c.Value > v {\n\t\t\t\tsm := &Sample{v, w, math.Floor(s.ƒ(s, r)) - 1}\n\t\t\t\ts.l.InsertBefore(sm, e)\n\t\t\t\tgoto inserted\n\t\t\t}\n\t\t\tr += c.Width\n\t\t}\n\t\ts.l.PushBack(&Sample{v, w, 0})\n\tinserted:\n\t\ts.n += w\n\t}\n}\n\n\/\/ Count returns the total number of samples observed in the stream\n\/\/ since initialization.\nfunc (s *stream) Count() int {\n\treturn int(s.n)\n}\n\nfunc (s *stream) query(q float64) float64 {\n\te := s.l.Front()\n\tt := math.Ceil(q * s.n)\n\tt += math.Ceil(s.ƒ(s, t) \/ 2)\n\tp := e.Value.(*Sample)\n\te = e.Next()\n\tr := float64(0)\n\tfor e != nil {\n\t\tc := e.Value.(*Sample)\n\t\tif r+c.Width+c.Delta > t {\n\t\t\treturn p.Value\n\t\t}\n\t\tr += p.Width\n\t\tp = c\n\t\te = e.Next()\n\t}\n\treturn p.Value\n}\n\nfunc (s *stream) compress() {\n\tif s.l.Len() < 2 {\n\t\treturn\n\t}\n\te := s.l.Back()\n\tx := e.Value.(*Sample)\n\tr := s.n - 1 - x.Width\n\te = e.Prev()\n\tfor e != nil {\n\t\tc := e.Value.(*Sample)\n\t\tif c.Width+x.Width+x.Delta <= s.ƒ(s, r) {\n\t\t\tx.Width += c.Width\n\t\t\to := e\n\t\t\te = e.Prev()\n\t\t\ts.l.Remove(o)\n\t\t} else {\n\t\t\tx = c\n\t\t\te = e.Prev()\n\t\t}\n\t\tr -= c.Width\n\t}\n}\n\nfunc (s *stream) samples() Samples {\n\tsamples := make(Samples, 0, s.l.Len())\n\tfor e := s.l.Front(); e != nil; e = e.Next() {\n\t\tsamples = append(samples, *e.Value.(*Sample))\n\t}\n\treturn samples\n}\n<|endoftext|>"} {"text":"package main\n\n\/\/ copy from go scanner\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\ntype Scanner struct {\n\tsrc []byte \/\/ source\n\n\t\/\/ scanning state\n\tch rune \/\/ current character\n\toffset int \/\/ character offset\n\trdOffset int \/\/ reading offset (position after current character)\n\n\tline int \/\/ current line\n\n\terr error\n\n\t\/\/ items for one command\n\titems []interface{}\n\t\/\/ handle array type\n\tarrayItems [][]interface{}\n}\n\nconst bom = 0xFEFF \/\/ byte order mark, only permitted as very first character\n\n\/\/ Read the next Unicode char into s.ch.\n\/\/ s.ch < 0 means end-of-file.\n\/\/\nfunc (s *Scanner) next() {\n\tif s.rdOffset < len(s.src) {\n\t\ts.offset = s.rdOffset\n\t\tif s.ch == '\\n' {\n\t\t\ts.line++\n\t\t}\n\t\tr, w := rune(s.src[s.rdOffset]), 1\n\t\tswitch {\n\t\tcase r == 0:\n\t\t\ts.error(s.offset, \"illegal character NUL\")\n\t\tcase r >= 0x80:\n\t\t\t\/\/ not ASCII\n\t\t\tr, w = utf8.DecodeRune(s.src[s.rdOffset:])\n\t\t\tif r == utf8.RuneError && w == 1 {\n\t\t\t\ts.error(s.offset, \"illegal UTF-8 encoding\")\n\t\t\t} else if r == bom && s.offset > 0 {\n\t\t\t\ts.error(s.offset, \"illegal byte order mark\")\n\t\t\t}\n\t\t}\n\t\ts.rdOffset += w\n\t\ts.ch = r\n\t} else {\n\t\ts.offset = len(s.src)\n\t\tif s.ch == '\\n' 
{\n\t\t\ts.line++\n\t\t}\n\t\ts.ch = -1 \/\/ eof\n\t}\n}\n\nfunc (s *Scanner) Init(src []byte) {\n\ts.src = src\n\n\ts.ch = ' '\n\ts.offset = 0\n\ts.rdOffset = 0\n\ts.line = 1\n\n\ts.next()\n\tif s.ch == bom {\n\t\ts.next() \/\/ ignore BOM at file beginning\n\t}\n}\n\nfunc (s *Scanner) error(offs int, msg string) {\n\tif s.err == nil {\n\t\ts.err = fmt.Errorf(\"An error occurs at line %d, offset %d, err: %v\", s.line, offs, msg)\n\t}\n}\n\nfunc (s *Scanner) scanComment() string {\n\toffs := s.offset - 1\n\n\ts.next()\n\tfor s.ch != '\\n' && s.ch >= 0 {\n\t\ts.next()\n\t}\n\n\tlit := s.src[offs:s.offset]\n\n\treturn string(lit)\n}\n\nfunc isLetter(ch rune) bool {\n\treturn 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)\n}\n\nfunc isDigit(ch rune) bool {\n\treturn '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)\n}\n\nfunc (s *Scanner) scanIdentifier() string {\n\toffs := s.offset\n\tfor isLetter(s.ch) || isDigit(s.ch) {\n\t\ts.next()\n\t}\n\treturn string(s.src[offs:s.offset])\n}\n\nfunc digitVal(ch rune) int {\n\tswitch {\n\tcase '0' <= ch && ch <= '9':\n\t\treturn int(ch - '0')\n\tcase 'a' <= ch && ch <= 'f':\n\t\treturn int(ch - 'a' + 10)\n\tcase 'A' <= ch && ch <= 'F':\n\t\treturn int(ch - 'A' + 10)\n\t}\n\treturn 16 \/\/ larger than any legal digit val\n}\n\nfunc (s *Scanner) scanMantissa(base int) {\n\tfor digitVal(s.ch) < base {\n\t\ts.next()\n\t}\n}\n\nfunc (s *Scanner) scanNumber() interface{} {\n\toffs := s.offset\n\n\tisInteger := true\n\n\tif s.ch == '0' {\n\t\t\/\/ int or float\n\t\toffs := s.offset\n\t\ts.next()\n\t\tif s.ch == 'x' || s.ch == 'X' {\n\t\t\t\/\/ hexadecimal int\n\t\t\ts.next()\n\t\t\ts.scanMantissa(16)\n\t\t\tif s.offset-offs <= 2 {\n\t\t\t\t\/\/ only scanned \"0x\" or \"0X\"\n\t\t\t\ts.error(offs, \"illegal hexadecimal number\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ octal int or float\n\t\t\tseenDecimalDigit := false\n\t\t\ts.scanMantissa(8)\n\t\t\tif s.ch == '8' || s.ch == '9' {\n\t\t\t\t\/\/ illegal octal int or float\n\t\t\t\tseenDecimalDigit = true\n\t\t\t\ts.scanMantissa(10)\n\t\t\t}\n\t\t\tif s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {\n\t\t\t\tgoto fraction\n\t\t\t} else if seenDecimalDigit {\n\t\t\t\t\/\/ octal int\n\t\t\t\ts.error(offs, \"illegal octal number\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tgoto exit\n\t}\n\n\ts.scanMantissa(10)\n\nfraction:\n\tif s.ch == '.' {\n\t\tisInteger = false\n\t\ts.next()\n\t\ts.scanMantissa(10)\n\t}\n\n\tif s.ch == 'e' || s.ch == 'E' {\n\t\tisInteger = false\n\t\ts.next()\n\t\tif s.ch == '-' || s.ch == '+' {\n\t\t\ts.next()\n\t\t}\n\t\ts.scanMantissa(10)\n\t}\n\n\tif s.ch == 'i' {\n\t\ts.error(offs, \"illegal number: imaginary numbers are not supported\")\n\t\treturn nil\n\t}\n\nexit:\n\tvar v interface{}\n\tvar err error\n\tif isInteger {\n\t\t\/\/ base 0 lets ParseInt handle the \"0x\" hex and leading-zero octal forms scanned above\n\t\tv, err = strconv.ParseInt(string(s.src[offs:s.offset]), 0, 64)\n\t} else {\n\t\tv, err = strconv.ParseFloat(string(s.src[offs:s.offset]), 64)\n\t}\n\n\tif err != nil {\n\t\ts.error(offs, fmt.Sprintf(\"illegal number, parse err: %v\", err))\n\t\treturn nil\n\t}\n\n\treturn v\n}\n\n\/\/ scanEscape parses an escape sequence where quote is the accepted\n\/\/ escaped quote. In case of a syntax error, it stops at the offending\n\/\/ character (without consuming it) and returns false. 
Otherwise\n\/\/ it returns true.\nfunc (s *Scanner) scanEscape(quote rune) bool {\n\toffs := s.offset\n\n\tvar n int\n\tvar base, max uint32\n\tswitch s.ch {\n\tcase 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\\\', quote:\n\t\ts.next()\n\t\treturn true\n\tcase '0', '1', '2', '3', '4', '5', '6', '7':\n\t\tn, base, max = 3, 8, 255\n\tcase 'x':\n\t\ts.next()\n\t\tn, base, max = 2, 16, 255\n\tcase 'u':\n\t\ts.next()\n\t\tn, base, max = 4, 16, unicode.MaxRune\n\tcase 'U':\n\t\ts.next()\n\t\tn, base, max = 8, 16, unicode.MaxRune\n\tdefault:\n\t\tmsg := \"unknown escape sequence\"\n\t\tif s.ch < 0 {\n\t\t\tmsg = \"escape sequence not terminated\"\n\t\t}\n\t\ts.error(offs, msg)\n\t\treturn false\n\t}\n\n\tvar x uint32\n\tfor n > 0 {\n\t\td := uint32(digitVal(s.ch))\n\t\tif d >= base {\n\t\t\tmsg := fmt.Sprintf(\"illegal character %#U in escape sequence\", s.ch)\n\t\t\tif s.ch < 0 {\n\t\t\t\tmsg = \"escape sequence not terminated\"\n\t\t\t}\n\t\t\ts.error(s.offset, msg)\n\t\t\treturn false\n\t\t}\n\t\tx = x*base + d\n\t\ts.next()\n\t\tn--\n\t}\n\n\tif x > max || 0xD800 <= x && x < 0xE000 {\n\t\ts.error(offs, \"escape sequence is invalid Unicode code point\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (s *Scanner) scanString() string {\n\t\/\/ '\"' opening already consumed\n\toffs := s.offset - 1\n\n\tfor {\n\t\tch := s.ch\n\t\tif ch == '\\n' || ch < 0 {\n\t\t\ts.error(offs, \"string literal not terminated\")\n\t\t\tbreak\n\t\t}\n\t\ts.next()\n\t\tif ch == '\"' {\n\t\t\tbreak\n\t\t}\n\t\tif ch == '\\\\' {\n\t\t\ts.scanEscape('\"')\n\t\t}\n\t}\n\n\t\/\/ remove quote\n\treturn string(s.src[offs+1 : s.offset-1])\n}\n\nfunc (s *Scanner) skipWhitespace() {\n\tfor s.ch == ' ' || s.ch == '\\t' || s.ch == '\\r' {\n\t\ts.next()\n\t}\n}\n\nfunc (s *Scanner) Err() error {\n\treturn s.err\n}\n\nfunc (s *Scanner) inBracket() bool {\n\treturn len(s.arrayItems) > 0\n}\n\nfunc (s *Scanner) ScanCommand() []interface{} {\n\ts.items = make([]interface{}, 0)\n\ts.arrayItems = make([][]interface{}, 0)\n\n\ts.scanCommand()\n\treturn s.items\n}\n\nfunc (s *Scanner) scanCommand() {\n\tvar v interface{}\n\tfor {\n\t\tv = nil\n\t\ts.skipWhitespace()\n\n\t\tswitch ch := s.ch; {\n\t\tcase isLetter(ch):\n\t\t\tv = s.scanIdentifier()\n\t\tcase '0' <= ch && ch <= '9':\n\t\t\tv = s.scanNumber()\n\t\tdefault:\n\t\t\ts.next()\n\t\t\tswitch ch {\n\t\t\tcase -1:\n\t\t\t\t\/\/ EOF\n\t\t\t\ts.err = io.EOF\n\t\t\t\treturn\n\t\t\tcase '\\n':\n\t\t\t\tif len(s.items) > 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase '\"':\n\t\t\t\tv = s.scanString()\n\t\t\tcase '[':\n\t\t\t\ts.arrayItems = append(s.arrayItems, make([]interface{}, 0))\n\t\t\tcase ']':\n\t\t\t\tif len(s.arrayItems) == 0 {\n\t\t\t\t\ts.error(s.offset, \"invalid ], no corresponding [\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ pop last array\n\t\t\t\tn := len(s.arrayItems) - 1\n\t\t\t\tv = s.arrayItems[n]\n\t\t\t\ts.arrayItems = s.arrayItems[0:n]\n\t\t\tcase '#':\n\t\t\t\ts.scanComment()\n\t\t\tcase ',':\n\t\t\t\tif !s.inBracket() {\n\t\t\t\t\ts.error(s.offset, fmt.Sprintf(\", must in bracket for array type\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ts.error(s.offset, fmt.Sprintf(\"illegal character %#U\", ch))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif v != nil {\n\t\t\tif s.inBracket() {\n\t\t\t\tn := len(s.arrayItems) - 1\n\t\t\t\tb := s.arrayItems[n]\n\t\t\t\tb = append(b, v)\n\t\t\t\ts.arrayItems[n] = b\n\t\t\t} else {\n\t\t\t\ts.items = append(s.items, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\nbreak scan if errpackage main\n\n\/\/ copy from go scanner\n\nimport 
(\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\ntype Scanner struct {\n\tsrc []byte \/\/ source\n\n\t\/\/ scanning state\n\tch rune \/\/ current character\n\toffset int \/\/ character offset\n\trdOffset int \/\/ reading offset (position after current character)\n\n\tline int \/\/ current line\n\n\terr error\n\n\t\/\/ items for one command\n\titems []interface{}\n\t\/\/ handle array type\n\tarrayItems [][]interface{}\n}\n\nconst bom = 0xFEFF \/\/ byte order mark, only permitted as very first character\n\n\/\/ Read the next Unicode char into s.ch.\n\/\/ s.ch < 0 means end-of-file.\n\/\/\nfunc (s *Scanner) next() {\n\tif s.rdOffset < len(s.src) {\n\t\ts.offset = s.rdOffset\n\t\tif s.ch == '\\n' {\n\t\t\ts.line++\n\t\t}\n\t\tr, w := rune(s.src[s.rdOffset]), 1\n\t\tswitch {\n\t\tcase r == 0:\n\t\t\ts.error(s.offset, \"illegal character NUL\")\n\t\tcase r >= 0x80:\n\t\t\t\/\/ not ASCII\n\t\t\tr, w = utf8.DecodeRune(s.src[s.rdOffset:])\n\t\t\tif r == utf8.RuneError && w == 1 {\n\t\t\t\ts.error(s.offset, \"illegal UTF-8 encoding\")\n\t\t\t} else if r == bom && s.offset > 0 {\n\t\t\t\ts.error(s.offset, \"illegal byte order mark\")\n\t\t\t}\n\t\t}\n\t\ts.rdOffset += w\n\t\ts.ch = r\n\t} else {\n\t\ts.offset = len(s.src)\n\t\tif s.ch == '\\n' {\n\t\t\ts.line++\n\t\t}\n\t\ts.ch = -1 \/\/ eof\n\t}\n}\n\nfunc (s *Scanner) Init(src []byte) {\n\ts.src = src\n\n\ts.ch = ' '\n\ts.offset = 0\n\ts.rdOffset = 0\n\ts.line = 1\n\n\ts.next()\n\tif s.ch == bom {\n\t\ts.next() \/\/ ignore BOM at file beginning\n\t}\n}\n\nfunc (s *Scanner) error(offs int, msg string) {\n\tif s.err == nil {\n\t\ts.err = fmt.Errorf(\"An error occurs at line %d, offset %d, err: %v\", s.line, offs, msg)\n\t}\n}\n\nfunc (s *Scanner) scanComment() string {\n\toffs := s.offset - 1\n\n\ts.next()\n\tfor s.ch != '\\n' && s.ch >= 0 {\n\t\ts.next()\n\t}\n\n\tlit := s.src[offs:s.offset]\n\n\treturn string(lit)\n}\n\nfunc isLetter(ch rune) bool {\n\treturn 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)\n}\n\nfunc isDigit(ch rune) bool {\n\treturn '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)\n}\n\nfunc (s *Scanner) scanIdentifier() string {\n\toffs := s.offset\n\tfor isLetter(s.ch) || isDigit(s.ch) {\n\t\ts.next()\n\t}\n\treturn string(s.src[offs:s.offset])\n}\n\nfunc digitVal(ch rune) int {\n\tswitch {\n\tcase '0' <= ch && ch <= '9':\n\t\treturn int(ch - '0')\n\tcase 'a' <= ch && ch <= 'f':\n\t\treturn int(ch - 'a' + 10)\n\tcase 'A' <= ch && ch <= 'F':\n\t\treturn int(ch - 'A' + 10)\n\t}\n\treturn 16 \/\/ larger than any legal digit val\n}\n\nfunc (s *Scanner) scanMantissa(base int) {\n\tfor digitVal(s.ch) < base {\n\t\ts.next()\n\t}\n}\n\nfunc (s *Scanner) scanNumber() interface{} {\n\toffs := s.offset\n\n\tisInteger := true\n\n\tif s.ch == '0' {\n\t\t\/\/ int or float\n\t\toffs := s.offset\n\t\ts.next()\n\t\tif s.ch == 'x' || s.ch == 'X' {\n\t\t\t\/\/ hexadecimal int\n\t\t\ts.next()\n\t\t\ts.scanMantissa(16)\n\t\t\tif s.offset-offs <= 2 {\n\t\t\t\t\/\/ only scanned \"0x\" or \"0X\"\n\t\t\t\ts.error(offs, \"illegal hexadecimal number\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ octal int or float\n\t\t\tseenDecimalDigit := false\n\t\t\ts.scanMantissa(8)\n\t\t\tif s.ch == '8' || s.ch == '9' {\n\t\t\t\t\/\/ illegal octal int or float\n\t\t\t\tseenDecimalDigit = true\n\t\t\t\ts.scanMantissa(10)\n\t\t\t}\n\t\t\tif s.ch == '.' 
|| s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {\n\t\t\t\tgoto fraction\n\t\t\t} else if seenDecimalDigit {\n\t\t\t\t\/\/ octal int\n\t\t\t\ts.error(offs, \"illegal octal number\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tgoto exit\n\t}\n\n\ts.scanMantissa(10)\n\nfraction:\n\tif s.ch == '.' {\n\t\tisInteger = false\n\t\ts.next()\n\t\ts.scanMantissa(10)\n\t}\n\n\tif s.ch == 'e' || s.ch == 'E' {\n\t\tisInteger = false\n\t\ts.next()\n\t\tif s.ch == '-' || s.ch == '+' {\n\t\t\ts.next()\n\t\t}\n\t\ts.scanMantissa(10)\n\t}\n\n\tif s.ch == 'i' {\n\t\ts.error(offs, \"illegal number: imaginary numbers are not supported\")\n\t\treturn nil\n\t}\n\nexit:\n\tvar v interface{}\n\tvar err error\n\tif isInteger {\n\t\t\/\/ base 0 lets ParseInt handle the \"0x\" hex and leading-zero octal forms scanned above\n\t\tv, err = strconv.ParseInt(string(s.src[offs:s.offset]), 0, 64)\n\t} else {\n\t\tv, err = strconv.ParseFloat(string(s.src[offs:s.offset]), 64)\n\t}\n\n\tif err != nil {\n\t\ts.error(offs, fmt.Sprintf(\"illegal number, parse err: %v\", err))\n\t\treturn nil\n\t}\n\n\treturn v\n}\n\n\/\/ scanEscape parses an escape sequence where quote is the accepted\n\/\/ escaped quote. In case of a syntax error, it stops at the offending\n\/\/ character (without consuming it) and returns false. Otherwise\n\/\/ it returns true.\nfunc (s *Scanner) scanEscape(quote rune) bool {\n\toffs := s.offset\n\n\tvar n int\n\tvar base, max uint32\n\tswitch s.ch {\n\tcase 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\\\', quote:\n\t\ts.next()\n\t\treturn true\n\tcase '0', '1', '2', '3', '4', '5', '6', '7':\n\t\tn, base, max = 3, 8, 255\n\tcase 'x':\n\t\ts.next()\n\t\tn, base, max = 2, 16, 255\n\tcase 'u':\n\t\ts.next()\n\t\tn, base, max = 4, 16, unicode.MaxRune\n\tcase 'U':\n\t\ts.next()\n\t\tn, base, max = 8, 16, unicode.MaxRune\n\tdefault:\n\t\tmsg := \"unknown escape sequence\"\n\t\tif s.ch < 0 {\n\t\t\tmsg = \"escape sequence not terminated\"\n\t\t}\n\t\ts.error(offs, msg)\n\t\treturn false\n\t}\n\n\tvar x uint32\n\tfor n > 0 {\n\t\td := uint32(digitVal(s.ch))\n\t\tif d >= base {\n\t\t\tmsg := fmt.Sprintf(\"illegal character %#U in escape sequence\", s.ch)\n\t\t\tif s.ch < 0 {\n\t\t\t\tmsg = \"escape sequence not terminated\"\n\t\t\t}\n\t\t\ts.error(s.offset, msg)\n\t\t\treturn false\n\t\t}\n\t\tx = x*base + d\n\t\ts.next()\n\t\tn--\n\t}\n\n\tif x > max || 0xD800 <= x && x < 0xE000 {\n\t\ts.error(offs, \"escape sequence is invalid Unicode code point\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (s *Scanner) scanString() string {\n\t\/\/ '\"' opening already consumed\n\toffs := s.offset - 1\n\n\tfor {\n\t\tch := s.ch\n\t\tif ch == '\\n' || ch < 0 {\n\t\t\ts.error(offs, \"string literal not terminated\")\n\t\t\tbreak\n\t\t}\n\t\ts.next()\n\t\tif ch == '\"' {\n\t\t\tbreak\n\t\t}\n\t\tif ch == '\\\\' {\n\t\t\ts.scanEscape('\"')\n\t\t}\n\t}\n\n\t\/\/ remove quote\n\treturn string(s.src[offs+1 : s.offset-1])\n}\n\nfunc (s *Scanner) skipWhitespace() {\n\tfor s.ch == ' ' || s.ch == '\\t' || s.ch == '\\r' {\n\t\ts.next()\n\t}\n}\n\nfunc (s *Scanner) Err() error {\n\treturn s.err\n}\n\nfunc (s *Scanner) inBracket() bool {\n\treturn len(s.arrayItems) > 0\n}\n\nfunc (s *Scanner) ScanCommand() []interface{} {\n\ts.items = make([]interface{}, 0)\n\ts.arrayItems = make([][]interface{}, 0)\n\n\ts.scanCommand()\n\treturn s.items\n}\n\nfunc (s *Scanner) scanCommand() {\n\tvar v interface{}\n\tfor {\n\t\tv = nil\n\t\ts.skipWhitespace()\n\n\t\tswitch ch := s.ch; {\n\t\tcase isLetter(ch):\n\t\t\tv = s.scanIdentifier()\n\t\tcase '0' <= ch && ch <= '9':\n\t\t\tv = s.scanNumber()\n\t\tdefault:\n\t\t\ts.next()\n\t\t\tswitch ch 
{\n\t\t\tcase -1:\n\t\t\t\t\/\/ EOF\n\t\t\t\ts.err = io.EOF\n\t\t\t\treturn\n\t\t\tcase '\\n':\n\t\t\t\tif len(s.items) > 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase '\"':\n\t\t\t\tv = s.scanString()\n\t\t\tcase '[':\n\t\t\t\ts.arrayItems = append(s.arrayItems, make([]interface{}, 0))\n\t\t\tcase ']':\n\t\t\t\tif len(s.arrayItems) == 0 {\n\t\t\t\t\ts.error(s.offset, \"invalid ], no corresponding [\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t\/\/ pop last array\n\t\t\t\tn := len(s.arrayItems) - 1\n\t\t\t\tv = s.arrayItems[n]\n\t\t\t\ts.arrayItems = s.arrayItems[0:n]\n\t\t\tcase '#':\n\t\t\t\ts.scanComment()\n\t\t\tcase ',':\n\t\t\t\tif !s.inBracket() {\n\t\t\t\t\ts.error(s.offset, fmt.Sprintf(\", must in bracket for array type\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ts.error(s.offset, fmt.Sprintf(\"illegal character %#U\", ch))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif s.err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif v != nil {\n\t\t\tif s.inBracket() {\n\t\t\t\tn := len(s.arrayItems) - 1\n\t\t\t\tb := s.arrayItems[n]\n\t\t\t\tb = append(b, v)\n\t\t\t\ts.arrayItems[n] = b\n\t\t\t} else {\n\t\t\t\ts.items = append(s.items, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"package missinggo\n\nimport (\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype gzipResponseWriter struct {\n\tio.Writer\n\thttp.ResponseWriter\n\thaveWritten bool\n}\n\nfunc (w *gzipResponseWriter) Write(b []byte) (int, error) {\n\tif w.haveWritten {\n\t\tgoto write\n\t}\n\tw.haveWritten = true\n\tif w.Header().Get(\"Content-Type\") != \"\" {\n\t\tgoto write\n\t}\n\tif type_ := http.DetectContentType(b); type_ != \"application\/octet-stream\" {\n\t\tw.Header().Set(\"Content-Type\", type_)\n\t}\nwrite:\n\treturn w.Writer.Write(b)\n}\n\n\/\/ Gzips response body if the request says it'll allow it.\nfunc GzipHTTPHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\th.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgz := gzip.NewWriter(w)\n\t\tdefer gz.Close()\n\t\th.ServeHTTP(&gzipResponseWriter{\n\t\t\tWriter: gz,\n\t\t\tResponseWriter: w,\n\t\t}, r)\n\t})\n}\nGzipHTTPHandler: Only gzip if a Content-Encoding isn't already presentpackage missinggo\n\nimport (\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\ntype gzipResponseWriter struct {\n\tio.Writer\n\thttp.ResponseWriter\n\thaveWritten bool\n}\n\nfunc (w *gzipResponseWriter) Write(b []byte) (int, error) {\n\tif w.haveWritten {\n\t\tgoto write\n\t}\n\tw.haveWritten = true\n\tif w.Header().Get(\"Content-Type\") != \"\" {\n\t\tgoto write\n\t}\n\tif type_ := http.DetectContentType(b); type_ != \"application\/octet-stream\" {\n\t\tw.Header().Set(\"Content-Type\", type_)\n\t}\nwrite:\n\treturn w.Writer.Write(b)\n}\n\n\/\/ Gzips response body if the request says it'll allow it.\nfunc GzipHTTPHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") || w.Header().Get(\"Content-Encoding\") != \"\" || w.Header().Get(\"Vary\") != \"\" {\n\t\t\th.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgz := gzip.NewWriter(w)\n\t\tdefer gz.Close()\n\t\th.ServeHTTP(&gzipResponseWriter{\n\t\t\tWriter: gz,\n\t\t\tResponseWriter: w,\n\t\t}, r)\n\t})\n}\n<|endoftext|>"} 
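A minimal usage sketch for the GzipHTTPHandler middleware in the record above. Only GzipHTTPHandler itself comes from the source; the import path and the wrapped handler are assumptions for illustration.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/anacrolix/missinggo" // assumed import path for the package above
)

func main() {
	// A plain handler; GzipHTTPHandler decides per request whether to wrap it.
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello, world")
	})

	// Requests advertising "Accept-Encoding: gzip" (and with no
	// Content-Encoding or Vary header already set on the response) receive a
	// gzipped body with "Content-Encoding: gzip"; all other requests fall
	// through to the plain handler unchanged.
	log.Fatal(http.ListenAndServe(":8080", missinggo.GzipHTTPHandler(hello)))
}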
{"text":"package admission\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apiserver\/pkg\/admission\"\n\t\"k8s.io\/apiserver\/pkg\/admission\/plugin\/namespace\/lifecycle\"\n\tmutatingwebhook \"k8s.io\/apiserver\/pkg\/admission\/plugin\/webhook\/mutating\"\n\tvalidatingwebhook \"k8s.io\/apiserver\/pkg\/admission\/plugin\/webhook\/validating\"\n\t\"k8s.io\/apiserver\/pkg\/apis\/apiserver\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/admission\/noderestriction\"\n\texpandpvcadmission \"k8s.io\/kubernetes\/plugin\/pkg\/admission\/storage\/persistentvolume\/resize\"\n\tstorageclassdefaultadmission \"k8s.io\/kubernetes\/plugin\/pkg\/admission\/storage\/storageclass\/setdefault\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\toadmission \"github.com\/openshift\/origin\/pkg\/cmd\/server\/admission\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/image\/apiserver\/admission\/apis\/imagepolicy\"\n\timageadmission \"github.com\/openshift\/origin\/pkg\/image\/apiserver\/admission\/limitrange\"\n\tingressadmission \"github.com\/openshift\/origin\/pkg\/network\/apiserver\/admission\"\n\toverrideapi \"github.com\/openshift\/origin\/pkg\/quota\/apiserver\/admission\/apis\/clusterresourceoverride\"\n\t\"github.com\/openshift\/origin\/pkg\/security\/apiserver\/admission\/sccadmission\"\n\t\"github.com\/openshift\/origin\/pkg\/service\/admission\/externalipranger\"\n\t\"github.com\/openshift\/origin\/pkg\/service\/admission\/restrictedendpoints\"\n)\n\nvar (\n\t\/\/ these are admission plugins that cannot be applied until after the kubeapiserver starts.\n\t\/\/ TODO if nothing comes to mind in 3.10, kill this\n\tSkipRunLevelZeroPlugins = sets.NewString()\n\t\/\/ these are admission plugins that cannot be applied until after the openshiftapiserver apiserver starts.\n\tSkipRunLevelOnePlugins = sets.NewString(\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t\timagepolicy.PluginName,\n\t\toverrideapi.PluginName,\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"RunOnceDuration\",\n\t\tsccadmission.PluginName,\n\t\t\"SCCExecRestrictions\",\n\t)\n\n\t\/\/ openshiftAdmissionControlPlugins gives the in-order default admission chain for openshift resources.\n\topenshiftAdmissionControlPlugins = []string{\n\t\tlifecycle.PluginName,\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/JenkinsBootstrapper\",\n\t\t\"openshift.io\/BuildConfigSecretInjector\",\n\t\t\"BuildByStrategy\",\n\t\timageadmission.PluginName,\n\t\t\"PodNodeConstraints\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\t\"Initializers\",\n\t\t\"MutatingAdmissionWebhook\",\n\t\t\"ValidatingAdmissionWebhook\",\n\t\t\"ResourceQuota\",\n\t}\n\n\t\/\/ KubeAdmissionPlugins gives the in-order default admission chain for kube resources.\n\tKubeAdmissionPlugins = 
[]string{\n\t\t\"AlwaysAdmit\",\n\t\t\"NamespaceAutoProvision\",\n\t\t\"NamespaceExists\",\n\t\tlifecycle.PluginName,\n\t\t\"EventRateLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"RunOnceDuration\",\n\t\t\"PodNodeConstraints\",\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"PodNodeSelector\",\n\t\toverrideapi.PluginName,\n\t\texternalipranger.ExternalIPPluginName,\n\t\trestrictedendpoints.RestrictedEndpointsPluginName,\n\t\timagepolicy.PluginName,\n\t\t\"ImagePolicyWebhook\",\n\t\t\"PodPreset\",\n\t\t\"LimitRanger\",\n\t\t\"ServiceAccount\",\n\t\tnoderestriction.PluginName,\n\t\t\"SecurityContextDeny\",\n\t\tsccadmission.PluginName,\n\t\t\"PodSecurityPolicy\",\n\t\t\"DenyEscalatingExec\",\n\t\t\"DenyExecOnPrivileged\",\n\t\tstorageclassdefaultadmission.PluginName,\n\t\texpandpvcadmission.PluginName,\n\t\t\"AlwaysPullImages\",\n\t\t\"LimitPodHardAntiAffinityTopology\",\n\t\t\"SCCExecRestrictions\",\n\t\t\"PersistentVolumeLabel\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\tingressadmission.IngressAdmission,\n\t\t\"Priority\",\n\t\t\"ExtendedResourceToleration\",\n\t\t\"DefaultTolerationSeconds\",\n\t\t\"StorageObjectInUseProtection\",\n\t\t\"Initializers\",\n\t\tmutatingwebhook.PluginName,\n\t\tvalidatingwebhook.PluginName,\n\t\t\"PodTolerationRestriction\",\n\t\t\"AlwaysDeny\",\n\t\t\/\/ NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.\n\t\t\/\/ DO NOT ADD ANY PLUGINS AFTER THIS LINE!\n\t\t\"ResourceQuota\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t}\n\n\t\/\/ combinedAdmissionControlPlugins gives the in-order default admission chain for all resources resources.\n\t\/\/ When possible, this list is used. The set of openshift+kube chains must exactly match this set. In addition,\n\t\/\/ the order specified in the openshift and kube chains must match the order here.\n\tCombinedAdmissionControlPlugins = []string{\n\t\t\"AlwaysAdmit\",\n\t\t\"NamespaceAutoProvision\",\n\t\t\"NamespaceExists\",\n\t\tlifecycle.PluginName,\n\t\t\"EventRateLimit\",\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"openshift.io\/JenkinsBootstrapper\",\n\t\t\"openshift.io\/BuildConfigSecretInjector\",\n\t\t\"BuildByStrategy\",\n\t\timageadmission.PluginName,\n\t\t\"RunOnceDuration\",\n\t\t\"PodNodeConstraints\",\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"PodNodeSelector\",\n\t\toverrideapi.PluginName,\n\t\texternalipranger.ExternalIPPluginName,\n\t\trestrictedendpoints.RestrictedEndpointsPluginName,\n\t\timagepolicy.PluginName,\n\t\t\"ImagePolicyWebhook\",\n\t\t\"PodPreset\",\n\t\t\"LimitRanger\",\n\t\t\"ServiceAccount\",\n\t\tnoderestriction.PluginName,\n\t\t\"SecurityContextDeny\",\n\t\tsccadmission.PluginName,\n\t\t\"PodSecurityPolicy\",\n\t\t\"DenyEscalatingExec\",\n\t\t\"DenyExecOnPrivileged\",\n\t\tstorageclassdefaultadmission.PluginName,\n\t\texpandpvcadmission.PluginName,\n\t\t\"AlwaysPullImages\",\n\t\t\"LimitPodHardAntiAffinityTopology\",\n\t\t\"SCCExecRestrictions\",\n\t\t\"PersistentVolumeLabel\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\tingressadmission.IngressAdmission,\n\t\t\"Priority\",\n\t\t\"ExtendedResourceToleration\",\n\t\t\"DefaultTolerationSeconds\",\n\t\t\"StorageObjectInUseProtection\",\n\t\t\"Initializers\",\n\t\tmutatingwebhook.PluginName,\n\t\tvalidatingwebhook.PluginName,\n\t\t\"PodTolerationRestriction\",\n\t\t\"AlwaysDeny\",\n\t\t\/\/ NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.\n\t\t\/\/ DO NOT ADD ANY PLUGINS AFTER THIS 
LINE!\n\t\t\"ResourceQuota\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t}\n)\n\n\/\/ fixupAdmissionPlugins fixes the input plugins to handle deprecation and duplicates.\nfunc fixupAdmissionPlugins(plugins []string) []string {\n\tresult := replace(plugins, \"openshift.io\/OriginResourceQuota\", \"ResourceQuota\")\n\tresult = dedupe(result)\n\treturn result\n}\n\nfunc NewAdmissionChains(\n\tadmissionConfigFiles []string,\n\tpluginConfig map[string]configv1.AdmissionPluginConfig,\n\tadmissionInitializer admission.PluginInitializer,\n\tadmissionDecorator admission.Decorator,\n) (admission.Interface, error) {\n\tadmissionPluginConfigFilename := \"\"\n\tif len(admissionConfigFiles) > 0 {\n\t\tadmissionPluginConfigFilename = admissionConfigFiles[0]\n\n\t} else {\n\t\tupstreamAdmissionConfig, err := ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttempFile, err := ioutil.TempFile(\"\", \"master-config.yaml\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer os.Remove(tempFile.Name())\n\t\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttempFile.Close()\n\t\tadmissionPluginConfigFilename = tempFile.Name()\n\t}\n\n\tadmissionPluginNames := openshiftAdmissionControlPlugins\n\tadmissionPluginNames = fixupAdmissionPlugins(admissionPluginNames)\n\n\tadmissionChain, err := newAdmissionChainFunc(admissionPluginNames, admissionPluginConfigFilename, admissionInitializer, admissionDecorator)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn admissionChain, err\n}\n\n\/\/ newAdmissionChainFunc is for unit testing only. You should NEVER OVERRIDE THIS outside of a unit test.\nvar newAdmissionChainFunc = newAdmissionChain\n\nfunc newAdmissionChain(pluginNames []string, admissionConfigFilename string, admissionInitializer admission.PluginInitializer, admissionDecorator admission.Decorator) (admission.Interface, error) {\n\tplugins := []admission.Interface{}\n\tfor _, pluginName := range pluginNames {\n\t\tvar (\n\t\t\tplugin admission.Interface\n\t\t)\n\n\t\t\/\/ TODO this needs to be refactored to use the admission scheme we created upstream. 
I think this holds us for the rebase.\n\t\tpluginsConfigProvider, err := admission.ReadAdmissionConfiguration([]string{pluginName}, admissionConfigFilename, configapi.Scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tplugin, err = OriginAdmissionPlugins.NewFromPlugins([]string{pluginName}, pluginsConfigProvider, admissionInitializer, admissionDecorator)\n\t\tif err != nil {\n\t\t\t\/\/ should have been caught with validation\n\t\t\treturn nil, err\n\t\t}\n\t\tif plugin == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tplugins = append(plugins, plugin)\n\n\t}\n\n\t\/\/ ensure that plugins have been properly initialized\n\tif err := oadmission.Validate(plugins); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn admission.NewChainHandler(plugins...), nil\n}\n\n\/\/ replace returns a slice where each instance of the input that is x is replaced with y\nfunc replace(input []string, x, y string) []string {\n\tresult := []string{}\n\tfor i := range input {\n\t\tif input[i] == x {\n\t\t\tresult = append(result, y)\n\t\t} else {\n\t\t\tresult = append(result, input[i])\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ dedupe removes duplicate items from the input list.\n\/\/ the last instance of a duplicate is kept in the input list.\nfunc dedupe(input []string) []string {\n\titems := sets.NewString()\n\tresult := []string{}\n\tfor i := len(input) - 1; i >= 0; i-- {\n\t\tif items.Has(input[i]) {\n\t\t\tcontinue\n\t\t}\n\t\titems.Insert(input[i])\n\t\tresult = append([]string{input[i]}, result...)\n\t}\n\treturn result\n}\n\nfunc init() {\n\t\/\/ add a filter that will remove DefaultAdmissionConfig\n\tadmission.FactoryFilterFn = filterEnableAdmissionConfigs\n}\n\nfunc filterEnableAdmissionConfigs(delegate admission.Factory) admission.Factory {\n\treturn func(config io.Reader) (admission.Interface, error) {\n\t\tconfig1, config2, err := splitStream(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called after all)\n\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said\n\t\tobj, err := configapilatest.ReadYAML(config1)\n\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\tif err != nil {\n\t\t\treturn delegate(config2)\n\t\t}\n\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\tif obj == nil {\n\t\t\treturn delegate(config2)\n\t\t}\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tif _, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\treturn delegate(config2)\n\t\t}\n\n\t\t\/\/ if it was a DefaultAdmissionConfig, then it must have said \"enabled\" and it wasn't really meant for the\n\t\t\/\/ admission plugin\n\t\treturn delegate(nil)\n\t}\n}\n\n\/\/ splitStream reads the stream bytes and constructs two copies of it.\nfunc splitStream(config io.Reader) (io.Reader, io.Reader, error) {\n\tif config == nil || reflect.ValueOf(config).IsNil() {\n\t\treturn nil, nil, nil\n\t}\n\n\tconfigBytes, err := ioutil.ReadAll(config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil\n}\n\nfunc ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(in map[string]configv1.AdmissionPluginConfig) (*apiserver.AdmissionConfiguration, error) {\n\tret := &apiserver.AdmissionConfiguration{}\n\n\tfor _, pluginName := range sets.StringKeySet(in).List() {\n\t\topenshiftConfig := in[pluginName]\n\n\t\tkubeConfig := apiserver.AdmissionPluginConfiguration{\n\t\t\tName: 
pluginName,\n\t\t\tPath: openshiftConfig.Location,\n\t\t}\n\n\t\tkubeConfig.Configuration = &runtime.Unknown{\n\t\t\tRaw: openshiftConfig.Configuration.Raw,\n\t\t}\n\t\tret.Plugins = append(ret.Plugins, kubeConfig)\n\t}\n\n\treturn ret, nil\n}\nAdd openshift.io\/ClusterResourceQuota to openshift api serverpackage admission\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"reflect\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/apiserver\/pkg\/admission\"\n\t\"k8s.io\/apiserver\/pkg\/admission\/plugin\/namespace\/lifecycle\"\n\tmutatingwebhook \"k8s.io\/apiserver\/pkg\/admission\/plugin\/webhook\/mutating\"\n\tvalidatingwebhook \"k8s.io\/apiserver\/pkg\/admission\/plugin\/webhook\/validating\"\n\t\"k8s.io\/apiserver\/pkg\/apis\/apiserver\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/admission\/noderestriction\"\n\texpandpvcadmission \"k8s.io\/kubernetes\/plugin\/pkg\/admission\/storage\/persistentvolume\/resize\"\n\tstorageclassdefaultadmission \"k8s.io\/kubernetes\/plugin\/pkg\/admission\/storage\/storageclass\/setdefault\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\toadmission \"github.com\/openshift\/origin\/pkg\/cmd\/server\/admission\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/image\/apiserver\/admission\/apis\/imagepolicy\"\n\timageadmission \"github.com\/openshift\/origin\/pkg\/image\/apiserver\/admission\/limitrange\"\n\tingressadmission \"github.com\/openshift\/origin\/pkg\/network\/apiserver\/admission\"\n\toverrideapi \"github.com\/openshift\/origin\/pkg\/quota\/apiserver\/admission\/apis\/clusterresourceoverride\"\n\t\"github.com\/openshift\/origin\/pkg\/security\/apiserver\/admission\/sccadmission\"\n\t\"github.com\/openshift\/origin\/pkg\/service\/admission\/externalipranger\"\n\t\"github.com\/openshift\/origin\/pkg\/service\/admission\/restrictedendpoints\"\n)\n\nvar (\n\t\/\/ these are admission plugins that cannot be applied until after the kubeapiserver starts.\n\t\/\/ TODO if nothing comes to mind in 3.10, kill this\n\tSkipRunLevelZeroPlugins = sets.NewString()\n\t\/\/ these are admission plugins that cannot be applied until after the openshiftapiserver apiserver starts.\n\tSkipRunLevelOnePlugins = sets.NewString(\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t\timagepolicy.PluginName,\n\t\toverrideapi.PluginName,\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"RunOnceDuration\",\n\t\tsccadmission.PluginName,\n\t\t\"SCCExecRestrictions\",\n\t)\n\n\t\/\/ openshiftAdmissionControlPlugins gives the in-order default admission chain for openshift resources.\n\topenshiftAdmissionControlPlugins = []string{\n\t\tlifecycle.PluginName,\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/JenkinsBootstrapper\",\n\t\t\"openshift.io\/BuildConfigSecretInjector\",\n\t\t\"BuildByStrategy\",\n\t\timageadmission.PluginName,\n\t\t\"PodNodeConstraints\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\t\"Initializers\",\n\t\t\"MutatingAdmissionWebhook\",\n\t\t\"ValidatingAdmissionWebhook\",\n\t\t\"ResourceQuota\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t}\n\n\t\/\/ KubeAdmissionPlugins gives the in-order default admission chain for kube resources.\n\tKubeAdmissionPlugins = 
[]string{\n\t\t\"AlwaysAdmit\",\n\t\t\"NamespaceAutoProvision\",\n\t\t\"NamespaceExists\",\n\t\tlifecycle.PluginName,\n\t\t\"EventRateLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"RunOnceDuration\",\n\t\t\"PodNodeConstraints\",\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"PodNodeSelector\",\n\t\toverrideapi.PluginName,\n\t\texternalipranger.ExternalIPPluginName,\n\t\trestrictedendpoints.RestrictedEndpointsPluginName,\n\t\timagepolicy.PluginName,\n\t\t\"ImagePolicyWebhook\",\n\t\t\"PodPreset\",\n\t\t\"LimitRanger\",\n\t\t\"ServiceAccount\",\n\t\tnoderestriction.PluginName,\n\t\t\"SecurityContextDeny\",\n\t\tsccadmission.PluginName,\n\t\t\"PodSecurityPolicy\",\n\t\t\"DenyEscalatingExec\",\n\t\t\"DenyExecOnPrivileged\",\n\t\tstorageclassdefaultadmission.PluginName,\n\t\texpandpvcadmission.PluginName,\n\t\t\"AlwaysPullImages\",\n\t\t\"LimitPodHardAntiAffinityTopology\",\n\t\t\"SCCExecRestrictions\",\n\t\t\"PersistentVolumeLabel\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\tingressadmission.IngressAdmission,\n\t\t\"Priority\",\n\t\t\"ExtendedResourceToleration\",\n\t\t\"DefaultTolerationSeconds\",\n\t\t\"StorageObjectInUseProtection\",\n\t\t\"Initializers\",\n\t\tmutatingwebhook.PluginName,\n\t\tvalidatingwebhook.PluginName,\n\t\t\"PodTolerationRestriction\",\n\t\t\"AlwaysDeny\",\n\t\t\/\/ NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.\n\t\t\/\/ DO NOT ADD ANY PLUGINS AFTER THIS LINE!\n\t\t\"ResourceQuota\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t}\n\n\t\/\/ combinedAdmissionControlPlugins gives the in-order default admission chain for all resources resources.\n\t\/\/ When possible, this list is used. The set of openshift+kube chains must exactly match this set. In addition,\n\t\/\/ the order specified in the openshift and kube chains must match the order here.\n\tCombinedAdmissionControlPlugins = []string{\n\t\t\"AlwaysAdmit\",\n\t\t\"NamespaceAutoProvision\",\n\t\t\"NamespaceExists\",\n\t\tlifecycle.PluginName,\n\t\t\"EventRateLimit\",\n\t\t\"ProjectRequestLimit\",\n\t\t\"openshift.io\/RestrictSubjectBindings\",\n\t\t\"openshift.io\/JenkinsBootstrapper\",\n\t\t\"openshift.io\/BuildConfigSecretInjector\",\n\t\t\"BuildByStrategy\",\n\t\timageadmission.PluginName,\n\t\t\"RunOnceDuration\",\n\t\t\"PodNodeConstraints\",\n\t\t\"OriginPodNodeEnvironment\",\n\t\t\"PodNodeSelector\",\n\t\toverrideapi.PluginName,\n\t\texternalipranger.ExternalIPPluginName,\n\t\trestrictedendpoints.RestrictedEndpointsPluginName,\n\t\timagepolicy.PluginName,\n\t\t\"ImagePolicyWebhook\",\n\t\t\"PodPreset\",\n\t\t\"LimitRanger\",\n\t\t\"ServiceAccount\",\n\t\tnoderestriction.PluginName,\n\t\t\"SecurityContextDeny\",\n\t\tsccadmission.PluginName,\n\t\t\"PodSecurityPolicy\",\n\t\t\"DenyEscalatingExec\",\n\t\t\"DenyExecOnPrivileged\",\n\t\tstorageclassdefaultadmission.PluginName,\n\t\texpandpvcadmission.PluginName,\n\t\t\"AlwaysPullImages\",\n\t\t\"LimitPodHardAntiAffinityTopology\",\n\t\t\"SCCExecRestrictions\",\n\t\t\"PersistentVolumeLabel\",\n\t\t\"OwnerReferencesPermissionEnforcement\",\n\t\tingressadmission.IngressAdmission,\n\t\t\"Priority\",\n\t\t\"ExtendedResourceToleration\",\n\t\t\"DefaultTolerationSeconds\",\n\t\t\"StorageObjectInUseProtection\",\n\t\t\"Initializers\",\n\t\tmutatingwebhook.PluginName,\n\t\tvalidatingwebhook.PluginName,\n\t\t\"PodTolerationRestriction\",\n\t\t\"AlwaysDeny\",\n\t\t\/\/ NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.\n\t\t\/\/ DO NOT ADD ANY PLUGINS AFTER THIS 
LINE!\n\t\t\"ResourceQuota\",\n\t\t\"openshift.io\/ClusterResourceQuota\",\n\t}\n)\n\n\/\/ fixupAdmissionPlugins fixes the input plugins to handle deprecation and duplicates.\nfunc fixupAdmissionPlugins(plugins []string) []string {\n\tresult := replace(plugins, \"openshift.io\/OriginResourceQuota\", \"ResourceQuota\")\n\tresult = dedupe(result)\n\treturn result\n}\n\nfunc NewAdmissionChains(\n\tadmissionConfigFiles []string,\n\tpluginConfig map[string]configv1.AdmissionPluginConfig,\n\tadmissionInitializer admission.PluginInitializer,\n\tadmissionDecorator admission.Decorator,\n) (admission.Interface, error) {\n\tadmissionPluginConfigFilename := \"\"\n\tif len(admissionConfigFiles) > 0 {\n\t\tadmissionPluginConfigFilename = admissionConfigFiles[0]\n\n\t} else {\n\t\tupstreamAdmissionConfig, err := ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttempFile, err := ioutil.TempFile(\"\", \"master-config.yaml\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer os.Remove(tempFile.Name())\n\t\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttempFile.Close()\n\t\tadmissionPluginConfigFilename = tempFile.Name()\n\t}\n\n\tadmissionPluginNames := openshiftAdmissionControlPlugins\n\tadmissionPluginNames = fixupAdmissionPlugins(admissionPluginNames)\n\n\tadmissionChain, err := newAdmissionChainFunc(admissionPluginNames, admissionPluginConfigFilename, admissionInitializer, admissionDecorator)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn admissionChain, err\n}\n\n\/\/ newAdmissionChainFunc is for unit testing only. You should NEVER OVERRIDE THIS outside of a unit test.\nvar newAdmissionChainFunc = newAdmissionChain\n\nfunc newAdmissionChain(pluginNames []string, admissionConfigFilename string, admissionInitializer admission.PluginInitializer, admissionDecorator admission.Decorator) (admission.Interface, error) {\n\tplugins := []admission.Interface{}\n\tfor _, pluginName := range pluginNames {\n\t\tvar (\n\t\t\tplugin admission.Interface\n\t\t)\n\n\t\t\/\/ TODO this needs to be refactored to use the admission scheme we created upstream. 
I think this holds us for the rebase.\n\t\tpluginsConfigProvider, err := admission.ReadAdmissionConfiguration([]string{pluginName}, admissionConfigFilename, configapi.Scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tplugin, err = OriginAdmissionPlugins.NewFromPlugins([]string{pluginName}, pluginsConfigProvider, admissionInitializer, admissionDecorator)\n\t\tif err != nil {\n\t\t\t\/\/ should have been caught with validation\n\t\t\treturn nil, err\n\t\t}\n\t\tif plugin == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tplugins = append(plugins, plugin)\n\n\t}\n\n\t\/\/ ensure that plugins have been properly initialized\n\tif err := oadmission.Validate(plugins); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn admission.NewChainHandler(plugins...), nil\n}\n\n\/\/ replace returns a slice where each instance of the input that is x is replaced with y\nfunc replace(input []string, x, y string) []string {\n\tresult := []string{}\n\tfor i := range input {\n\t\tif input[i] == x {\n\t\t\tresult = append(result, y)\n\t\t} else {\n\t\t\tresult = append(result, input[i])\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ dedupe removes duplicate items from the input list.\n\/\/ the last instance of a duplicate is kept in the input list.\nfunc dedupe(input []string) []string {\n\titems := sets.NewString()\n\tresult := []string{}\n\tfor i := len(input) - 1; i >= 0; i-- {\n\t\tif items.Has(input[i]) {\n\t\t\tcontinue\n\t\t}\n\t\titems.Insert(input[i])\n\t\tresult = append([]string{input[i]}, result...)\n\t}\n\treturn result\n}\n\nfunc init() {\n\t\/\/ add a filter that will remove DefaultAdmissionConfig\n\tadmission.FactoryFilterFn = filterEnableAdmissionConfigs\n}\n\nfunc filterEnableAdmissionConfigs(delegate admission.Factory) admission.Factory {\n\treturn func(config io.Reader) (admission.Interface, error) {\n\t\tconfig1, config2, err := splitStream(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called after all)\n\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said\n\t\tobj, err := configapilatest.ReadYAML(config1)\n\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\tif err != nil {\n\t\t\treturn delegate(config2)\n\t\t}\n\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\tif obj == nil {\n\t\t\treturn delegate(config2)\n\t\t}\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tif _, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\treturn delegate(config2)\n\t\t}\n\n\t\t\/\/ if it was a DefaultAdmissionConfig, then it must have said \"enabled\" and it wasn't really meant for the\n\t\t\/\/ admission plugin\n\t\treturn delegate(nil)\n\t}\n}\n\n\/\/ splitStream reads the stream bytes and constructs two copies of it.\nfunc splitStream(config io.Reader) (io.Reader, io.Reader, error) {\n\tif config == nil || reflect.ValueOf(config).IsNil() {\n\t\treturn nil, nil, nil\n\t}\n\n\tconfigBytes, err := ioutil.ReadAll(config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil\n}\n\nfunc ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(in map[string]configv1.AdmissionPluginConfig) (*apiserver.AdmissionConfiguration, error) {\n\tret := &apiserver.AdmissionConfiguration{}\n\n\tfor _, pluginName := range sets.StringKeySet(in).List() {\n\t\topenshiftConfig := in[pluginName]\n\n\t\tkubeConfig := apiserver.AdmissionPluginConfiguration{\n\t\t\tName: 
pluginName,\n\t\t\tPath: openshiftConfig.Location,\n\t\t}\n\n\t\tkubeConfig.Configuration = &runtime.Unknown{\n\t\t\tRaw: openshiftConfig.Configuration.Raw,\n\t\t}\n\t\tret.Plugins = append(ret.Plugins, kubeConfig)\n\t}\n\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"package reseed\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/justinas\/alice\"\n)\n\nconst (\n\tI2P_USER_AGENT = \"Wget\/1.11.4\"\n)\n\ntype Server struct {\n\t*http.Server\n\tReseeder Reseeder\n}\n\nfunc NewServer(prefix string, trustProxy bool) *Server {\n\tconfig := &tls.Config{\n\t\tMinVersion: tls.VersionTLS10,\n\t\tPreferServerCipherSuites: true,\n\t\tCipherSuites: []uint16{\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\t\t\/\/ tls.TLS_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t},\n\t}\n\th := &http.Server{TLSConfig: config}\n\tserver := Server{h, nil}\n\n\tth := throttled.RateLimit(throttled.PerHour(120), &throttled.VaryBy{RemoteAddr: true}, store.NewMemStore(10000))\n\n\tmiddlewareChain := alice.New()\n\tif trustProxy {\n\t\tmiddlewareChain = middlewareChain.Append(proxiedMiddleware)\n\t}\n\n\terrorHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tif _, err := w.Write(nil); nil != err {\n\t\t\tlog.Println(err)\n\t\t}\n\t})\n\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/\", middlewareChain.Append(loggingMiddleware).Then(errorHandler))\n\tmux.Handle(prefix+\"\/i2pseeds.su3\", middlewareChain.Append(loggingMiddleware, verifyMiddleware, th.Throttle).Then(http.HandlerFunc(server.reseedHandler)))\n\tserver.Handler = mux\n\n\treturn &server\n}\n\nfunc (s *Server) reseedHandler(w http.ResponseWriter, r *http.Request) {\n\tpeer := Peer(r.RemoteAddr)\n\n\tsu3Bytes, err := s.Reseeder.PeerSu3Bytes(peer)\n\tif nil != err {\n\t\thttp.Error(w, \"500 Unable to get SU3\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=i2pseeds.su3\")\n\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\tw.Header().Set(\"Content-Length\", strconv.FormatInt(int64(len(su3Bytes)), 10))\n\n\tio.Copy(w, bytes.NewReader(su3Bytes))\n}\n\nfunc loggingMiddleware(next http.Handler) http.Handler {\n\treturn handlers.CombinedLoggingHandler(os.Stdout, next)\n}\n\nfunc verifyMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif I2P_USER_AGENT != r.UserAgent() {\n\t\t\thttp.Error(w, \"403 Forbidden\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n\nfunc proxiedMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif prior, ok := r.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tr.RemoteAddr = 
prior[0]\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\ndisable keep alivespackage reseed\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/justinas\/alice\"\n)\n\nconst (\n\tI2P_USER_AGENT = \"Wget\/1.11.4\"\n)\n\ntype Server struct {\n\t*http.Server\n\tReseeder Reseeder\n}\n\nfunc NewServer(prefix string, trustProxy bool) *Server {\n\tconfig := &tls.Config{\n\t\tMinVersion: tls.VersionTLS10,\n\t\tPreferServerCipherSuites: true,\n\t\tCipherSuites: []uint16{\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\t\t\/\/ tls.TLS_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t},\n\t}\n\th := &http.Server{TLSConfig: config}\n\tserver := Server{h, nil}\n\n\tth := throttled.RateLimit(throttled.PerHour(120), &throttled.VaryBy{RemoteAddr: true}, store.NewMemStore(10000))\n\n\tmiddlewareChain := alice.New()\n\tif trustProxy {\n\t\tmiddlewareChain = middlewareChain.Append(proxiedMiddleware)\n\t}\n\n\terrorHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tif _, err := w.Write(nil); nil != err {\n\t\t\tlog.Println(err)\n\t\t}\n\t})\n\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/\", middlewareChain.Append(disableKeepAliveMiddleware, loggingMiddleware).Then(errorHandler))\n\tmux.Handle(prefix+\"\/i2pseeds.su3\", middlewareChain.Append(disableKeepAliveMiddleware, loggingMiddleware, verifyMiddleware, th.Throttle).Then(http.HandlerFunc(server.reseedHandler)))\n\tserver.Handler = mux\n\n\treturn &server\n}\n\nfunc (s *Server) reseedHandler(w http.ResponseWriter, r *http.Request) {\n\tpeer := Peer(r.RemoteAddr)\n\n\tsu3Bytes, err := s.Reseeder.PeerSu3Bytes(peer)\n\tif nil != err {\n\t\thttp.Error(w, \"500 Unable to get SU3\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=i2pseeds.su3\")\n\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\tw.Header().Set(\"Content-Length\", strconv.FormatInt(int64(len(su3Bytes)), 10))\n\n\tio.Copy(w, bytes.NewReader(su3Bytes))\n}\n\nfunc disableKeepAliveMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n\nfunc loggingMiddleware(next http.Handler) http.Handler {\n\treturn handlers.CombinedLoggingHandler(os.Stdout, next)\n}\n\nfunc verifyMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif I2P_USER_AGENT != r.UserAgent() {\n\t\t\thttp.Error(w, \"403 Forbidden\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n\nfunc proxiedMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif prior, ok := 
r.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tr.RemoteAddr = prior[0]\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n<|endoftext|>"} {"text":"package scm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/libgit2\/git2go\"\n)\n\n\/\/ RootPath discovers the base directory for a git repo\nfunc RootPath(path ...string) (string, error) {\n\tvar (\n\t\twd string\n\t\tp string\n\t\terr error\n\t)\n\tif len(path) > 0 {\n\t\twd = path[0]\n\t} else {\n\t\twd, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tp, err = git.Discover(wd, false, []string{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.ToSlash(filepath.Dir(filepath.Dir(p))), nil\n}\n\n\/\/ CommitIDs returns commit SHA1 IDs starting from the head up to the limit\nfunc CommitIDs(limit int, wd ...string) ([]string, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\tcnt int\n\t\tw *git.RevWalk\n\t\terr error\n\t)\n\tcommits := []string{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\tdefer repo.Free()\n\n\tw, err = repo.Walk()\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\tdefer w.Free()\n\n\terr = w.PushHead()\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\n\terr = w.Iterate(\n\t\tfunc(commit *git.Commit) bool {\n\t\t\tif limit == cnt {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcommits = append(commits, commit.Object.Id().String())\n\t\t\tcnt++\n\t\t\treturn true\n\t\t})\n\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\n\treturn commits, nil\n}\n\n\/\/ Commit contains commit details\ntype Commit struct {\n\tID string\n\tOID *git.Oid\n\tSummary string\n\tMessage string\n\tAuthor string\n\tEmail string\n\tWhen time.Time\n\tFiles []string\n}\n\n\/\/ HeadCommit returns the latest commit\nfunc HeadCommit(wd ...string) (Commit, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\tcommit := Commit{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn commit, err\n\t}\n\tdefer repo.Free()\n\n\theadCommit, err := lookupHeadCommit(repo)\n\tif err != nil {\n\t\tif err == ErrHeadUnborn {\n\t\t\treturn commit, nil\n\t\t}\n\t\treturn commit, err\n\t}\n\tdefer headCommit.Free()\n\n\theadTree, err := headCommit.Tree()\n\tif err != nil {\n\t\treturn commit, err\n\t}\n\tdefer headTree.Free()\n\n\tfiles := []string{}\n\tif headCommit.ParentCount() > 0 {\n\t\tparentTree, err := headCommit.Parent(0).Tree()\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t\tdefer parentTree.Free()\n\n\t\toptions, err := git.DefaultDiffOptions()\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\n\t\tdiff, err := headCommit.Owner().DiffTreeToTree(parentTree, headTree, &options)\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t\tdefer diff.Free()\n\n\t\terr = diff.ForEach(\n\t\t\tfunc(file git.DiffDelta, progress float64) (git.DiffForEachHunkCallback, error) {\n\n\t\t\t\tfiles = append(files, filepath.ToSlash(file.NewFile.Path))\n\n\t\t\t\treturn func(hunk git.DiffHunk) (git.DiffForEachLineCallback, error) {\n\t\t\t\t\treturn func(line git.DiffLine) error {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}, nil\n\t\t\t\t}, nil\n\t\t\t}, git.DiffDetailFiles)\n\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\n\t} else {\n\n\t\tpath := \"\"\n\t\terr := headTree.Walk(\n\t\t\tfunc(s string, 
entry *git.TreeEntry) int {\n\t\t\t\tswitch entry.Filemode {\n\t\t\t\tcase git.FilemodeTree:\n\t\t\t\t\tpath = filepath.ToSlash(entry.Name)\n\t\t\t\tdefault:\n\t\t\t\t\tfiles = append(files, filepath.Join(path, entry.Name))\n\t\t\t\t}\n\t\t\t\treturn 0\n\t\t\t})\n\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t}\n\n\tcommit = Commit{\n\t\tID: headCommit.Object.Id().String(),\n\t\tOID: headCommit.Object.Id(),\n\t\tSummary: headCommit.Summary(),\n\t\tMessage: headCommit.Message(),\n\t\tAuthor: headCommit.Author().Name,\n\t\tEmail: headCommit.Author().Email,\n\t\tWhen: headCommit.Author().When,\n\t\tFiles: files}\n\n\treturn commit, nil\n}\n\n\/\/ CreateNote creates a git note associated with the head commit\nfunc CreateNote(noteTxt string, nameSpace string, wd ...string) error {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer repo.Free()\n\n\theadCommit, err := lookupHeadCommit(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer headCommit.Free()\n\n\tsig := &git.Signature{\n\t\tName: headCommit.Author().Name,\n\t\tEmail: headCommit.Author().Email,\n\t\tWhen: headCommit.Author().When,\n\t}\n\n\t_, err = repo.Notes.Create(\"refs\/notes\/\"+nameSpace, sig, sig, headCommit.Id(), noteTxt, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitNote contains a git note's details\ntype CommitNote struct {\n\tID string\n\tOID *git.Oid\n\tSummary string\n\tMessage string\n\tAuthor string\n\tEmail string\n\tWhen time.Time\n\tNote string\n}\n\n\/\/ ReadNote returns a commit note for the SHA1 commit id\nfunc ReadNote(commitID string, nameSpace string, wd ...string) (CommitNote, error) {\n\tvar (\n\t\terr error\n\t\trepo *git.Repository\n\t\tcommit *git.Commit\n\t\tn *git.Note\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tdefer func() {\n\t\tif commit != nil {\n\t\t\tcommit.Free()\n\t\t}\n\t\tif n != nil {\n\t\t\tn.Free()\n\t\t}\n\t\trepo.Free()\n\t}()\n\n\tid, err := git.NewOid(commitID)\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tcommit, err = repo.LookupCommit(id)\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tvar noteTxt string\n\tn, err = repo.Notes.Read(\"refs\/notes\/\"+nameSpace, id)\n\tif err != nil {\n\t\tnoteTxt = \"\"\n\t} else {\n\t\tnoteTxt = n.Message()\n\t}\n\n\treturn CommitNote{\n\t\tID: commit.Object.Id().String(),\n\t\tOID: commit.Object.Id(),\n\t\tSummary: commit.Summary(),\n\t\tMessage: commit.Message(),\n\t\tAuthor: commit.Author().Name,\n\t\tEmail: commit.Author().Email,\n\t\tWhen: commit.Author().When,\n\t\tNote: noteTxt,\n\t}, nil\n}\n\n\/\/ Config persists git configuration settings\nfunc Config(settings map[string]string, wd ...string) error {\n\tvar (\n\t\terr error\n\t\trepo *git.Repository\n\t\tcfg *git.Config\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer repo.Free()\n\n\tcfg, err = repo.Config()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cfg.Free()\n\n\tfor k, v := range settings {\n\t\terr = cfg.SetString(k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetHooks creates git hooks\nfunc SetHooks(hooks map[string]string, wd ...string) error {\n\tfor hook, command := range hooks {\n\t\tvar (\n\t\t\tp string\n\t\t\terr error\n\t\t)\n\n\t\tif len(wd) > 0 {\n\t\t\tp = wd[0]\n\t\t} else 
{\n\t\t\tp, err = os.Getwd()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfp := filepath.Join(p, \".git\", \"hooks\", hook)\n\n\t\tvar output string\n\t\tif _, err := os.Stat(fp); !os.IsNotExist(err) {\n\t\t\tb, err := ioutil.ReadFile(fp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutput = string(b)\n\n\t\t\tif strings.Contains(output, command+\"\\n\") {\n\t\t\t\t\/\/ if file already exists this will make sure it's executable\n\t\t\t\tif err := os.Chmod(fp, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif err = ioutil.WriteFile(\n\t\t\tfp, []byte(fmt.Sprintf(\"%s\\n%s\\n\", output, command)), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ if file already exists this will make sure it's executable\n\t\tif err := os.Chmod(fp, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Ignore persists paths\/files to ignore for a git repo\nfunc Ignore(ignore string, wd ...string) error {\n\tvar (\n\t\tp string\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\tp = wd[0]\n\t} else {\n\t\tp, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfp := filepath.Join(p, \".gitignore\")\n\n\tvar output string\n\tif _, err := os.Stat(fp); !os.IsNotExist(err) {\n\t\tb, err := ioutil.ReadFile(fp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toutput = string(b)\n\n\t\tif strings.Contains(output, ignore+\"\\n\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif err = ioutil.WriteFile(\n\t\tfp, []byte(fmt.Sprintf(\"%s\\n%s\\n\", output, ignore)), 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc openRepository(wd ...string) (*git.Repository, error) {\n\tvar (\n\t\tp string\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\tp, err = RootPath(wd[0])\n\t} else {\n\t\tp, err = RootPath()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo, err := git.OpenRepository(p)\n\treturn repo, err\n}\n\nvar (\n\t\/\/ ErrHeadUnborn is raised when there are no commits yet in the git repo\n\tErrHeadUnborn = errors.New(\"Head commit not found\")\n)\n\nfunc lookupHeadCommit(repo *git.Repository) (*git.Commit, error) {\n\n\theadUnborn, err := repo.IsHeadUnborn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif headUnborn {\n\t\treturn nil, ErrHeadUnborn\n\t}\n\n\theadRef, err := repo.Head()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer headRef.Free()\n\n\tcommit, err := repo.LookupCommit(headRef.Target())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn commit, nil\n}\n\n\/\/ Status contains the git file statuses\ntype Status struct {\n\tFiles []fileStatus\n}\n\n\/\/ NewStatus create a Status struct for a git repo\nfunc NewStatus(wd ...string) (Status, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\tstatus := Status{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer repo.Free()\n\n\t\/\/TODO: research what status options to set\n\topts := &git.StatusOptions{}\n\topts.Show = git.StatusShowIndexAndWorkdir\n\topts.Flags = git.StatusOptIncludeUntracked | git.StatusOptRenamesHeadToIndex | git.StatusOptSortCaseSensitively\n\tstatusList, err := repo.StatusList(opts)\n\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer statusList.Free()\n\n\tcnt, err := statusList.EntryCount()\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tentry, err := statusList.ByIndex(i)\n\t\tif err != nil {\n\t\t\treturn 
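\n\n\/\/ ---- Editorial example (sketch, not part of the original source) ----\n\/\/ Installing a hook and an ignore entry with the SetHooks\/Ignore helpers\n\/\/ above; the \"gtm commit --yes\" command and \".gtm\/\" pattern are made-up\n\/\/ placeholders.\nfunc exampleInstall(repoPath string) error {\n\thooks := map[string]string{\"post-commit\": \"gtm commit --yes\"}\n\tif err := SetHooks(hooks, repoPath); err != nil {\n\t\treturn err\n\t}\n\treturn Ignore(\".gtm\/\", repoPath)\n}\n\n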
status, err\n\t\t}\n\t\tstatus.AddFile(entry)\n\t}\n\n\treturn status, nil\n}\n\n\/\/ AddFile adds a StatusEntry for each file in working and staging directories\nfunc (s *Status) AddFile(e git.StatusEntry) {\n\tvar path string\n\tif e.Status == git.StatusIndexNew ||\n\t\te.Status == git.StatusIndexModified ||\n\t\te.Status == git.StatusIndexDeleted ||\n\t\te.Status == git.StatusIndexRenamed ||\n\t\te.Status == git.StatusIndexTypeChange {\n\t\tpath = filepath.ToSlash(e.HeadToIndex.NewFile.Path)\n\t} else {\n\t\tpath = filepath.ToSlash(e.IndexToWorkdir.NewFile.Path)\n\t}\n\ts.Files = append(s.Files, fileStatus{Path: path, Status: e.Status})\n}\n\n\/\/ HasStaged returns true if there are any files in staging\nfunc (s *Status) HasStaged() bool {\n\tfor _, f := range s.Files {\n\t\tif f.InStaging() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsModified returns true if the file is modified in either working or staging\nfunc (s *Status) IsModified(path string, staging bool) bool {\n\tpath = filepath.ToSlash(path)\n\tfor _, f := range s.Files {\n\t\tif path == f.Path && f.InStaging() == staging {\n\t\t\treturn f.IsModified()\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsTracked returns true if file is tracked by the git repo\nfunc (s *Status) IsTracked(path string) bool {\n\tpath = filepath.ToSlash(path)\n\tfor _, f := range s.Files {\n\t\tif path == f.Path {\n\t\t\treturn f.IsTracked()\n\t\t}\n\t}\n\treturn false\n}\n\ntype fileStatus struct {\n\tStatus git.Status\n\tPath string\n}\n\n\/\/ InStaging returns true if the file is in staging\nfunc (f fileStatus) InStaging() bool {\n\treturn f.Status == git.StatusIndexNew ||\n\t\tf.Status == git.StatusIndexModified ||\n\t\tf.Status == git.StatusIndexDeleted ||\n\t\tf.Status == git.StatusIndexRenamed ||\n\t\tf.Status == git.StatusIndexTypeChange\n}\n\n\/\/ InWorking returns true if the file is in working\nfunc (f fileStatus) InWorking() bool {\n\treturn f.Status == git.StatusWtModified ||\n\t\tf.Status == git.StatusWtDeleted ||\n\t\tf.Status == git.StatusWtRenamed ||\n\t\tf.Status == git.StatusWtTypeChange\n}\n\n\/\/ IsTracked returns true if the file is tracked by git\nfunc (f fileStatus) IsTracked() bool {\n\treturn f.Status != git.StatusIgnored &&\n\t\tf.Status != git.StatusWtNew\n}\n\n\/\/ IsModified returns true if the file has been modified\nfunc (f fileStatus) IsModified() bool {\n\treturn f.InStaging() || f.InWorking()\n}\nAdd shebang to post commit hookpackage scm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/libgit2\/git2go\"\n)\n\n\/\/ RootPath discovers the base directory for a git repo\nfunc RootPath(path ...string) (string, error) {\n\tvar (\n\t\twd string\n\t\tp string\n\t\terr error\n\t)\n\tif len(path) > 0 {\n\t\twd = path[0]\n\t} else {\n\t\twd, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tp, err = git.Discover(wd, false, []string{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.ToSlash(filepath.Dir(filepath.Dir(p))), nil\n}\n\n\/\/ CommitIDs returns commit SHA1 IDs starting from the head up to the limit\nfunc CommitIDs(limit int, wd ...string) ([]string, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\tcnt int\n\t\tw *git.RevWalk\n\t\terr error\n\t)\n\tcommits := []string{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\tdefer repo.Free()\n\n\tw, err = repo.Walk()\n\tif 
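\n\n\/\/ ---- Editorial example (sketch, not part of the original source) ----\n\/\/ Using the Status helpers above to decide whether a tracked path has\n\/\/ pending changes in either the index or the working tree.\nfunc exampleNeedsCommit(path, wd string) (bool, error) {\n\ts, err := NewStatus(wd)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tstaged := s.IsModified(path, true)\n\tunstaged := s.IsModified(path, false)\n\treturn s.IsTracked(path) && (staged || unstaged), nil\n}\n\n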
err != nil {\n\t\treturn commits, err\n\t}\n\tdefer w.Free()\n\n\terr = w.PushHead()\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\n\terr = w.Iterate(\n\t\tfunc(commit *git.Commit) bool {\n\t\t\tif limit == cnt {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcommits = append(commits, commit.Object.Id().String())\n\t\t\tcnt++\n\t\t\treturn true\n\t\t})\n\n\tif err != nil {\n\t\treturn commits, err\n\t}\n\n\treturn commits, nil\n}\n\n\/\/ Commit contains commit details\ntype Commit struct {\n\tID string\n\tOID *git.Oid\n\tSummary string\n\tMessage string\n\tAuthor string\n\tEmail string\n\tWhen time.Time\n\tFiles []string\n}\n\n\/\/ HeadCommit returns the latest commit\nfunc HeadCommit(wd ...string) (Commit, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\tcommit := Commit{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn commit, err\n\t}\n\tdefer repo.Free()\n\n\theadCommit, err := lookupHeadCommit(repo)\n\tif err != nil {\n\t\tif err == ErrHeadUnborn {\n\t\t\treturn commit, nil\n\t\t}\n\t\treturn commit, err\n\t}\n\tdefer headCommit.Free()\n\n\theadTree, err := headCommit.Tree()\n\tif err != nil {\n\t\treturn commit, err\n\t}\n\tdefer headTree.Free()\n\n\tfiles := []string{}\n\tif headCommit.ParentCount() > 0 {\n\t\tparentTree, err := headCommit.Parent(0).Tree()\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t\tdefer parentTree.Free()\n\n\t\toptions, err := git.DefaultDiffOptions()\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\n\t\tdiff, err := headCommit.Owner().DiffTreeToTree(parentTree, headTree, &options)\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t\tdefer diff.Free()\n\n\t\terr = diff.ForEach(\n\t\t\tfunc(file git.DiffDelta, progress float64) (git.DiffForEachHunkCallback, error) {\n\n\t\t\t\tfiles = append(files, filepath.ToSlash(file.NewFile.Path))\n\n\t\t\t\treturn func(hunk git.DiffHunk) (git.DiffForEachLineCallback, error) {\n\t\t\t\t\treturn func(line git.DiffLine) error {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}, nil\n\t\t\t\t}, nil\n\t\t\t}, git.DiffDetailFiles)\n\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\n\t} else {\n\n\t\tpath := \"\"\n\t\terr := headTree.Walk(\n\t\t\tfunc(s string, entry *git.TreeEntry) int {\n\t\t\t\tswitch entry.Filemode {\n\t\t\t\tcase git.FilemodeTree:\n\t\t\t\t\tpath = filepath.ToSlash(entry.Name)\n\t\t\t\tdefault:\n\t\t\t\t\tfiles = append(files, filepath.Join(path, entry.Name))\n\t\t\t\t}\n\t\t\t\treturn 0\n\t\t\t})\n\n\t\tif err != nil {\n\t\t\treturn commit, err\n\t\t}\n\t}\n\n\tcommit = Commit{\n\t\tID: headCommit.Object.Id().String(),\n\t\tOID: headCommit.Object.Id(),\n\t\tSummary: headCommit.Summary(),\n\t\tMessage: headCommit.Message(),\n\t\tAuthor: headCommit.Author().Name,\n\t\tEmail: headCommit.Author().Email,\n\t\tWhen: headCommit.Author().When,\n\t\tFiles: files}\n\n\treturn commit, nil\n}\n\n\/\/ CreateNote creates a git note associated with the head commit\nfunc CreateNote(noteTxt string, nameSpace string, wd ...string) error {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer repo.Free()\n\n\theadCommit, err := lookupHeadCommit(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsig := &git.Signature{\n\t\tName: headCommit.Author().Name,\n\t\tEmail: headCommit.Author().Email,\n\t\tWhen: headCommit.Author().When,\n\t}\n\n\t_, 
err = repo.Notes.Create(\"refs\/notes\/\"+nameSpace, sig, sig, headCommit.Id(), noteTxt, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CommitNote contains a git note's details\ntype CommitNote struct {\n\tID string\n\tOID *git.Oid\n\tSummary string\n\tMessage string\n\tAuthor string\n\tEmail string\n\tWhen time.Time\n\tNote string\n}\n\n\/\/ ReadNote returns a commit note for the SHA1 commit id\nfunc ReadNote(commitID string, nameSpace string, wd ...string) (CommitNote, error) {\n\tvar (\n\t\terr error\n\t\trepo *git.Repository\n\t\tcommit *git.Commit\n\t\tn *git.Note\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tdefer func() {\n\t\tif commit != nil {\n\t\t\tcommit.Free()\n\t\t}\n\t\tif n != nil {\n\t\t\tn.Free()\n\t\t}\n\t\trepo.Free()\n\t}()\n\n\tid, err := git.NewOid(commitID)\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tcommit, err = repo.LookupCommit(id)\n\tif err != nil {\n\t\treturn CommitNote{}, err\n\t}\n\n\tvar noteTxt string\n\tn, err = repo.Notes.Read(\"refs\/notes\/\"+nameSpace, id)\n\tif err != nil {\n\t\tnoteTxt = \"\"\n\t} else {\n\t\tnoteTxt = n.Message()\n\t}\n\n\treturn CommitNote{\n\t\tID: commit.Object.Id().String(),\n\t\tOID: commit.Object.Id(),\n\t\tSummary: commit.Summary(),\n\t\tMessage: commit.Message(),\n\t\tAuthor: commit.Author().Name,\n\t\tEmail: commit.Author().Email,\n\t\tWhen: commit.Author().When,\n\t\tNote: noteTxt,\n\t}, nil\n}\n\n\/\/ Config persists git configuration settings\nfunc Config(settings map[string]string, wd ...string) error {\n\tvar (\n\t\terr error\n\t\trepo *git.Repository\n\t\tcfg *git.Config\n\t)\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\n\tcfg, err = repo.Config()\n\tdefer cfg.Free()\n\n\tfor k, v := range settings {\n\t\terr = cfg.SetString(k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetHooks creates git hooks\nfunc SetHooks(hooks map[string]string, wd ...string) error {\n\tconst shebang = \"#!\/bin\/sh\"\n\tfor hook, command := range hooks {\n\t\tvar (\n\t\t\tp string\n\t\t\terr error\n\t\t)\n\n\t\tif len(wd) > 0 {\n\t\t\tp = wd[0]\n\t\t} else {\n\t\t\tp, err = os.Getwd()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfp := filepath.Join(p, \".git\", \"hooks\", hook)\n\n\t\tvar output string\n\t\tif _, err := os.Stat(fp); !os.IsNotExist(err) {\n\t\t\tb, err := ioutil.ReadFile(fp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutput = string(b)\n\t\t}\n\n\t\tif !strings.Contains(output, shebang) {\n\t\t\toutput = fmt.Sprintf(\"%s\\n%s\", shebang, output)\n\t\t}\n\n\t\tif !strings.Contains(output, command) {\n\t\t\toutput = fmt.Sprintf(\"%s\\n%s\\n\", output, command)\n\t\t}\n\n\t\tif err = ioutil.WriteFile(fp, []byte(output), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := os.Chmod(fp, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Ignore persists paths\/files to ignore for a git repo\nfunc Ignore(ignore string, wd ...string) error {\n\tvar (\n\t\tp string\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\tp = wd[0]\n\t} else {\n\t\tp, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfp := filepath.Join(p, \".gitignore\")\n\n\tvar output string\n\tif _, err := os.Stat(fp); !os.IsNotExist(err) {\n\t\tb, err := ioutil.ReadFile(fp)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\toutput = string(b)\n\n\t\tif strings.Contains(output, ignore+\"\\n\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif err = ioutil.WriteFile(\n\t\tfp, []byte(fmt.Sprintf(\"%s\\n%s\\n\", output, ignore)), 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc openRepository(wd ...string) (*git.Repository, error) {\n\tvar (\n\t\tp string\n\t\terr error\n\t)\n\n\tif len(wd) > 0 {\n\t\tp, err = RootPath(wd[0])\n\t} else {\n\t\tp, err = RootPath()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo, err := git.OpenRepository(p)\n\treturn repo, err\n}\n\nvar (\n\t\/\/ ErrHeadUnborn is raised when there are no commits yet in the git repo\n\tErrHeadUnborn = errors.New(\"Head commit not found\")\n)\n\nfunc lookupHeadCommit(repo *git.Repository) (*git.Commit, error) {\n\n\theadUnborn, err := repo.IsHeadUnborn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif headUnborn {\n\t\treturn nil, ErrHeadUnborn\n\t}\n\n\theadRef, err := repo.Head()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer headRef.Free()\n\n\tcommit, err := repo.LookupCommit(headRef.Target())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn commit, nil\n}\n\n\/\/ Status contains the git file statuses\ntype Status struct {\n\tFiles []fileStatus\n}\n\n\/\/ NewStatus create a Status struct for a git repo\nfunc NewStatus(wd ...string) (Status, error) {\n\tvar (\n\t\trepo *git.Repository\n\t\terr error\n\t)\n\tstatus := Status{}\n\n\tif len(wd) > 0 {\n\t\trepo, err = openRepository(wd[0])\n\t} else {\n\t\trepo, err = openRepository()\n\t}\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer repo.Free()\n\n\t\/\/TODO: research what status options to set\n\topts := &git.StatusOptions{}\n\topts.Show = git.StatusShowIndexAndWorkdir\n\topts.Flags = git.StatusOptIncludeUntracked | git.StatusOptRenamesHeadToIndex | git.StatusOptSortCaseSensitively\n\tstatusList, err := repo.StatusList(opts)\n\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer statusList.Free()\n\n\tcnt, err := statusList.EntryCount()\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tentry, err := statusList.ByIndex(i)\n\t\tif err != nil {\n\t\t\treturn status, err\n\t\t}\n\t\tstatus.AddFile(entry)\n\t}\n\n\treturn status, nil\n}\n\n\/\/ AddFile adds a StatusEntry for each file in working and staging directories\nfunc (s *Status) AddFile(e git.StatusEntry) {\n\tvar path string\n\tif e.Status == git.StatusIndexNew ||\n\t\te.Status == git.StatusIndexModified ||\n\t\te.Status == git.StatusIndexDeleted ||\n\t\te.Status == git.StatusIndexRenamed ||\n\t\te.Status == git.StatusIndexTypeChange {\n\t\tpath = filepath.ToSlash(e.HeadToIndex.NewFile.Path)\n\t} else {\n\t\tpath = filepath.ToSlash(e.IndexToWorkdir.NewFile.Path)\n\t}\n\ts.Files = append(s.Files, fileStatus{Path: path, Status: e.Status})\n}\n\n\/\/ HasStaged returns true if there are any files in staging\nfunc (s *Status) HasStaged() bool {\n\tfor _, f := range s.Files {\n\t\tif f.InStaging() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsModified returns true if the file is modified in either working or staging\nfunc (s *Status) IsModified(path string, staging bool) bool {\n\tpath = filepath.ToSlash(path)\n\tfor _, f := range s.Files {\n\t\tif path == f.Path && f.InStaging() == staging {\n\t\t\treturn f.IsModified()\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ IsTracked returns true if file is tracked by the git repo\nfunc (s *Status) IsTracked(path string) bool {\n\tpath = filepath.ToSlash(path)\n\tfor _, f := range s.Files {\n\t\tif 
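\n\n\/\/ ---- Editorial example (sketch, not part of the original source) ----\n\/\/ Callers can use the ErrHeadUnborn sentinel above to treat a repository\n\/\/ with no commits as a normal, non-fatal state.\nfunc exampleDescribeHead(repo *git.Repository) (string, error) {\n\tc, err := lookupHeadCommit(repo)\n\tif err == ErrHeadUnborn {\n\t\treturn \"no commits yet\", nil\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer c.Free()\n\treturn c.Summary(), nil\n}\n\n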
path == f.Path {\n\t\t\treturn f.IsTracked()\n\t\t}\n\t}\n\treturn false\n}\n\ntype fileStatus struct {\n\tStatus git.Status\n\tPath string\n}\n\n\/\/ InStaging returns true if the file is in staging\nfunc (f fileStatus) InStaging() bool {\n\treturn f.Status == git.StatusIndexNew ||\n\t\tf.Status == git.StatusIndexModified ||\n\t\tf.Status == git.StatusIndexDeleted ||\n\t\tf.Status == git.StatusIndexRenamed ||\n\t\tf.Status == git.StatusIndexTypeChange\n}\n\n\/\/ InWorking returns true if the file is in working\nfunc (f fileStatus) InWorking() bool {\n\treturn f.Status == git.StatusWtModified ||\n\t\tf.Status == git.StatusWtDeleted ||\n\t\tf.Status == git.StatusWtRenamed ||\n\t\tf.Status == git.StatusWtTypeChange\n}\n\n\/\/ IsTracked returns true if the file is tracked by git\nfunc (f fileStatus) IsTracked() bool {\n\treturn f.Status != git.StatusIgnored &&\n\t\tf.Status != git.StatusWtNew\n}\n\n\/\/ IsModified returns true if the file has been modified\nfunc (f fileStatus) IsModified() bool {\n\treturn f.InStaging() || f.InWorking()\n}\n<|endoftext|>"} {"text":"package tests\n\nimport (\n\t\"socialapi\/models\"\n\t\"socialapi\/rest\"\n\t\"socialapi\/workers\/common\/tests\"\n\t\"testing\"\n\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\nfunc TestModerationSetting(t *testing.T) {\n\tvar AccountOldId = bson.NewObjectId()\n\tConvey(\"while testing troll mode\", t, func() {\n\t\tConvey(\"First Create User\", func() {\n\t\t\taccount := models.NewAccount()\n\t\t\taccount.OldId = AccountOldId.Hex()\n\t\t\taccount, err := rest.CreateAccount(account)\n\t\t\ttests.ResultedWithNoErrorCheck(account, err)\n\n\t\t\tConvey(\"then we should be able to mark as troll\", func() {\n\t\t\t\tres := rest.MarkAsTroll(account)\n\t\t\t\tSo(res, ShouldBeNil)\n\t\t\t\tConvey(\"should be able to mark as troll twice\", func() {\n\t\t\t\t\tres := rest.MarkAsTroll(account)\n\t\t\t\t\tSo(res, ShouldBeNil)\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tConvey(\"should be able to unmark as troll\", func() {\n\t\t\t\tres := rest.UnMarkAsTroll(account)\n\t\t\t\tSo(res, ShouldBeNil)\n\t\t\t\tConvey(\"should be able to unmark as troll twice\", func() {\n\t\t\t\t\tres := rest.UnMarkAsTroll(account)\n\t\t\t\t\tSo(res, ShouldBeNil)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n}\nSocialapi: added integration tests for moderation featurepackage tests\n\nimport (\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"math\/rand\"\n\t\"socialapi\/models\"\n\t\"socialapi\/request\"\n\t\"socialapi\/rest\"\n\t\"socialapi\/workers\/common\/runner\"\n\t\"testing\"\n\t\"time\"\n\n\t. 
\"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestModeration(t *testing.T) {\n\tr := runner.New(\"test-moderation\")\n\tif err := r.Init(); err != nil {\n\t\tt.Fatalf(\"couldnt start bongo %s\", err.Error())\n\t}\n\tdefer r.Close()\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\tmodelhelper.Initialize(r.Conf.Mongo)\n\tdefer modelhelper.Close()\n\n\tConvey(\"While creating a link to a channel\", t, func() {\n\t\t\/\/ create admin\n\t\tadmin, err := models.CreateAccountInBothDbs()\n\t\tSo(err, ShouldBeNil)\n\t\tSo(admin, ShouldNotBeNil)\n\n\t\t\/\/ create another account\n\t\tacc2, err := models.CreateAccountInBothDbs()\n\t\tSo(err, ShouldBeNil)\n\t\tSo(acc2, ShouldNotBeNil)\n\n\t\t\/\/ create root channel with second acc\n\t\troot, err := rest.CreateChannelWithType(acc2.Id, models.Channel_TYPE_TOPIC)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(root, ShouldNotBeNil)\n\n\t\t\/\/ create leaf channel with second acc\n\t\tleaf, err := rest.CreateChannelWithType(acc2.Id, models.Channel_TYPE_TOPIC)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(leaf, ShouldNotBeNil)\n\n\t\t\/\/ create leaf2 channel with second acc\n\t\tleaf2, err := rest.CreateChannelWithType(acc2.Id, models.Channel_TYPE_TOPIC)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(leaf2, ShouldNotBeNil)\n\n\t\t\/\/ fetch admin's session\n\t\tses, err := models.FetchOrCreateSession(admin.Nick)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(ses, ShouldNotBeNil)\n\n\t\tConvey(\"We should be able to create it first\", func() {\n\t\t\tres, err := rest.CreateLink(root.Id, leaf.Id, ses.ClientId)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(res, ShouldNotBeNil)\n\n\t\t\tConvey(\"We should get error if we try to create the same link again\", func() {\n\t\t\t\tres, err := rest.CreateLink(root.Id, leaf.Id, ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t\tSo(res, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"We should not be able to list with non set root id\", func() {\n\t\t\t\tlinks, err := rest.GetLinks(0, request.NewQuery(), ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t\tSo(err.Error(), ShouldContainSubstring, models.ErrChannelIsNotSet.Error())\n\t\t\t\tSo(links, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"We should be able to list the linked channels\", func() {\n\t\t\t\tres, err := rest.CreateLink(root.Id, leaf2.Id, ses.ClientId)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(res, ShouldNotBeNil)\n\n\t\t\t\tlinks, err := rest.GetLinks(root.Id, request.NewQuery(), ses.ClientId)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tSo(links, ShouldNotBeNil)\n\t\t\t\tSo(len(links), ShouldEqual, 2)\n\t\t\t})\n\n\t\t\tConvey(\"We should be able to unlink created link\", func() {\n\t\t\t\terr = rest.UnLink(root.Id, leaf.Id, ses.ClientId)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"We should not be able to unlink with non-set root id\", func() {\n\t\t\t\terr = rest.UnLink(0, rand.Int63(), ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t\tSo(err.Error(), ShouldContainSubstring, models.ErrChannelIsNotSet.Error())\n\t\t\t})\n\n\t\t\tConvey(\"We should not be able to unlink with non-set leaf id\", func() {\n\t\t\t\terr = rest.UnLink(rand.Int63(), 0, ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t\tSo(err.Error(), ShouldContainSubstring, models.ErrLeafIsNotSet.Error())\n\t\t\t})\n\n\t\t\tConvey(\"We should not be able to unlink non existing leaf\", func() {\n\t\t\t\terr = rest.UnLink(root.Id, rand.Int63(), ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"We should not be able to unlink from non existing root\", func() {\n\t\t\t\terr = 
rest.UnLink(rand.Int63(), leaf.Id, ses.ClientId)\n\t\t\t\tSo(err, ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"We should be able to blacklist channel without any leaves\", func() {\n\t\t\tSo(rest.BlackList(root.Id, ses.ClientId), ShouldBeNil)\n\t\t})\n\n\t\tConvey(\"We should not be able to blacklist channel with leaves\", func() {\n\t\t\tres, err := rest.CreateLink(root.Id, leaf.Id, ses.ClientId)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(res, ShouldNotBeNil)\n\n\t\t\terr = rest.BlackList(root.Id, ses.ClientId)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Error(), ShouldContainSubstring, models.ErrChannelHasLeaves.Error())\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage cmsapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n)\n\nconst (\n\tRootURL = \"http:\/\/cms.winlink.org:8085\"\n\tPathVersionAdd = \"\/version\/add\"\n\tPathGatewayStatus = \"\/gateway\/status.json\"\n)\n\ntype VersionAdd struct {\n\tCallsign string `json:\"callsign\"`\n\tProgram string `json:\"program\"`\n\tVersion string `json:\"version\"`\n\tComments string `json:\"comments,omitempty\"`\n}\n\nfunc (v VersionAdd) Post() error {\n\tb, _ := json.Marshal(v)\n\tbuf := bytes.NewBuffer(b)\n\n\treq, _ := http.NewRequest(\"POST\", RootURL+PathVersionAdd, buf)\n\treq.Header.Set(\"content-type\", \"application\/json\")\n\treq.Header.Set(\"accept\", \"application\/json\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response map[string]interface{}\n\tif err := json.NewDecoder(resp.Body).Decode(&response); err != nil {\n\t\treturn err\n\t}\n\n\tif errMsg, ok := response[\"ErrorMessage\"]; ok {\n\t\treturn fmt.Errorf(\"Winlink CMS Web Services: %s\", errMsg)\n\t}\n\n\treturn nil\n}\n\ntype GatewayStatus struct {\n\tServerName string `json:\"ServerName\"`\n\tErrorCode int `json:\"ErrorCode\"`\n\tGateways []Gateway `json:\"Gateways\"`\n}\n\ntype Gateway struct {\n\tCallsign string\n\tBaseCallsign string\n\tRequestedMode string\n\tComments string\n\tLastStatus RFC1123Time\n\tLatitude float64\n\tLongitude float64\n\n\tChannels []GatewayChannel `json:\"GatewayChannels\"`\n}\n\ntype GatewayChannel struct {\n\tOperatingHours string\n\tSupportedModes string\n\tFrequency float64\n\tServiceCode string\n\tBaud string\n\tRadioRange string\n\tMode int\n\tGridsquare string\n\tAntenna string\n}\n\ntype RFC1123Time struct{ time.Time }\n\n\/\/ GetGatewayStatus fetches the gateway status list returned by GatewayStatusUrl\n\/\/\n\/\/ mode can be any of [packet, pactor, winmor, robustpacket, allhf or anyall]. Empty is AnyAll.\n\/\/ historyHours is the number of hours of history to include (maximum: 48). 
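\n\n\/\/ ---- Editorial example (sketch, not part of the original source) ----\n\/\/ Reporting a client version with the VersionAdd type above; the callsign\n\/\/ and version strings are placeholders.\nfunc exampleReportVersion() error {\n\tv := VersionAdd{\n\t\tCallsign: \"N0CALL\",\n\t\tProgram:  \"pat\",\n\t\tVersion:  \"v0.0.0\",\n\t}\n\treturn v.Post()\n}\n\n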
If < 1, then API default is used.\n\/\/ serviceCodes defaults to \"PUBLIC\".\nfunc GetGatewayStatus(mode string, historyHours int, serviceCodes ...string) (io.ReadCloser, error) {\n\tswitch {\n\tcase mode == \"\":\n\t\tmode = \"AnyAll\"\n\tcase historyHours > 48:\n\t\thistoryHours = 48\n\tcase len(serviceCodes) == 0:\n\t\tserviceCodes = []string{\"PUBLIC\"}\n\t}\n\n\tparams := url.Values{\"Mode\": {mode}}\n\tif historyHours >= 0 {\n\t\tparams.Add(\"HistoryHours\", fmt.Sprintf(\"%d\", historyHours))\n\t}\n\tfor _, str := range serviceCodes {\n\t\tparams.Add(\"ServiceCodes\", str)\n\t}\n\n\tresp, err := http.PostForm(RootURL+PathGatewayStatus, params)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase resp.StatusCode != http.StatusOK:\n\t\treturn nil, fmt.Errorf(\"Unexpected http status '%s'.\", resp.Status)\n\t}\n\n\treturn resp.Body, err\n}\n\nfunc GetGatewayStatusCached(cacheFile string, forceDownload bool) (io.ReadCloser, error) {\n\tif !forceDownload {\n\t\tfile, err := os.Open(cacheFile)\n\t\tif err == nil {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\n\tfile, err := os.Create(cacheFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Println(\"Downloading latest gateway status information...\")\n\tfresh, err := GetGatewayStatus(\"\", 48)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = io.Copy(file, fresh)\n\tfile.Seek(0, 0)\n\n\tif err == nil {\n\t\tlog.Println(\"download succeeded.\")\n\t}\n\n\treturn file, err\n}\n\nfunc (t *RFC1123Time) UnmarshalJSON(b []byte) (err error) {\n\tvar str string\n\tif err = json.Unmarshal(b, &str); err != nil {\n\t\treturn err\n\t}\n\tt.Time, err = time.Parse(time.RFC1123, str)\n\treturn err\n}\nSwitch to api.winlink.org (https)\/\/ Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.\n\/\/ Use of this source code is governed by the MIT-license that can be\n\/\/ found in the LICENSE file.\n\npackage cmsapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n)\n\nconst (\n\tRootURL = \"https:\/\/api.winlink.org\"\n\tPathVersionAdd = \"\/version\/add\"\n\tPathGatewayStatus = \"\/gateway\/status.json\"\n\n\t\/\/ Issued December 2017 by the WDT for use with Pat\n\tAccessKey = \"1880278F11684B358F36845615BD039A\"\n)\n\ntype VersionAdd struct {\n\tCallsign string `json:\"callsign\"`\n\tProgram string `json:\"program\"`\n\tVersion string `json:\"version\"`\n\tComments string `json:\"comments,omitempty\"`\n}\n\nfunc (v VersionAdd) Post() error {\n\tb, _ := json.Marshal(v)\n\tbuf := bytes.NewBuffer(b)\n\n\turl := RootURL + PathVersionAdd + \"?key=\" + AccessKey\n\treq, _ := http.NewRequest(\"POST\", url, buf)\n\treq.Header.Set(\"content-type\", \"application\/json\")\n\treq.Header.Set(\"accept\", \"application\/json\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response map[string]interface{}\n\tif err := json.NewDecoder(resp.Body).Decode(&response); err != nil {\n\t\treturn err\n\t}\n\n\tif errMsg, ok := response[\"ErrorMessage\"]; ok {\n\t\treturn fmt.Errorf(\"Winlink CMS Web Services: %s\", errMsg)\n\t}\n\n\treturn nil\n}\n\ntype GatewayStatus struct {\n\tServerName string `json:\"ServerName\"`\n\tErrorCode int `json:\"ErrorCode\"`\n\tGateways []Gateway `json:\"Gateways\"`\n}\n\ntype Gateway struct {\n\tCallsign string\n\tBaseCallsign string\n\tRequestedMode string\n\tComments string\n\tLastStatus RFC1123Time\n\tLatitude float64\n\tLongitude float64\n\n\tChannels 
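\n\n\/\/ ---- Editorial note (sketch, not part of the original source) ----\n\/\/ The tagless switch in GetGatewayStatus above applies at most one of the\n\/\/ three defaults per call, because Go switch cases do not fall through: an\n\/\/ empty mode prevents the historyHours cap and the serviceCodes default\n\/\/ from being applied. If the defaults are meant to be independent, plain\n\/\/ if statements express that:\nfunc applyGatewayDefaults(mode string, historyHours int, serviceCodes []string) (string, int, []string) {\n\tif mode == \"\" {\n\t\tmode = \"AnyAll\"\n\t}\n\tif historyHours > 48 {\n\t\thistoryHours = 48\n\t}\n\tif len(serviceCodes) == 0 {\n\t\tserviceCodes = []string{\"PUBLIC\"}\n\t}\n\treturn mode, historyHours, serviceCodes\n}\n\n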
[]GatewayChannel `json:\"GatewayChannels\"`\n}\n\ntype GatewayChannel struct {\n\tOperatingHours string\n\tSupportedModes string\n\tFrequency float64\n\tServiceCode string\n\tBaud string\n\tRadioRange string\n\tMode int\n\tGridsquare string\n\tAntenna string\n}\n\ntype RFC1123Time struct{ time.Time }\n\n\/\/ GetGatewayStatus fetches the gateway status list returned by GatewayStatusUrl\n\/\/\n\/\/ mode can be any of [packet, pactor, winmor, robustpacket, allhf or anyall]. Empty is AnyAll.\n\/\/ historyHours is the number of hours of history to include (maximum: 48). If < 1, then API default is used.\n\/\/ serviceCodes defaults to \"PUBLIC\".\nfunc GetGatewayStatus(mode string, historyHours int, serviceCodes ...string) (io.ReadCloser, error) {\n\tswitch {\n\tcase mode == \"\":\n\t\tmode = \"AnyAll\"\n\tcase historyHours > 48:\n\t\thistoryHours = 48\n\tcase len(serviceCodes) == 0:\n\t\tserviceCodes = []string{\"PUBLIC\"}\n\t}\n\n\tparams := url.Values{\"Mode\": {mode}}\n\tparams.Set(\"key\", AccessKey)\n\tif historyHours >= 0 {\n\t\tparams.Add(\"HistoryHours\", fmt.Sprintf(\"%d\", historyHours))\n\t}\n\tfor _, str := range serviceCodes {\n\t\tparams.Add(\"ServiceCodes\", str)\n\t}\n\n\tresp, err := http.PostForm(RootURL+PathGatewayStatus, params)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase resp.StatusCode != http.StatusOK:\n\t\treturn nil, fmt.Errorf(\"Unexpected http status '%s'.\", resp.Status)\n\t}\n\n\treturn resp.Body, err\n}\n\nfunc GetGatewayStatusCached(cacheFile string, forceDownload bool) (io.ReadCloser, error) {\n\tif !forceDownload {\n\t\tfile, err := os.Open(cacheFile)\n\t\tif err == nil {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\n\tfile, err := os.Create(cacheFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Println(\"Downloading latest gateway status information...\")\n\tfresh, err := GetGatewayStatus(\"\", 48)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = io.Copy(file, fresh)\n\tfile.Seek(0, 0)\n\n\tif err == nil {\n\t\tlog.Println(\"download succeeded.\")\n\t}\n\n\treturn file, err\n}\n\nfunc (t *RFC1123Time) UnmarshalJSON(b []byte) (err error) {\n\tvar str string\n\tif err = json.Unmarshal(b, &str); err != nil {\n\t\treturn err\n\t}\n\tt.Time, err = time.Parse(time.RFC1123, str)\n\treturn err\n}\n<|endoftext|>"} {"text":"\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2014 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. 
If not, see .\n\npackage scm\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/bemasher\/rtlamr\/crc\"\n\t\"github.com\/bemasher\/rtlamr\/decode\"\n\t\"github.com\/bemasher\/rtlamr\/parse\"\n)\n\nfunc NewPacketConfig(symbolLength int) (cfg decode.PacketConfig) {\n\tcfg.DataRate = 32768\n\n\tcfg.SymbolLength = symbolLength\n\tcfg.SymbolLength2 = cfg.SymbolLength << 1\n\n\tcfg.SampleRate = cfg.DataRate * cfg.SymbolLength\n\n\tcfg.PreambleSymbols = 21\n\tcfg.PacketSymbols = 96\n\n\tcfg.PreambleLength = cfg.PreambleSymbols * cfg.SymbolLength2\n\tcfg.PacketLength = cfg.PacketSymbols * cfg.SymbolLength2\n\n\tcfg.BlockSize = decode.NextPowerOf2(cfg.PreambleLength)\n\tcfg.BlockSize2 = cfg.BlockSize << 1\n\n\tcfg.BufferLength = cfg.PacketLength + cfg.BlockSize\n\n\tcfg.Preamble = \"111110010101001100000\"\n\n\treturn\n}\n\ntype Parser struct {\n\tdecode.Decoder\n\tcrc.CRC\n}\n\nfunc NewParser(symbolLength int, fastMag bool) (p Parser) {\n\tp.Decoder = decode.NewDecoder(NewPacketConfig(symbolLength), fastMag)\n\tp.CRC = crc.NewCRC(\"BCH\", 0, 0x6F63, 0)\n\treturn\n}\n\nfunc (p Parser) Dec() decode.Decoder {\n\treturn p.Decoder\n}\n\nfunc (p Parser) Cfg() decode.PacketConfig {\n\treturn p.Decoder.Cfg\n}\n\nfunc (p Parser) Parse(indices []int) (msgs []parse.Message) {\n\tseen := make(map[string]bool)\n\n\tfor _, pkt := range p.Decoder.Slice(indices) {\n\t\tif s := string(pkt); !seen[s] {\n\t\t\tseen[s] = true\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := parse.NewDataFromBytes(pkt)\n\n\t\t\/\/ If the packet is too short, bail.\n\t\tif l := len(data.Bytes); l < 12 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ If the checksum fails, bail.\n\t\tif p.Checksum(data.Bytes[2:12]) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tertid, _ := strconv.ParseUint(data.Bits[21:23]+data.Bits[56:80], 2, 32)\n\t\terttype, _ := strconv.ParseUint(data.Bits[26:30], 2, 8)\n\t\ttamperphy, _ := strconv.ParseUint(data.Bits[24:26], 2, 8)\n\t\ttamperenc, _ := strconv.ParseUint(data.Bits[30:32], 2, 8)\n\t\tconsumption, _ := strconv.ParseUint(data.Bits[32:56], 2, 32)\n\t\tchecksum, _ := strconv.ParseUint(data.Bits[80:96], 2, 16)\n\n\t\tvar scm SCM\n\n\t\tscm.ID = uint32(ertid)\n\t\tscm.Type = uint8(erttype)\n\t\tscm.TamperPhy = uint8(tamperphy)\n\t\tscm.TamperEnc = uint8(tamperenc)\n\t\tscm.Consumption = uint32(consumption)\n\t\tscm.Checksum = uint16(checksum)\n\n\t\t\/\/ If the meter id is 0, bail.\n\t\tif scm.ID == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgs = append(msgs, scm)\n\t}\n\n\treturn\n}\n\n\/\/ Standard Consumption Message\ntype SCM struct {\n\tID uint32 `xml:\",attr\"`\n\tType uint8 `xml:\",attr\"`\n\tTamperPhy uint8 `xml:\",attr\"`\n\tTamperEnc uint8 `xml:\",attr\"`\n\tConsumption uint32 `xml:\",attr\"`\n\tChecksum uint16 `xml:\",attr\"`\n}\n\nfunc (scm SCM) MsgType() string {\n\treturn \"SCM\"\n}\n\nfunc (scm SCM) MeterID() uint32 {\n\treturn scm.ID\n}\n\nfunc (scm SCM) MeterType() uint8 {\n\treturn scm.Type\n}\n\nfunc (scm SCM) String() string {\n\treturn fmt.Sprintf(\"{ID:%8d Type:%2d Tamper:{Phy:%02X Enc:%02X} Consumption:%8d CRC:0x%04X}\",\n\t\tscm.ID, scm.Type, scm.TamperPhy, scm.TamperEnc, scm.Consumption, scm.Checksum,\n\t)\n}\n\nfunc (scm SCM) Record() (r []string) {\n\tr = append(r, strconv.FormatUint(uint64(scm.ID), 10))\n\tr = append(r, strconv.FormatUint(uint64(scm.Type), 10))\n\tr = append(r, \"0x\"+strconv.FormatUint(uint64(scm.TamperPhy), 16))\n\tr = append(r, \"0x\"+strconv.FormatUint(uint64(scm.TamperEnc), 16))\n\tr = append(r, strconv.FormatUint(uint64(scm.Consumption), 10))\n\tr = append(r, 
\"0x\"+strconv.FormatUint(uint64(scm.Checksum), 16))\n\n\treturn\n}\nSet actual field lengths.\/\/ RTLAMR - An rtl-sdr receiver for smart meters operating in the 900MHz ISM band.\n\/\/ Copyright (C) 2014 Douglas Hall\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published\n\/\/ by the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see .\n\npackage scm\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/bemasher\/rtlamr\/crc\"\n\t\"github.com\/bemasher\/rtlamr\/decode\"\n\t\"github.com\/bemasher\/rtlamr\/parse\"\n)\n\nfunc NewPacketConfig(symbolLength int) (cfg decode.PacketConfig) {\n\tcfg.DataRate = 32768\n\n\tcfg.SymbolLength = symbolLength\n\tcfg.SymbolLength2 = cfg.SymbolLength << 1\n\n\tcfg.SampleRate = cfg.DataRate * cfg.SymbolLength\n\n\tcfg.PreambleSymbols = 21\n\tcfg.PacketSymbols = 96\n\n\tcfg.PreambleLength = cfg.PreambleSymbols * cfg.SymbolLength2\n\tcfg.PacketLength = cfg.PacketSymbols * cfg.SymbolLength2\n\n\tcfg.BlockSize = decode.NextPowerOf2(cfg.PreambleLength)\n\tcfg.BlockSize2 = cfg.BlockSize << 1\n\n\tcfg.BufferLength = cfg.PacketLength + cfg.BlockSize\n\n\tcfg.Preamble = \"111110010101001100000\"\n\n\treturn\n}\n\ntype Parser struct {\n\tdecode.Decoder\n\tcrc.CRC\n}\n\nfunc NewParser(symbolLength int, fastMag bool) (p Parser) {\n\tp.Decoder = decode.NewDecoder(NewPacketConfig(symbolLength), fastMag)\n\tp.CRC = crc.NewCRC(\"BCH\", 0, 0x6F63, 0)\n\treturn\n}\n\nfunc (p Parser) Dec() decode.Decoder {\n\treturn p.Decoder\n}\n\nfunc (p Parser) Cfg() decode.PacketConfig {\n\treturn p.Decoder.Cfg\n}\n\nfunc (p Parser) Parse(indices []int) (msgs []parse.Message) {\n\tseen := make(map[string]bool)\n\n\tfor _, pkt := range p.Decoder.Slice(indices) {\n\t\tif s := string(pkt); !seen[s] {\n\t\t\tseen[s] = true\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := parse.NewDataFromBytes(pkt)\n\n\t\t\/\/ If the packet is too short, bail.\n\t\tif l := len(data.Bytes); l < 12 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ If the checksum fails, bail.\n\t\tif p.Checksum(data.Bytes[2:12]) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tertid, _ := strconv.ParseUint(data.Bits[21:23]+data.Bits[56:80], 2, 26)\n\t\terttype, _ := strconv.ParseUint(data.Bits[26:30], 2, 4)\n\t\ttamperphy, _ := strconv.ParseUint(data.Bits[24:26], 2, 2)\n\t\ttamperenc, _ := strconv.ParseUint(data.Bits[30:32], 2, 2)\n\t\tconsumption, _ := strconv.ParseUint(data.Bits[32:56], 2, 24)\n\t\tchecksum, _ := strconv.ParseUint(data.Bits[80:96], 2, 16)\n\n\t\tvar scm SCM\n\n\t\tscm.ID = uint32(ertid)\n\t\tscm.Type = uint8(erttype)\n\t\tscm.TamperPhy = uint8(tamperphy)\n\t\tscm.TamperEnc = uint8(tamperenc)\n\t\tscm.Consumption = uint32(consumption)\n\t\tscm.Checksum = uint16(checksum)\n\n\t\t\/\/ If the meter id is 0, bail.\n\t\tif scm.ID == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsgs = append(msgs, scm)\n\t}\n\n\treturn\n}\n\n\/\/ Standard Consumption Message\ntype SCM struct {\n\tID uint32 `xml:\",attr\"`\n\tType uint8 `xml:\",attr\"`\n\tTamperPhy uint8 `xml:\",attr\"`\n\tTamperEnc uint8 
`xml:\",attr\"`\n\tConsumption uint32 `xml:\",attr\"`\n\tChecksum uint16 `xml:\",attr\"`\n}\n\nfunc (scm SCM) MsgType() string {\n\treturn \"SCM\"\n}\n\nfunc (scm SCM) MeterID() uint32 {\n\treturn scm.ID\n}\n\nfunc (scm SCM) MeterType() uint8 {\n\treturn scm.Type\n}\n\nfunc (scm SCM) String() string {\n\treturn fmt.Sprintf(\"{ID:%8d Type:%2d Tamper:{Phy:%02X Enc:%02X} Consumption:%8d CRC:0x%04X}\",\n\t\tscm.ID, scm.Type, scm.TamperPhy, scm.TamperEnc, scm.Consumption, scm.Checksum,\n\t)\n}\n\nfunc (scm SCM) Record() (r []string) {\n\tr = append(r, strconv.FormatUint(uint64(scm.ID), 10))\n\tr = append(r, strconv.FormatUint(uint64(scm.Type), 10))\n\tr = append(r, \"0x\"+strconv.FormatUint(uint64(scm.TamperPhy), 16))\n\tr = append(r, \"0x\"+strconv.FormatUint(uint64(scm.TamperEnc), 16))\n\tr = append(r, strconv.FormatUint(uint64(scm.Consumption), 10))\n\tr = append(r, \"0x\"+strconv.FormatUint(uint64(scm.Checksum), 16))\n\n\treturn\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/google\/pprof\/internal\/binutils\"\n\t\"github.com\/google\/pprof\/internal\/plugin\"\n)\n\ntype source struct {\n\tSources []string\n\tExecName string\n\tBuildID string\n\tBase []string\n\tDiffBase bool\n\tNormalize bool\n\n\tSeconds int\n\tTimeout int\n\tSymbolize string\n\tHTTPHostport string\n\tComment string\n}\n\n\/\/ Parse parses the command lines through the specified flags package\n\/\/ and returns the source of the profile and optionally the command\n\/\/ for the kind of report to generate (nil for interactive use).\nfunc parseFlags(o *plugin.Options) (*source, []string, error) {\n\tflag := o.Flagset\n\t\/\/ Comparisons.\n\tflagBase := flag.StringList(\"base\", \"\", \"Source for base profile for profile subtraction\")\n\tflagDiffBase := flag.StringList(\"diff_base\", \"\", \"Source for diff base profile for comparison\")\n\t\/\/ Source options.\n\tflagSymbolize := flag.String(\"symbolize\", \"\", \"Options for profile symbolization\")\n\tflagBuildID := flag.String(\"buildid\", \"\", \"Override build id for first mapping\")\n\tflagTimeout := flag.Int(\"timeout\", -1, \"Timeout in seconds for fetching a profile\")\n\tflagAddComment := flag.String(\"add_comment\", \"\", \"Annotation string to record in the profile\")\n\t\/\/ CPU profile options\n\tflagSeconds := flag.Int(\"seconds\", -1, \"Length of time for dynamic profiles\")\n\t\/\/ Heap profile options\n\tflagInUseSpace := flag.Bool(\"inuse_space\", false, \"Display in-use memory size\")\n\tflagInUseObjects := flag.Bool(\"inuse_objects\", false, \"Display in-use object counts\")\n\tflagAllocSpace := flag.Bool(\"alloc_space\", false, \"Display allocated memory size\")\n\tflagAllocObjects := flag.Bool(\"alloc_objects\", false, \"Display allocated object counts\")\n\t\/\/ Contention profile options\n\tflagTotalDelay := 
flag.Bool(\"total_delay\", false, \"Display total delay at each region\")\n\tflagContentions := flag.Bool(\"contentions\", false, \"Display number of delays at each region\")\n\tflagMeanDelay := flag.Bool(\"mean_delay\", false, \"Display mean delay at each region\")\n\tflagTools := flag.String(\"tools\", os.Getenv(\"PPROF_TOOLS\"), \"Path for object tool pathnames\")\n\n\tflagHTTP := flag.String(\"http\", \"\", \"Present interactive web based UI at the specified http host:port\")\n\n\t\/\/ Flags used during command processing\n\tinstalledFlags := installFlags(flag)\n\n\tflagCommands := make(map[string]*bool)\n\tflagParamCommands := make(map[string]*string)\n\tfor name, cmd := range pprofCommands {\n\t\tif cmd.hasParam {\n\t\t\tflagParamCommands[name] = flag.String(name, \"\", \"Generate a report in \"+name+\" format, matching regexp\")\n\t\t} else {\n\t\t\tflagCommands[name] = flag.Bool(name, false, \"Generate a report in \"+name+\" format\")\n\t\t}\n\t}\n\n\targs := flag.Parse(func() {\n\t\to.UI.Print(usageMsgHdr +\n\t\t\tusage(true) +\n\t\t\tusageMsgSrc +\n\t\t\tflag.ExtraUsage() +\n\t\t\tusageMsgVars)\n\t})\n\tif len(args) == 0 {\n\t\treturn nil, nil, errors.New(\"no profile source specified\")\n\t}\n\n\tvar execName string\n\t\/\/ Recognize first argument as an executable or buildid override.\n\tif len(args) > 1 {\n\t\targ0 := args[0]\n\t\tif file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0); err == nil {\n\t\t\tfile.Close()\n\t\t\texecName = arg0\n\t\t\targs = args[1:]\n\t\t} else if *flagBuildID == \"\" && isBuildID(arg0) {\n\t\t\t*flagBuildID = arg0\n\t\t\targs = args[1:]\n\t\t}\n\t}\n\n\t\/\/ Report conflicting options\n\tif err := updateFlags(installedFlags); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcmd, err := outputFormat(flagCommands, flagParamCommands)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif cmd != nil && *flagHTTP != \"\" {\n\t\treturn nil, nil, errors.New(\"-http is not compatible with an output format on the command line\")\n\t}\n\n\tsi := pprofVariables[\"sample_index\"].value\n\tsi = sampleIndex(flagTotalDelay, si, \"delay\", \"-total_delay\", o.UI)\n\tsi = sampleIndex(flagMeanDelay, si, \"delay\", \"-mean_delay\", o.UI)\n\tsi = sampleIndex(flagContentions, si, \"contentions\", \"-contentions\", o.UI)\n\tsi = sampleIndex(flagInUseSpace, si, \"inuse_space\", \"-inuse_space\", o.UI)\n\tsi = sampleIndex(flagInUseObjects, si, \"inuse_objects\", \"-inuse_objects\", o.UI)\n\tsi = sampleIndex(flagAllocSpace, si, \"alloc_space\", \"-alloc_space\", o.UI)\n\tsi = sampleIndex(flagAllocObjects, si, \"alloc_objects\", \"-alloc_objects\", o.UI)\n\tpprofVariables.set(\"sample_index\", si)\n\n\tif *flagMeanDelay {\n\t\tpprofVariables.set(\"mean\", \"true\")\n\t}\n\n\tsource := &source{\n\t\tSources: args,\n\t\tExecName: execName,\n\t\tBuildID: *flagBuildID,\n\t\tSeconds: *flagSeconds,\n\t\tTimeout: *flagTimeout,\n\t\tSymbolize: *flagSymbolize,\n\t\tHTTPHostport: *flagHTTP,\n\t\tComment: *flagAddComment,\n\t}\n\n\tif err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnormalize := pprofVariables[\"normalize\"].boolValue()\n\tif normalize && len(source.Base) == 0 {\n\t\treturn nil, nil, errors.New(\"must have base profile to normalize by\")\n\t}\n\tsource.Normalize = normalize\n\n\tif bu, ok := o.Obj.(*binutils.Binutils); ok {\n\t\tbu.SetTools(*flagTools)\n\t}\n\treturn source, cmd, nil\n}\n\n\/\/ addBaseProfiles adds the list of base profiles or diff base profiles to\n\/\/ the source. 
This function will return an error if both base and diff base\n\/\/ profiles are specified.\nfunc (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error {\n\tbase, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase)\n\tif len(base) > 0 && len(diffBase) > 0 {\n\t\treturn errors.New(\"-base and -diff_base flags cannot both be specified\")\n\t}\n\n\tsource.Base = base\n\tif len(diffBase) > 0 {\n\t\tsource.Base, source.DiffBase = diffBase, true\n\t}\n\treturn nil\n}\n\n\/\/ dropEmpty list takes a slice of string pointers, and outputs a slice of\n\/\/ non-empty strings associated with the flag.\nfunc dropEmpty(list []*string) []string {\n\tvar l []string\n\tfor _, s := range list {\n\t\tif *s != \"\" {\n\t\t\tl = append(l, *s)\n\t\t}\n\t}\n\treturn l\n}\n\n\/\/ installFlags creates command line flags for pprof variables.\nfunc installFlags(flag plugin.FlagSet) flagsInstalled {\n\tf := flagsInstalled{\n\t\tints: make(map[string]*int),\n\t\tbools: make(map[string]*bool),\n\t\tfloats: make(map[string]*float64),\n\t\tstrings: make(map[string]*string),\n\t}\n\tfor n, v := range pprofVariables {\n\t\tswitch v.kind {\n\t\tcase boolKind:\n\t\t\tif v.group != \"\" {\n\t\t\t\t\/\/ Set all radio variables to false to identify conflicts.\n\t\t\t\tf.bools[n] = flag.Bool(n, false, v.help)\n\t\t\t} else {\n\t\t\t\tf.bools[n] = flag.Bool(n, v.boolValue(), v.help)\n\t\t\t}\n\t\tcase intKind:\n\t\t\tf.ints[n] = flag.Int(n, v.intValue(), v.help)\n\t\tcase floatKind:\n\t\t\tf.floats[n] = flag.Float64(n, v.floatValue(), v.help)\n\t\tcase stringKind:\n\t\t\tf.strings[n] = flag.String(n, v.value, v.help)\n\t\t}\n\t}\n\treturn f\n}\n\n\/\/ updateFlags updates the pprof variables according to the flags\n\/\/ parsed in the command line.\nfunc updateFlags(f flagsInstalled) error {\n\tvars := pprofVariables\n\tgroups := map[string]string{}\n\tfor n, v := range f.bools {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t\tif *v {\n\t\t\tg := vars[n].group\n\t\t\tif g != \"\" && groups[g] != \"\" {\n\t\t\t\treturn fmt.Errorf(\"conflicting options %q and %q set\", n, groups[g])\n\t\t\t}\n\t\t\tgroups[g] = n\n\t\t}\n\t}\n\tfor n, v := range f.ints {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t}\n\tfor n, v := range f.floats {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t}\n\tfor n, v := range f.strings {\n\t\tvars.set(n, *v)\n\t}\n\treturn nil\n}\n\ntype flagsInstalled struct {\n\tints map[string]*int\n\tbools map[string]*bool\n\tfloats map[string]*float64\n\tstrings map[string]*string\n}\n\n\/\/ isBuildID determines if the profile may contain a build ID, by\n\/\/ checking that it is a string of hex digits.\nfunc isBuildID(id string) bool {\n\treturn strings.Trim(id, \"0123456789abcdefABCDEF\") == \"\"\n}\n\nfunc sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string {\n\tif *flag {\n\t\tif si == \"\" {\n\t\t\treturn sampleType\n\t\t}\n\t\tui.PrintErr(\"Multiple value selections, ignoring \", option)\n\t}\n\treturn si\n}\n\nfunc outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) {\n\tfor n, b := range bcmd {\n\t\tif *b {\n\t\t\tif cmd != nil {\n\t\t\t\treturn nil, errors.New(\"must set at most one output format\")\n\t\t\t}\n\t\t\tcmd = []string{n}\n\t\t}\n\t}\n\tfor n, s := range acmd {\n\t\tif *s != \"\" {\n\t\t\tif cmd != nil {\n\t\t\t\treturn nil, errors.New(\"must set at most one output format\")\n\t\t\t}\n\t\t\tcmd = []string{n, *s}\n\t\t}\n\t}\n\treturn cmd, nil\n}\n\nvar usageMsgHdr = `usage:\n\nProduce output in the specified format.\n\n pprof [options] [binary] 
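\n\n\/\/ ---- Editorial note (sketch, not part of the original source) ----\n\/\/ updateFlags above implements a \"radio group\" check: within a named\n\/\/ group, at most one boolean flag may be set. A standalone sketch:\nfunc checkRadioGroups(set map[string]bool, groupOf map[string]string) error {\n\tchosen := map[string]string{}\n\tfor name, on := range set {\n\t\tif !on {\n\t\t\tcontinue\n\t\t}\n\t\tg := groupOf[name]\n\t\tif g == \"\" {\n\t\t\tcontinue \/\/ ungrouped flags never conflict\n\t\t}\n\t\tif prev, dup := chosen[g]; dup {\n\t\t\treturn fmt.Errorf(\"conflicting options %q and %q set\", name, prev)\n\t\t}\n\t\tchosen[g] = name\n\t}\n\treturn nil\n}\n\n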
...\n\nOmit the format to get an interactive shell whose commands can be used\nto generate various views of a profile\n\n pprof [options] [binary] ...\n\nOmit the format and provide the \"-http\" flag to get an interactive web\ninterface at the specified host:port that can be used to navigate through\nvarious views of a profile.\n\n pprof -http [host]:[port] [options] [binary] ...\n\nDetails:\n`\n\nvar usageMsgSrc = \"\\n\\n\" +\n\t\" Source options:\\n\" +\n\t\" -seconds Duration for time-based profile collection\\n\" +\n\t\" -timeout Timeout in seconds for profile collection\\n\" +\n\t\" -buildid Override build id for main binary\\n\" +\n\t\" -add_comment Free-form annotation to add to the profile\\n\" +\n\t\" Displayed on some reports or with pprof -comments\\n\" +\n\t\" -base source Source of profile to use as baseline\\n\" +\n\t\" profile.pb.gz Profile in compressed protobuf format\\n\" +\n\t\" legacy_profile Profile in legacy pprof format\\n\" +\n\t\" http:\/\/host\/profile URL for profile handler to retrieve\\n\" +\n\t\" -symbolize= Controls source of symbol information\\n\" +\n\t\" none Do not attempt symbolization\\n\" +\n\t\" local Examine only local binaries\\n\" +\n\t\" fastlocal Only get function names from local binaries\\n\" +\n\t\" remote Do not examine local binaries\\n\" +\n\t\" force Force re-symbolization\\n\" +\n\t\" Binary Local path or build id of binary for symbolization\\n\"\n\nvar usageMsgVars = \"\\n\\n\" +\n\t\" Misc options:\\n\" +\n\t\" -http Provide web based interface at host:port.\\n\" +\n\t\" Host is optional and 'localhost' by default.\\n\" +\n\t\" Port is optional and a randomly available port by default.\\n\" +\n\t\" -tools Search path for object tools\\n\" +\n\t\"\\n\" +\n\t\" Legacy convenience options:\\n\" +\n\t\" -inuse_space Same as -sample_index=inuse_space\\n\" +\n\t\" -inuse_objects Same as -sample_index=inuse_objects\\n\" +\n\t\" -alloc_space Same as -sample_index=alloc_space\\n\" +\n\t\" -alloc_objects Same as -sample_index=alloc_objects\\n\" +\n\t\" -total_delay Same as -sample_index=delay\\n\" +\n\t\" -contentions Same as -sample_index=contentions\\n\" +\n\t\" -mean_delay Same as -mean -sample_index=delay\\n\" +\n\t\"\\n\" +\n\t\" Environment Variables:\\n\" +\n\t\" PPROF_TMPDIR Location for saved profiles (default $HOME\/pprof)\\n\" +\n\t\" PPROF_TOOLS Search path for object-level tools\\n\" +\n\t\" PPROF_BINARY_PATH Search path for local binary files\\n\" +\n\t\" default: $HOME\/pprof\/binaries\\n\" +\n\t\" searches $name, $path, $buildid\/$name, $path\/$buildid\\n\" +\n\t\" * On Windows, %USERPROFILE% is used instead of $HOME\"\ndocument diff_base flag (#384) (#390)\/\/ Copyright 2014 Google Inc. 
All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/google\/pprof\/internal\/binutils\"\n\t\"github.com\/google\/pprof\/internal\/plugin\"\n)\n\ntype source struct {\n\tSources []string\n\tExecName string\n\tBuildID string\n\tBase []string\n\tDiffBase bool\n\tNormalize bool\n\n\tSeconds int\n\tTimeout int\n\tSymbolize string\n\tHTTPHostport string\n\tComment string\n}\n\n\/\/ Parse parses the command lines through the specified flags package\n\/\/ and returns the source of the profile and optionally the command\n\/\/ for the kind of report to generate (nil for interactive use).\nfunc parseFlags(o *plugin.Options) (*source, []string, error) {\n\tflag := o.Flagset\n\t\/\/ Comparisons.\n\tflagDiffBase := flag.StringList(\"diff_base\", \"\", \"Source of base profile for comparison\")\n\tflagBase := flag.StringList(\"base\", \"\", \"Source of base profile for profile subtraction\")\n\t\/\/ Source options.\n\tflagSymbolize := flag.String(\"symbolize\", \"\", \"Options for profile symbolization\")\n\tflagBuildID := flag.String(\"buildid\", \"\", \"Override build id for first mapping\")\n\tflagTimeout := flag.Int(\"timeout\", -1, \"Timeout in seconds for fetching a profile\")\n\tflagAddComment := flag.String(\"add_comment\", \"\", \"Annotation string to record in the profile\")\n\t\/\/ CPU profile options\n\tflagSeconds := flag.Int(\"seconds\", -1, \"Length of time for dynamic profiles\")\n\t\/\/ Heap profile options\n\tflagInUseSpace := flag.Bool(\"inuse_space\", false, \"Display in-use memory size\")\n\tflagInUseObjects := flag.Bool(\"inuse_objects\", false, \"Display in-use object counts\")\n\tflagAllocSpace := flag.Bool(\"alloc_space\", false, \"Display allocated memory size\")\n\tflagAllocObjects := flag.Bool(\"alloc_objects\", false, \"Display allocated object counts\")\n\t\/\/ Contention profile options\n\tflagTotalDelay := flag.Bool(\"total_delay\", false, \"Display total delay at each region\")\n\tflagContentions := flag.Bool(\"contentions\", false, \"Display number of delays at each region\")\n\tflagMeanDelay := flag.Bool(\"mean_delay\", false, \"Display mean delay at each region\")\n\tflagTools := flag.String(\"tools\", os.Getenv(\"PPROF_TOOLS\"), \"Path for object tool pathnames\")\n\n\tflagHTTP := flag.String(\"http\", \"\", \"Present interactive web based UI at the specified http host:port\")\n\n\t\/\/ Flags used during command processing\n\tinstalledFlags := installFlags(flag)\n\n\tflagCommands := make(map[string]*bool)\n\tflagParamCommands := make(map[string]*string)\n\tfor name, cmd := range pprofCommands {\n\t\tif cmd.hasParam {\n\t\t\tflagParamCommands[name] = flag.String(name, \"\", \"Generate a report in \"+name+\" format, matching regexp\")\n\t\t} else {\n\t\t\tflagCommands[name] = flag.Bool(name, false, \"Generate a report in \"+name+\" format\")\n\t\t}\n\t}\n\n\targs := flag.Parse(func() {\n\t\to.UI.Print(usageMsgHdr 
+\n\t\t\tusage(true) +\n\t\t\tusageMsgSrc +\n\t\t\tflag.ExtraUsage() +\n\t\t\tusageMsgVars)\n\t})\n\tif len(args) == 0 {\n\t\treturn nil, nil, errors.New(\"no profile source specified\")\n\t}\n\n\tvar execName string\n\t\/\/ Recognize first argument as an executable or buildid override.\n\tif len(args) > 1 {\n\t\targ0 := args[0]\n\t\tif file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0); err == nil {\n\t\t\tfile.Close()\n\t\t\texecName = arg0\n\t\t\targs = args[1:]\n\t\t} else if *flagBuildID == \"\" && isBuildID(arg0) {\n\t\t\t*flagBuildID = arg0\n\t\t\targs = args[1:]\n\t\t}\n\t}\n\n\t\/\/ Report conflicting options\n\tif err := updateFlags(installedFlags); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcmd, err := outputFormat(flagCommands, flagParamCommands)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif cmd != nil && *flagHTTP != \"\" {\n\t\treturn nil, nil, errors.New(\"-http is not compatible with an output format on the command line\")\n\t}\n\n\tsi := pprofVariables[\"sample_index\"].value\n\tsi = sampleIndex(flagTotalDelay, si, \"delay\", \"-total_delay\", o.UI)\n\tsi = sampleIndex(flagMeanDelay, si, \"delay\", \"-mean_delay\", o.UI)\n\tsi = sampleIndex(flagContentions, si, \"contentions\", \"-contentions\", o.UI)\n\tsi = sampleIndex(flagInUseSpace, si, \"inuse_space\", \"-inuse_space\", o.UI)\n\tsi = sampleIndex(flagInUseObjects, si, \"inuse_objects\", \"-inuse_objects\", o.UI)\n\tsi = sampleIndex(flagAllocSpace, si, \"alloc_space\", \"-alloc_space\", o.UI)\n\tsi = sampleIndex(flagAllocObjects, si, \"alloc_objects\", \"-alloc_objects\", o.UI)\n\tpprofVariables.set(\"sample_index\", si)\n\n\tif *flagMeanDelay {\n\t\tpprofVariables.set(\"mean\", \"true\")\n\t}\n\n\tsource := &source{\n\t\tSources: args,\n\t\tExecName: execName,\n\t\tBuildID: *flagBuildID,\n\t\tSeconds: *flagSeconds,\n\t\tTimeout: *flagTimeout,\n\t\tSymbolize: *flagSymbolize,\n\t\tHTTPHostport: *flagHTTP,\n\t\tComment: *flagAddComment,\n\t}\n\n\tif err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnormalize := pprofVariables[\"normalize\"].boolValue()\n\tif normalize && len(source.Base) == 0 {\n\t\treturn nil, nil, errors.New(\"must have base profile to normalize by\")\n\t}\n\tsource.Normalize = normalize\n\n\tif bu, ok := o.Obj.(*binutils.Binutils); ok {\n\t\tbu.SetTools(*flagTools)\n\t}\n\treturn source, cmd, nil\n}\n\n\/\/ addBaseProfiles adds the list of base profiles or diff base profiles to\n\/\/ the source. 
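For illustration, two hypothetical invocations and the fields they set:\n\/\/\n\/\/   pprof -base old.pb.gz cur.pb.gz        Base=[old.pb.gz], DiffBase=false\n\/\/   pprof -diff_base old.pb.gz cur.pb.gz   Base=[old.pb.gz], DiffBase=true\n\/\/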
This function will return an error if both base and diff base\n\/\/ profiles are specified.\nfunc (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error {\n\tbase, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase)\n\tif len(base) > 0 && len(diffBase) > 0 {\n\t\treturn errors.New(\"-base and -diff_base flags cannot both be specified\")\n\t}\n\n\tsource.Base = base\n\tif len(diffBase) > 0 {\n\t\tsource.Base, source.DiffBase = diffBase, true\n\t}\n\treturn nil\n}\n\n\/\/ dropEmpty list takes a slice of string pointers, and outputs a slice of\n\/\/ non-empty strings associated with the flag.\nfunc dropEmpty(list []*string) []string {\n\tvar l []string\n\tfor _, s := range list {\n\t\tif *s != \"\" {\n\t\t\tl = append(l, *s)\n\t\t}\n\t}\n\treturn l\n}\n\n\/\/ installFlags creates command line flags for pprof variables.\nfunc installFlags(flag plugin.FlagSet) flagsInstalled {\n\tf := flagsInstalled{\n\t\tints: make(map[string]*int),\n\t\tbools: make(map[string]*bool),\n\t\tfloats: make(map[string]*float64),\n\t\tstrings: make(map[string]*string),\n\t}\n\tfor n, v := range pprofVariables {\n\t\tswitch v.kind {\n\t\tcase boolKind:\n\t\t\tif v.group != \"\" {\n\t\t\t\t\/\/ Set all radio variables to false to identify conflicts.\n\t\t\t\tf.bools[n] = flag.Bool(n, false, v.help)\n\t\t\t} else {\n\t\t\t\tf.bools[n] = flag.Bool(n, v.boolValue(), v.help)\n\t\t\t}\n\t\tcase intKind:\n\t\t\tf.ints[n] = flag.Int(n, v.intValue(), v.help)\n\t\tcase floatKind:\n\t\t\tf.floats[n] = flag.Float64(n, v.floatValue(), v.help)\n\t\tcase stringKind:\n\t\t\tf.strings[n] = flag.String(n, v.value, v.help)\n\t\t}\n\t}\n\treturn f\n}\n\n\/\/ updateFlags updates the pprof variables according to the flags\n\/\/ parsed in the command line.\nfunc updateFlags(f flagsInstalled) error {\n\tvars := pprofVariables\n\tgroups := map[string]string{}\n\tfor n, v := range f.bools {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t\tif *v {\n\t\t\tg := vars[n].group\n\t\t\tif g != \"\" && groups[g] != \"\" {\n\t\t\t\treturn fmt.Errorf(\"conflicting options %q and %q set\", n, groups[g])\n\t\t\t}\n\t\t\tgroups[g] = n\n\t\t}\n\t}\n\tfor n, v := range f.ints {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t}\n\tfor n, v := range f.floats {\n\t\tvars.set(n, fmt.Sprint(*v))\n\t}\n\tfor n, v := range f.strings {\n\t\tvars.set(n, *v)\n\t}\n\treturn nil\n}\n\ntype flagsInstalled struct {\n\tints map[string]*int\n\tbools map[string]*bool\n\tfloats map[string]*float64\n\tstrings map[string]*string\n}\n\n\/\/ isBuildID determines if the profile may contain a build ID, by\n\/\/ checking that it is a string of hex digits.\nfunc isBuildID(id string) bool {\n\treturn strings.Trim(id, \"0123456789abcdefABCDEF\") == \"\"\n}\n\nfunc sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string {\n\tif *flag {\n\t\tif si == \"\" {\n\t\t\treturn sampleType\n\t\t}\n\t\tui.PrintErr(\"Multiple value selections, ignoring \", option)\n\t}\n\treturn si\n}\n\nfunc outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) {\n\tfor n, b := range bcmd {\n\t\tif *b {\n\t\t\tif cmd != nil {\n\t\t\t\treturn nil, errors.New(\"must set at most one output format\")\n\t\t\t}\n\t\t\tcmd = []string{n}\n\t\t}\n\t}\n\tfor n, s := range acmd {\n\t\tif *s != \"\" {\n\t\t\tif cmd != nil {\n\t\t\t\treturn nil, errors.New(\"must set at most one output format\")\n\t\t\t}\n\t\t\tcmd = []string{n, *s}\n\t\t}\n\t}\n\treturn cmd, nil\n}\n\nvar usageMsgHdr = `usage:\n\nProduce output in the specified format.\n\n pprof [options] [binary] 
...\n\nOmit the format to get an interactive shell whose commands can be used\nto generate various views of a profile\n\n pprof [options] [binary] ...\n\nOmit the format and provide the \"-http\" flag to get an interactive web\ninterface at the specified host:port that can be used to navigate through\nvarious views of a profile.\n\n pprof -http [host]:[port] [options] [binary] ...\n\nDetails:\n`\n\nvar usageMsgSrc = \"\\n\\n\" +\n\t\" Source options:\\n\" +\n\t\" -seconds Duration for time-based profile collection\\n\" +\n\t\" -timeout Timeout in seconds for profile collection\\n\" +\n\t\" -buildid Override build id for main binary\\n\" +\n\t\" -add_comment Free-form annotation to add to the profile\\n\" +\n\t\" Displayed on some reports or with pprof -comments\\n\" +\n\t\" -diff_base source Source of base profile for comparison\\n\" +\n\t\" -base source Source of base profile for profile subtraction\\n\" +\n\t\" profile.pb.gz Profile in compressed protobuf format\\n\" +\n\t\" legacy_profile Profile in legacy pprof format\\n\" +\n\t\" http:\/\/host\/profile URL for profile handler to retrieve\\n\" +\n\t\" -symbolize= Controls source of symbol information\\n\" +\n\t\" none Do not attempt symbolization\\n\" +\n\t\" local Examine only local binaries\\n\" +\n\t\" fastlocal Only get function names from local binaries\\n\" +\n\t\" remote Do not examine local binaries\\n\" +\n\t\" force Force re-symbolization\\n\" +\n\t\" Binary Local path or build id of binary for symbolization\\n\"\n\nvar usageMsgVars = \"\\n\\n\" +\n\t\" Misc options:\\n\" +\n\t\" -http Provide web based interface at host:port.\\n\" +\n\t\" Host is optional and 'localhost' by default.\\n\" +\n\t\" Port is optional and a randomly available port by default.\\n\" +\n\t\" -tools Search path for object tools\\n\" +\n\t\"\\n\" +\n\t\" Legacy convenience options:\\n\" +\n\t\" -inuse_space Same as -sample_index=inuse_space\\n\" +\n\t\" -inuse_objects Same as -sample_index=inuse_objects\\n\" +\n\t\" -alloc_space Same as -sample_index=alloc_space\\n\" +\n\t\" -alloc_objects Same as -sample_index=alloc_objects\\n\" +\n\t\" -total_delay Same as -sample_index=delay\\n\" +\n\t\" -contentions Same as -sample_index=contentions\\n\" +\n\t\" -mean_delay Same as -mean -sample_index=delay\\n\" +\n\t\"\\n\" +\n\t\" Environment Variables:\\n\" +\n\t\" PPROF_TMPDIR Location for saved profiles (default $HOME\/pprof)\\n\" +\n\t\" PPROF_TOOLS Search path for object-level tools\\n\" +\n\t\" PPROF_BINARY_PATH Search path for local binary files\\n\" +\n\t\" default: $HOME\/pprof\/binaries\\n\" +\n\t\" searches $name, $path, $buildid\/$name, $path\/$buildid\\n\" +\n\t\" * On Windows, %USERPROFILE% is used instead of $HOME\"\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build darwin,!arm,!arm64 linux windows\n\/\/ +build !js\n\/\/ +build !android\n\/\/ +build !ios\n\npackage ui\n\nimport 
(\n\t\"errors\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphics\/opengl\"\n)\n\ntype userInterface struct {\n\twindow *glfw.Window\n\twidth int\n\theight int\n\tscale int\n\tdeviceScale float64\n\tframebufferScale int\n\tcontext *opengl.Context\n\tfuncs chan func()\n\tsizeChanged bool\n}\n\nvar currentUI *userInterface\n\nfunc CurrentUI() UserInterface {\n\treturn currentUI\n}\n\nfunc initialize() (*opengl.Context, error) {\n\truntime.LockOSThread()\n\n\tif err := glfw.Init(); err != nil {\n\t\treturn nil, err\n\t}\n\tglfw.WindowHint(glfw.Visible, glfw.False)\n\tglfw.WindowHint(glfw.Resizable, glfw.False)\n\tglfw.WindowHint(glfw.ContextVersionMajor, 2)\n\tglfw.WindowHint(glfw.ContextVersionMinor, 1)\n\n\t\/\/ As start, create an window with temporary size to create OpenGL context thread.\n\twindow, err := glfw.CreateWindow(16, 16, \"\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &userInterface{\n\t\twindow: window,\n\t\tfuncs: make(chan func()),\n\t\tsizeChanged: true,\n\t}\n\tch := make(chan error)\n\tgo func() {\n\t\truntime.LockOSThread()\n\t\tu.window.MakeContextCurrent()\n\t\tglfw.SwapInterval(1)\n\t\tvar err error\n\t\tu.context, err = opengl.NewContext()\n\t\tif err != nil {\n\t\t\tch <- err\n\t\t}\n\t\tclose(ch)\n\t\tu.context.Loop()\n\t}()\n\tcurrentUI = u\n\tif err := <-ch; err != nil {\n\t\treturn nil, err\n\t}\n\tif err := u.context.Init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn u.context, nil\n}\n\nfunc Main() error {\n\treturn currentUI.main()\n}\n\nfunc (u *userInterface) main() error {\n\t\/\/ TODO: Check this is done on the main thread.\n\tfor f := range u.funcs {\n\t\tf()\n\t}\n\treturn nil\n}\n\nfunc (u *userInterface) runOnMainThread(f func()) {\n\tif u.funcs == nil {\n\t\t\/\/ already closed\n\t\treturn\n\t}\n\tch := make(chan struct{})\n\tu.funcs <- func() {\n\t\tf()\n\t\tclose(ch)\n\t}\n\t<-ch\n}\n\nfunc (u *userInterface) SetScreenSize(width, height int) bool {\n\tr := false\n\tu.runOnMainThread(func() {\n\t\tr = u.setScreenSize(width, height, u.scale)\n\t})\n\treturn r\n}\n\nfunc (u *userInterface) SetScreenScale(scale int) bool {\n\tr := false\n\tu.runOnMainThread(func() {\n\t\tr = u.setScreenSize(u.width, u.height, scale)\n\t})\n\treturn r\n}\n\nfunc (u *userInterface) ScreenScale() int {\n\ts := 0\n\tu.runOnMainThread(func() {\n\t\ts = u.scale\n\t})\n\treturn s\n}\n\nfunc (u *userInterface) Start(width, height, scale int, title string) error {\n\tvar err error\n\tu.runOnMainThread(func() {\n\t\tm := glfw.GetPrimaryMonitor()\n\t\tv := m.GetVideoMode()\n\t\tmw, _ := m.GetPhysicalSize()\n\t\tu.deviceScale = deviceScale()\n\t\tu.framebufferScale = 1\n\n\t\tif !u.setScreenSize(width, height, scale) {\n\t\t\terr = errors.New(\"ui: Fail to set the screen size\")\n\t\t\treturn\n\t\t}\n\t\tu.window.SetTitle(title)\n\t\tu.window.Show()\n\n\t\tx := (v.Width - width*u.windowScale()) \/ 2\n\t\ty := (v.Height - height*u.windowScale()) \/ 3\n\t\tu.window.SetPos(x, y)\n\t})\n\treturn err\n}\n\nfunc (u *userInterface) windowScale() int {\n\treturn u.scale * int(u.deviceScale)\n}\n\nfunc (u *userInterface) actualScreenScale() int {\n\treturn u.windowScale() * u.framebufferScale\n}\n\nfunc (u *userInterface) pollEvents() error {\n\tglfw.PollEvents()\n\treturn currentInput.update(u.window, u.windowScale())\n}\n\nfunc (u *userInterface) Update() (interface{}, error) {\n\tshouldClose := false\n\tu.runOnMainThread(func() {\n\t\tshouldClose = 
u.window.ShouldClose()\n\t})\n\tif shouldClose {\n\t\treturn CloseEvent{}, nil\n\t}\n\n\tvar screenSizeEvent *ScreenSizeEvent\n\tu.runOnMainThread(func() {\n\t\tif !u.sizeChanged {\n\t\t\treturn\n\t\t}\n\t\tu.sizeChanged = false\n\t\tscreenSizeEvent = &ScreenSizeEvent{\n\t\t\tWidth: u.width,\n\t\t\tHeight: u.height,\n\t\t\tScale: u.scale,\n\t\t\tActualScale: u.actualScreenScale(),\n\t\t}\n\t})\n\tif screenSizeEvent != nil {\n\t\treturn *screenSizeEvent, nil\n\t}\n\n\tvar ferr error\n\tu.runOnMainThread(func() {\n\t\tif err := u.pollEvents(); err != nil {\n\t\t\tferr = err\n\t\t\treturn\n\t\t}\n\t\tfor u.window.GetAttrib(glfw.Focused) == 0 {\n\t\t\t\/\/ Wait for an arbitrary period to avoid busy loop.\n\t\t\ttime.Sleep(time.Second \/ 60)\n\t\t\tif err := u.pollEvents(); err != nil {\n\t\t\t\tferr = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif u.window.ShouldClose() {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\tif ferr != nil {\n\t\treturn nil, ferr\n\t}\n\t\/\/ Dummy channel\n\tch := make(chan struct{}, 1)\n\treturn RenderEvent{ch}, nil\n}\n\nfunc (u *userInterface) Terminate() error {\n\tu.runOnMainThread(func() {\n\t\tglfw.Terminate()\n\t})\n\tclose(u.funcs)\n\tu.funcs = nil\n\treturn nil\n}\n\nfunc (u *userInterface) SwapBuffers() error {\n\tvar err error\n\tu.runOnMainThread(func() {\n\t\terr = u.swapBuffers()\n\t})\n\treturn err\n}\n\nfunc (u *userInterface) swapBuffers() error {\n\t\/\/ The bound framebuffer must be the default one (0) before swapping buffers.\n\tif err := u.context.BindScreenFramebuffer(); err != nil {\n\t\treturn err\n\t}\n\tu.context.RunOnContextThread(func() error {\n\t\tu.window.SwapBuffers()\n\t\treturn nil\n\t})\n\treturn nil\n}\n\nfunc (u *userInterface) FinishRendering() error {\n\treturn nil\n}\n\nfunc (u *userInterface) setScreenSize(width, height, scale int) bool {\n\tif u.width == width && u.height == height && u.scale == scale {\n\t\treturn false\n\t}\n\n\t\/\/ u.scale should be set first since this affects windowScale().\n\torigScale := u.scale\n\tu.scale = scale\n\n\t\/\/ On Windows, giving a too small width doesn't call a callback (#165).\n\t\/\/ To prevent hanging up, return asap if the width is too small.\n\t\/\/ 252 is an arbitrary number and I guess this is small enough.\n\tconst minWindowWidth = 252\n\tif width*u.actualScreenScale() < minWindowWidth {\n\t\tu.scale = origScale\n\t\treturn false\n\t}\n\tu.width = width\n\tu.height = height\n\n\t\/\/ To make sure the current existing framebuffers are rendered,\n\t\/\/ swap buffers here before SetSize is called.\n\tu.swapBuffers()\n\n\tch := make(chan struct{})\n\twindow := u.window\n\twindow.SetFramebufferSizeCallback(func(_ *glfw.Window, width, height int) {\n\t\twindow.SetFramebufferSizeCallback(nil)\n\t\tclose(ch)\n\t})\n\twindow.SetSize(width*u.windowScale(), height*u.windowScale())\n\nevent:\n\tfor {\n\t\tglfw.PollEvents()\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tbreak event\n\t\tdefault:\n\t\t}\n\t}\n\t\/\/ This is usually 1, but sometimes more than 1 (e.g. 
Retina Mac)\n\tfw, _ := window.GetFramebufferSize()\n\tu.framebufferScale = fw \/ width \/ u.windowScale()\n\tu.sizeChanged = true\n\treturn true\n}\nui: Bug fix: unused variable\/\/ Copyright 2015 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build darwin,!arm,!arm64 linux windows\n\/\/ +build !js\n\/\/ +build !android\n\/\/ +build !ios\n\npackage ui\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphics\/opengl\"\n)\n\ntype userInterface struct {\n\twindow *glfw.Window\n\twidth int\n\theight int\n\tscale int\n\tdeviceScale float64\n\tframebufferScale int\n\tcontext *opengl.Context\n\tfuncs chan func()\n\tsizeChanged bool\n}\n\nvar currentUI *userInterface\n\nfunc CurrentUI() UserInterface {\n\treturn currentUI\n}\n\nfunc initialize() (*opengl.Context, error) {\n\truntime.LockOSThread()\n\n\tif err := glfw.Init(); err != nil {\n\t\treturn nil, err\n\t}\n\tglfw.WindowHint(glfw.Visible, glfw.False)\n\tglfw.WindowHint(glfw.Resizable, glfw.False)\n\tglfw.WindowHint(glfw.ContextVersionMajor, 2)\n\tglfw.WindowHint(glfw.ContextVersionMinor, 1)\n\n\t\/\/ As start, create an window with temporary size to create OpenGL context thread.\n\twindow, err := glfw.CreateWindow(16, 16, \"\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &userInterface{\n\t\twindow: window,\n\t\tfuncs: make(chan func()),\n\t\tsizeChanged: true,\n\t}\n\tch := make(chan error)\n\tgo func() {\n\t\truntime.LockOSThread()\n\t\tu.window.MakeContextCurrent()\n\t\tglfw.SwapInterval(1)\n\t\tvar err error\n\t\tu.context, err = opengl.NewContext()\n\t\tif err != nil {\n\t\t\tch <- err\n\t\t}\n\t\tclose(ch)\n\t\tu.context.Loop()\n\t}()\n\tcurrentUI = u\n\tif err := <-ch; err != nil {\n\t\treturn nil, err\n\t}\n\tif err := u.context.Init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn u.context, nil\n}\n\nfunc Main() error {\n\treturn currentUI.main()\n}\n\nfunc (u *userInterface) main() error {\n\t\/\/ TODO: Check this is done on the main thread.\n\tfor f := range u.funcs {\n\t\tf()\n\t}\n\treturn nil\n}\n\nfunc (u *userInterface) runOnMainThread(f func()) {\n\tif u.funcs == nil {\n\t\t\/\/ already closed\n\t\treturn\n\t}\n\tch := make(chan struct{})\n\tu.funcs <- func() {\n\t\tf()\n\t\tclose(ch)\n\t}\n\t<-ch\n}\n\nfunc (u *userInterface) SetScreenSize(width, height int) bool {\n\tr := false\n\tu.runOnMainThread(func() {\n\t\tr = u.setScreenSize(width, height, u.scale)\n\t})\n\treturn r\n}\n\nfunc (u *userInterface) SetScreenScale(scale int) bool {\n\tr := false\n\tu.runOnMainThread(func() {\n\t\tr = u.setScreenSize(u.width, u.height, scale)\n\t})\n\treturn r\n}\n\nfunc (u *userInterface) ScreenScale() int {\n\ts := 0\n\tu.runOnMainThread(func() {\n\t\ts = u.scale\n\t})\n\treturn s\n}\n\nfunc (u *userInterface) Start(width, height, scale int, title string) error {\n\tvar err error\n\tu.runOnMainThread(func() {\n\t\tm := 
glfw.GetPrimaryMonitor()\n\t\tv := m.GetVideoMode()\n\t\tu.deviceScale = deviceScale()\n\t\tu.framebufferScale = 1\n\n\t\tif !u.setScreenSize(width, height, scale) {\n\t\t\terr = errors.New(\"ui: Fail to set the screen size\")\n\t\t\treturn\n\t\t}\n\t\tu.window.SetTitle(title)\n\t\tu.window.Show()\n\n\t\tx := (v.Width - width*u.windowScale()) \/ 2\n\t\ty := (v.Height - height*u.windowScale()) \/ 3\n\t\tu.window.SetPos(x, y)\n\t})\n\treturn err\n}\n\nfunc (u *userInterface) windowScale() int {\n\treturn u.scale * int(u.deviceScale)\n}\n\nfunc (u *userInterface) actualScreenScale() int {\n\treturn u.windowScale() * u.framebufferScale\n}\n\nfunc (u *userInterface) pollEvents() error {\n\tglfw.PollEvents()\n\treturn currentInput.update(u.window, u.windowScale())\n}\n\nfunc (u *userInterface) Update() (interface{}, error) {\n\tshouldClose := false\n\tu.runOnMainThread(func() {\n\t\tshouldClose = u.window.ShouldClose()\n\t})\n\tif shouldClose {\n\t\treturn CloseEvent{}, nil\n\t}\n\n\tvar screenSizeEvent *ScreenSizeEvent\n\tu.runOnMainThread(func() {\n\t\tif !u.sizeChanged {\n\t\t\treturn\n\t\t}\n\t\tu.sizeChanged = false\n\t\tscreenSizeEvent = &ScreenSizeEvent{\n\t\t\tWidth: u.width,\n\t\t\tHeight: u.height,\n\t\t\tScale: u.scale,\n\t\t\tActualScale: u.actualScreenScale(),\n\t\t}\n\t})\n\tif screenSizeEvent != nil {\n\t\treturn *screenSizeEvent, nil\n\t}\n\n\tvar ferr error\n\tu.runOnMainThread(func() {\n\t\tif err := u.pollEvents(); err != nil {\n\t\t\tferr = err\n\t\t\treturn\n\t\t}\n\t\tfor u.window.GetAttrib(glfw.Focused) == 0 {\n\t\t\t\/\/ Wait for an arbitrary period to avoid busy loop.\n\t\t\ttime.Sleep(time.Second \/ 60)\n\t\t\tif err := u.pollEvents(); err != nil {\n\t\t\t\tferr = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif u.window.ShouldClose() {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\tif ferr != nil {\n\t\treturn nil, ferr\n\t}\n\t\/\/ Dummy channel\n\tch := make(chan struct{}, 1)\n\treturn RenderEvent{ch}, nil\n}\n\nfunc (u *userInterface) Terminate() error {\n\tu.runOnMainThread(func() {\n\t\tglfw.Terminate()\n\t})\n\tclose(u.funcs)\n\tu.funcs = nil\n\treturn nil\n}\n\nfunc (u *userInterface) SwapBuffers() error {\n\tvar err error\n\tu.runOnMainThread(func() {\n\t\terr = u.swapBuffers()\n\t})\n\treturn err\n}\n\nfunc (u *userInterface) swapBuffers() error {\n\t\/\/ The bound framebuffer must be the default one (0) before swapping buffers.\n\tif err := u.context.BindScreenFramebuffer(); err != nil {\n\t\treturn err\n\t}\n\tu.context.RunOnContextThread(func() error {\n\t\tu.window.SwapBuffers()\n\t\treturn nil\n\t})\n\treturn nil\n}\n\nfunc (u *userInterface) FinishRendering() error {\n\treturn nil\n}\n\nfunc (u *userInterface) setScreenSize(width, height, scale int) bool {\n\tif u.width == width && u.height == height && u.scale == scale {\n\t\treturn false\n\t}\n\n\t\/\/ u.scale should be set first since this affects windowScale().\n\torigScale := u.scale\n\tu.scale = scale\n\n\t\/\/ On Windows, giving a too small width doesn't call a callback (#165).\n\t\/\/ To prevent hanging up, return asap if the width is too small.\n\t\/\/ 252 is an arbitrary number and I guess this is small enough.\n\tconst minWindowWidth = 252\n\tif width*u.actualScreenScale() < minWindowWidth {\n\t\tu.scale = origScale\n\t\treturn false\n\t}\n\tu.width = width\n\tu.height = height\n\n\t\/\/ To make sure the current existing framebuffers are rendered,\n\t\/\/ swap buffers here before SetSize is called.\n\tu.swapBuffers()\n\n\tch := make(chan struct{})\n\twindow := 
u.window\n\twindow.SetFramebufferSizeCallback(func(_ *glfw.Window, width, height int) {\n\t\twindow.SetFramebufferSizeCallback(nil)\n\t\tclose(ch)\n\t})\n\twindow.SetSize(width*u.windowScale(), height*u.windowScale())\n\nevent:\n\tfor {\n\t\tglfw.PollEvents()\n\t\tselect {\n\t\tcase <-ch:\n\t\t\tbreak event\n\t\tdefault:\n\t\t}\n\t}\n\t\/\/ This is usually 1, but sometimes more than 1 (e.g. Retina Mac)\n\tfw, _ := window.GetFramebufferSize()\n\tu.framebufferScale = fw \/ width \/ u.windowScale()\n\tu.sizeChanged = true\n\treturn true\n}\n<|endoftext|>"} {"text":"package mock\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/influxdata\/flux\"\n\t\"github.com\/influxdata\/influxdb\/kit\/check\"\n\t\"github.com\/influxdata\/influxdb\/query\"\n)\n\n\/\/ ProxyQueryService mocks the idep QueryService for testing.\ntype ProxyQueryService struct {\n\tQueryF func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {\n\treturn s.QueryF(ctx, w, req)\n}\n\nfunc (s *ProxyQueryService) Check(ctx context.Context) check.Response {\n\treturn check.Response{Name: \"Mock Proxy Query Service\", Status: check.StatusPass}\n}\n\n\/\/ QueryService mocks the idep QueryService for testing.\ntype QueryService struct {\n\tQueryF func(ctx context.Context, req *query.Request) (flux.ResultIterator, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *QueryService) Query(ctx context.Context, req *query.Request) (flux.ResultIterator, error) {\n\treturn s.QueryF(ctx, req)\n}\n\nfunc (s *QueryService) Check(ctx context.Context) check.Response {\n\treturn check.Response{Name: \"Mock Query Service\", Status: check.StatusPass}\n}\n\n\/\/ AsyncQueryService mocks the idep QueryService for testing.\ntype AsyncQueryService struct {\n\tQueryF func(ctx context.Context, req *query.Request) (flux.Query, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux.Query, error) {\n\treturn s.QueryF(ctx, req)\n}\n\n\/\/ Query is a mock implementation of a flux.Query.\n\/\/ It contains controls to ensure that the flux.Query object is used correctly.\ntype Query struct {\n\tMetadata flux.Metadata\n\n\tspec *flux.Spec\n\tready chan flux.Result\n\tonce sync.Once\n\terr error\n\tmu sync.Mutex\n\tdone bool\n}\n\nvar _ flux.Query = &Query{}\n\n\/\/ NewQuery constructs a new asynchronous query.\nfunc NewQuery(spec *flux.Spec) *Query {\n\treturn &Query{\n\t\tMetadata: make(flux.Metadata),\n\t\tspec: spec,\n\t\tready: make(chan flux.Result, 1),\n\t}\n}\n\nfunc (q *Query) SetResults(results flux.Result) *Query {\n\tq.ready <- results\n\treturn q\n}\n\nfunc (q *Query) SetErr(err error) *Query {\n\tq.err = err\n\tq.Cancel()\n\treturn q\n}\n\nfunc (q *Query) Spec() *flux.Spec {\n\treturn q.spec\n}\n\nfunc (q *Query) Results() <-chan flux.Result {\n\treturn q.ready\n}\n\nfunc (q *Query) Done() {\n\tq.Cancel()\n\n\tq.mu.Lock()\n\tq.done = true\n\tq.mu.Unlock()\n}\n\n\/\/ Cancel closes the ready channel.\nfunc (q *Query) Cancel() {\n\tq.once.Do(func() {\n\t\tclose(q.ready)\n\t})\n}\n\n\/\/ Err will return an error if one was set.\nfunc (q *Query) Err() error {\n\treturn q.err\n}\n\n\/\/ Statistics will return Statistics. 
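The returned value carries only the\n\/\/ mock's Metadata.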
Unlike the normal flux.Query, this\n\/\/ will panic if it is called before Done.\nfunc (q *Query) Statistics() flux.Statistics {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif !q.done {\n\t\tpanic(\"call to query.Statistics() before the query has been finished\")\n\t}\n\treturn flux.Statistics{\n\t\tMetadata: q.Metadata,\n\t}\n}\nfix(query): make mock Query close its results channel (#13242)package mock\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/influxdata\/flux\"\n\t\"github.com\/influxdata\/influxdb\/kit\/check\"\n\t\"github.com\/influxdata\/influxdb\/query\"\n)\n\n\/\/ ProxyQueryService mocks the idpe QueryService for testing.\ntype ProxyQueryService struct {\n\tQueryF func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {\n\treturn s.QueryF(ctx, w, req)\n}\n\nfunc (s *ProxyQueryService) Check(ctx context.Context) check.Response {\n\treturn check.Response{Name: \"Mock Proxy Query Service\", Status: check.StatusPass}\n}\n\n\/\/ QueryService mocks the idpe QueryService for testing.\ntype QueryService struct {\n\tQueryF func(ctx context.Context, req *query.Request) (flux.ResultIterator, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *QueryService) Query(ctx context.Context, req *query.Request) (flux.ResultIterator, error) {\n\treturn s.QueryF(ctx, req)\n}\n\nfunc (s *QueryService) Check(ctx context.Context) check.Response {\n\treturn check.Response{Name: \"Mock Query Service\", Status: check.StatusPass}\n}\n\n\/\/ AsyncQueryService mocks the idpe QueryService for testing.\ntype AsyncQueryService struct {\n\tQueryF func(ctx context.Context, req *query.Request) (flux.Query, error)\n}\n\n\/\/ Query writes the results of the query request.\nfunc (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux.Query, error) {\n\treturn s.QueryF(ctx, req)\n}\n\n\/\/ Query is a mock implementation of a flux.Query.\n\/\/ It contains controls to ensure that the flux.Query object is used correctly.\n\/\/ Note: Query will only return one result, specified by calling the SetResults method.\ntype Query struct {\n\tMetadata flux.Metadata\n\n\tresults chan flux.Result\n\tonce sync.Once\n\terr error\n\tmu sync.Mutex\n\tdone bool\n}\n\nvar _ flux.Query = &Query{}\n\n\/\/ NewQuery constructs a new asynchronous query.\nfunc NewQuery() *Query {\n\treturn &Query{\n\t\tMetadata: make(flux.Metadata),\n\t\tresults: make(chan flux.Result, 1),\n\t}\n}\n\nfunc (q *Query) SetResults(results flux.Result) *Query {\n\tq.results <- results\n\tq.once.Do(func() {\n\t\tclose(q.results)\n\t})\n\treturn q\n}\n\nfunc (q *Query) SetErr(err error) *Query {\n\tq.err = err\n\tq.Cancel()\n\treturn q\n}\n\nfunc (q *Query) Results() <-chan flux.Result {\n\treturn q.results\n}\n\nfunc (q *Query) Done() {\n\tq.Cancel()\n\n\tq.mu.Lock()\n\tq.done = true\n\tq.mu.Unlock()\n}\n\n\/\/ Cancel closes the results channel.\nfunc (q *Query) Cancel() {\n\tq.once.Do(func() {\n\t\tclose(q.results)\n\t})\n}\n\n\/\/ Err will return an error if one was set.\nfunc (q *Query) Err() error {\n\treturn q.err\n}\n\n\/\/ Statistics will return Statistics.
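A minimal sketch of the intended call order (r stands for some\n\/\/ flux.Result; assumed here for illustration):\n\/\/\n\/\/   q := NewQuery().SetResults(r)\n\/\/   q.Done()\n\/\/   stats := q.Statistics() \/\/ safe only after Done\n\/\/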
Unlike the normal flux.Query, this\n\/\/ will panic if it is called before Done.\nfunc (q *Query) Statistics() flux.Statistics {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif !q.done {\n\t\tpanic(\"call to query.Statistics() before the query has been finished\")\n\t}\n\treturn flux.Statistics{\n\t\tMetadata: q.Metadata,\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"github.com\/btcsuite\/go-socks\/socks\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype ClientChallengeRequest struct {\n\tUserID uint64 `json:\"user_id\"`\n\tFingerprint string `json:\"fingerprint\"`\n}\ntype ChallengeAPIResponse struct {\n\tChallenge string `json:\"challenge\"`\n\tChallengeID uint64 `json:\"challenge_id\"`\n\tUserID uint64 `json:\"user_id\"`\n\tStatusCode int `json:\"status_code\"`\n\tSuccess bool `json:\"success\"`\n\tMessage string `json:\"status_message\"`\n\tVersion int64 `json:\"version\"`\n}\n\n\/\/ GetChallenge will fetch a challenge nonce from the server\nfunc GetChallenge(UserID uint64, Fingerprint string, UseTor bool) (ChallengeAPIResponse, error) {\n\n\tvar client http.Client\n\n\tif UseTor == true {\n\t\tproxy := &socks.Proxy{TORSOCKS, \"\", \"\", true}\n\t\ttr := &http.Transport{\n\t\t\tDial: proxy.Dial,\n\t\t}\n\t\tclient = http.Client{Transport: tr}\n\t} else {\n\t\tclient = http.Client{}\n\t}\n\n\tjsonBuf, jsonErr := json.Marshal(ClientChallengeRequest{UserID: UserID, Fingerprint: Fingerprint})\n\n\tif jsonErr != nil {\n\t\treturn ChallengeAPIResponse{}, jsonErr\n\t}\n\treq, httpReqErr := http.NewRequest(\"POST\", RIPACRYPTURL+\"challenge\/\", bytes.NewBuffer(jsonBuf))\n\tif httpReqErr != nil {\n\t\treturn ChallengeAPIResponse{}, httpReqErr\n\t}\n\treq.Header.Set(\"X-CLIENT-VER\", CLIENTVERSION)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\tresp, httpErr := client.Do(req)\n\tif httpErr != nil {\n\t\treturn ChallengeAPIResponse{}, httpErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\tvar apiResponse ChallengeAPIResponse\n\tjsonResponseParseErr := json.Unmarshal(body, &apiResponse)\n\tif jsonResponseParseErr != nil {\n\t\treturn ChallengeAPIResponse{}, jsonResponseParseErr\n\t} else {\n\t\treturn apiResponse, nil\n\t}\n\n\treturn ChallengeAPIResponse{}, nil\n}\n\n\/\/ DecryptChallenge will take an encrypted challenge nonce and a private key\n\/\/ then decrypt the challenge and return the plaintext\nfunc DecryptChallenge(challenge, privatekey string) (string, error) {\n\n\tkeyBuffer := bytes.NewBufferString(privatekey)\n\tentityList, err := openpgp.ReadArmoredKeyRing(keyBuffer)\n\tdec, err := base64.StdEncoding.DecodeString(challenge)\n\tmd, err := openpgp.ReadMessage(bytes.NewBuffer(dec), entityList, nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbytes, err := ioutil.ReadAll(md.UnverifiedBody)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdecStr := string(bytes)\n\n\treturn decStr, nil\n}\nTesting GoDoc layoutpackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"github.com\/btcsuite\/go-socks\/socks\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\n\/\/ ClientChallengeRequest describes the JSON required for an API request.\ntype ClientChallengeRequest struct {\n\tUserID uint64 `json:\"user_id\"`\n\tFingerprint string `json:\"fingerprint\"`\n}\n\n\/\/ ChallengeAPIResponse describes the JSON returned from the API.\ntype ChallengeAPIResponse struct 
{\n\tChallenge string `json:\"challenge\"`\n\tChallengeID uint64 `json:\"challenge_id\"`\n\tUserID uint64 `json:\"user_id\"`\n\tStatusCode int `json:\"status_code\"`\n\tSuccess bool `json:\"success\"`\n\tMessage string `json:\"status_message\"`\n\tVersion int64 `json:\"version\"`\n}\n\n\/\/ GetChallenge will fetch a challenge nonce from the server.\nfunc GetChallenge(UserID uint64, Fingerprint string, UseTor bool) (ChallengeAPIResponse, error) {\n\n\tvar client http.Client\n\n\tif UseTor {\n\t\tproxy := &socks.Proxy{TORSOCKS, \"\", \"\", true}\n\t\ttr := &http.Transport{\n\t\t\tDial: proxy.Dial,\n\t\t}\n\t\tclient = http.Client{Transport: tr}\n\t} else {\n\t\tclient = http.Client{}\n\t}\n\n\tjsonBuf, jsonErr := json.Marshal(ClientChallengeRequest{UserID: UserID, Fingerprint: Fingerprint})\n\n\tif jsonErr != nil {\n\t\treturn ChallengeAPIResponse{}, jsonErr\n\t}\n\treq, httpReqErr := http.NewRequest(\"POST\", RIPACRYPTURL+\"challenge\/\", bytes.NewBuffer(jsonBuf))\n\tif httpReqErr != nil {\n\t\treturn ChallengeAPIResponse{}, httpReqErr\n\t}\n\treq.Header.Set(\"X-CLIENT-VER\", CLIENTVERSION)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\tresp, httpErr := client.Do(req)\n\tif httpErr != nil {\n\t\treturn ChallengeAPIResponse{}, httpErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, readErr := ioutil.ReadAll(resp.Body)\n\tif readErr != nil {\n\t\treturn ChallengeAPIResponse{}, readErr\n\t}\n\n\tvar apiResponse ChallengeAPIResponse\n\tjsonResponseParseErr := json.Unmarshal(body, &apiResponse)\n\tif jsonResponseParseErr != nil {\n\t\treturn ChallengeAPIResponse{}, jsonResponseParseErr\n\t}\n\n\treturn apiResponse, nil\n}\n\n\/\/ DecryptChallenge will take an encrypted challenge nonce and a private key then decrypt the challenge and return the plaintext.\nfunc DecryptChallenge(challenge, privatekey string) (string, error) {\n\n\tkeyBuffer := bytes.NewBufferString(privatekey)\n\tentityList, err := openpgp.ReadArmoredKeyRing(keyBuffer)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdec, err := base64.StdEncoding.DecodeString(challenge)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmd, err := openpgp.ReadMessage(bytes.NewBuffer(dec), entityList, nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tplaintext, err := ioutil.ReadAll(md.UnverifiedBody)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(plaintext), nil\n}\n<|endoftext|>"} {"text":"package signal\n\nimport (\n\t\"syscall\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tsigrtmin = 34\n\tsigrtmax = 64\n)\n\n\/\/ SignalMap is a map of Linux signals.\nvar SignalMap = map[string]syscall.Signal{\n\t\"ABRT\": unix.SIGABRT,\n\t\"ALRM\": unix.SIGALRM,\n\t\"BUS\": unix.SIGBUS,\n\t\"CHLD\": unix.SIGCHLD,\n\t\"CLD\": unix.SIGCLD,\n\t\"CONT\": unix.SIGCONT,\n\t\"FPE\": unix.SIGFPE,\n\t\"HUP\": unix.SIGHUP,\n\t\"ILL\": unix.SIGILL,\n\t\"INT\": unix.SIGINT,\n\t\"IO\": unix.SIGIO,\n\t\"IOT\": unix.SIGIOT,\n\t\"KILL\": unix.SIGKILL,\n\t\"PIPE\": unix.SIGPIPE,\n\t\"POLL\": unix.SIGPOLL,\n\t\"PROF\": unix.SIGPROF,\n\t\"PWR\": unix.SIGPWR,\n\t\"QUIT\": unix.SIGQUIT,\n\t\"SEGV\": unix.SIGSEGV,\n\t\"STKFLT\": unix.SIGSTKFLT,\n\t\"STOP\": unix.SIGSTOP,\n\t\"SYS\": unix.SIGSYS,\n\t\"TERM\": unix.SIGTERM,\n\t\"TRAP\": unix.SIGTRAP,\n\t\"TSTP\": unix.SIGTSTP,\n\t\"TTIN\": unix.SIGTTIN,\n\t\"TTOU\": unix.SIGTTOU,\n\t\"UNUSED\": unix.SIGUNUSED,\n\t\"URG\": unix.SIGURG,\n\t\"USR1\": unix.SIGUSR1,\n\t\"USR2\": unix.SIGUSR2,\n\t\"VTALRM\": unix.SIGVTALRM,\n\t\"WINCH\": unix.SIGWINCH,\n\t\"XCPU\": unix.SIGXCPU,\n\t\"XFSZ\": unix.SIGXFSZ,\n\t\"RTMIN\": sigrtmin,\n\t\"RTMIN+1\": sigrtmin + 1,\n\t\"RTMIN+2\": sigrtmin +
2,\n\t\"RTMIN+3\": sigrtmin + 3,\n\t\"RTMIN+4\": sigrtmin + 4,\n\t\"RTMIN+5\": sigrtmin + 5,\n\t\"RTMIN+6\": sigrtmin + 6,\n\t\"RTMIN+7\": sigrtmin + 7,\n\t\"RTMIN+8\": sigrtmin + 8,\n\t\"RTMIN+9\": sigrtmin + 9,\n\t\"RTMIN+10\": sigrtmin + 10,\n\t\"RTMIN+11\": sigrtmin + 11,\n\t\"RTMIN+12\": sigrtmin + 12,\n\t\"RTMIN+13\": sigrtmin + 13,\n\t\"RTMIN+14\": sigrtmin + 14,\n\t\"RTMIN+15\": sigrtmin + 15,\n\t\"RTMAX-14\": sigrtmax - 14,\n\t\"RTMAX-13\": sigrtmax - 13,\n\t\"RTMAX-12\": sigrtmax - 12,\n\t\"RTMAX-11\": sigrtmax - 11,\n\t\"RTMAX-10\": sigrtmax - 10,\n\t\"RTMAX-9\": sigrtmax - 9,\n\t\"RTMAX-8\": sigrtmax - 8,\n\t\"RTMAX-7\": sigrtmax - 7,\n\t\"RTMAX-6\": sigrtmax - 6,\n\t\"RTMAX-5\": sigrtmax - 5,\n\t\"RTMAX-4\": sigrtmax - 4,\n\t\"RTMAX-3\": sigrtmax - 3,\n\t\"RTMAX-2\": sigrtmax - 2,\n\t\"RTMAX-1\": sigrtmax - 1,\n\t\"RTMAX\": sigrtmax,\n}\nUse Mkdev, Major and Minor functions from golang.org\/x\/sys\/unixpackage signal\n\nimport (\n\t\"syscall\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tsigrtmin = 34\n\tsigrtmax = 64\n)\n\n\/\/ SignalMap is a map of Linux signals.\nvar SignalMap = map[string]syscall.Signal{\n\t\"ABRT\": unix.SIGABRT,\n\t\"ALRM\": unix.SIGALRM,\n\t\"BUS\": unix.SIGBUS,\n\t\"CHLD\": unix.SIGCHLD,\n\t\"CLD\": unix.SIGCLD,\n\t\"CONT\": unix.SIGCONT,\n\t\"FPE\": unix.SIGFPE,\n\t\"HUP\": unix.SIGHUP,\n\t\"ILL\": unix.SIGILL,\n\t\"INT\": unix.SIGINT,\n\t\"IO\": unix.SIGIO,\n\t\"IOT\": unix.SIGIOT,\n\t\"KILL\": unix.SIGKILL,\n\t\"PIPE\": unix.SIGPIPE,\n\t\"POLL\": unix.SIGPOLL,\n\t\"PROF\": unix.SIGPROF,\n\t\"PWR\": unix.SIGPWR,\n\t\"QUIT\": unix.SIGQUIT,\n\t\"SEGV\": unix.SIGSEGV,\n\t\"STKFLT\": unix.SIGSTKFLT,\n\t\"STOP\": unix.SIGSTOP,\n\t\"SYS\": unix.SIGSYS,\n\t\"TERM\": unix.SIGTERM,\n\t\"TRAP\": unix.SIGTRAP,\n\t\"TSTP\": unix.SIGTSTP,\n\t\"TTIN\": unix.SIGTTIN,\n\t\"TTOU\": unix.SIGTTOU,\n\t\"URG\": unix.SIGURG,\n\t\"USR1\": unix.SIGUSR1,\n\t\"USR2\": unix.SIGUSR2,\n\t\"VTALRM\": unix.SIGVTALRM,\n\t\"WINCH\": unix.SIGWINCH,\n\t\"XCPU\": unix.SIGXCPU,\n\t\"XFSZ\": unix.SIGXFSZ,\n\t\"RTMIN\": sigrtmin,\n\t\"RTMIN+1\": sigrtmin + 1,\n\t\"RTMIN+2\": sigrtmin + 2,\n\t\"RTMIN+3\": sigrtmin + 3,\n\t\"RTMIN+4\": sigrtmin + 4,\n\t\"RTMIN+5\": sigrtmin + 5,\n\t\"RTMIN+6\": sigrtmin + 6,\n\t\"RTMIN+7\": sigrtmin + 7,\n\t\"RTMIN+8\": sigrtmin + 8,\n\t\"RTMIN+9\": sigrtmin + 9,\n\t\"RTMIN+10\": sigrtmin + 10,\n\t\"RTMIN+11\": sigrtmin + 11,\n\t\"RTMIN+12\": sigrtmin + 12,\n\t\"RTMIN+13\": sigrtmin + 13,\n\t\"RTMIN+14\": sigrtmin + 14,\n\t\"RTMIN+15\": sigrtmin + 15,\n\t\"RTMAX-14\": sigrtmax - 14,\n\t\"RTMAX-13\": sigrtmax - 13,\n\t\"RTMAX-12\": sigrtmax - 12,\n\t\"RTMAX-11\": sigrtmax - 11,\n\t\"RTMAX-10\": sigrtmax - 10,\n\t\"RTMAX-9\": sigrtmax - 9,\n\t\"RTMAX-8\": sigrtmax - 8,\n\t\"RTMAX-7\": sigrtmax - 7,\n\t\"RTMAX-6\": sigrtmax - 6,\n\t\"RTMAX-5\": sigrtmax - 5,\n\t\"RTMAX-4\": sigrtmax - 4,\n\t\"RTMAX-3\": sigrtmax - 3,\n\t\"RTMAX-2\": sigrtmax - 2,\n\t\"RTMAX-1\": sigrtmax - 1,\n\t\"RTMAX\": sigrtmax,\n}\n<|endoftext|>"} {"text":"package ws\n\nimport (\n\t\"io\"\n\t\"log\"\n\n\t\"github.com\/pkg\/errors\"\n\n\txws \"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ chans is a simple struct for binding a set of done, errors, and fail\n\/\/ channels from a typical websocket bind to simplify the ws.Bind func.\ntype chans struct {\n\t*xws.Conn\n\tSendRecver\n\n\terrs chan error\n\tdone, fail chan struct{}\n}\n\n\/\/ RecvWrite receives messages from the SendRecver and writes them to\n\/\/ the Conn. 
If an error is received from errs, it will write it to the\n\/\/ Conn using err.Error(). When done is closed, it will return. Any\n\/\/ error on Recv or Write will cause it to return silently.\nfunc (ch chans) RecvWrite() {\n\tdefer close(ch.fail)\n\t\/\/ Receive from sr; pass result to c Write.\n\tfor {\n\t\tselect {\n\t\tcase err := <-ch.errs:\n\t\t\t_, err = ch.Write([]byte(err.Error()))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ch.done:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tif bs, err := ch.Recv(); err != nil {\n\t\t\treturn\n\t\t} else if _, err = ch.Write(bs); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ ReadSend uses the given SocketReader to read bytes from the Conn. In\n\/\/ case of errors.Cause(err) == io.EOF, it will return silently.\n\/\/ Otherwise, it will Send the read bytes on its SendRecver.\nfunc (ch chans) ReadSend(read SocketReader) {\n\tdefer close(ch.done)\n\t\/\/ Receive from websocket; pass result to sr Send.\n\tfor {\n\t\tselect {\n\t\tcase <-ch.fail:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tif bs, ok, err := read(ch.Conn); errors.Cause(err) == io.EOF {\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tlog.Printf(\"failed to read from socket: %s\", err.Error())\n\t\t\treturn\n\t\t} else if !ok {\n\t\t\t\/\/ Formatting error. Tell the\n\t\t\t\/\/ frontend, then move on.\n\t\t\tch.errs <- errors.Errorf(\"malformed message: %#q\", bs)\n\t\t} else if err = ch.Send(bs); err != nil {\n\t\t\tlog.Printf(\"failed to send to Sender: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\nAdd nuance to websocket parse failure conditionpackage ws\n\nimport (\n\t\"io\"\n\t\"log\"\n\n\t\"github.com\/pkg\/errors\"\n\n\txws \"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ chans is a simple struct for binding a set of done, errors, and fail\n\/\/ channels from a typical websocket bind to simplify the ws.Bind func.\ntype chans struct {\n\t*xws.Conn\n\tSendRecver\n\n\terrs chan error\n\tdone, fail chan struct{}\n}\n\n\/\/ RecvWrite receives messages from the SendRecver and writes them to\n\/\/ the Conn. If an error is received from errs, it will write it to the\n\/\/ Conn using err.Error(). When done is closed, it will return. Any\n\/\/ error on Recv or Write will cause it to return silently.\nfunc (ch chans) RecvWrite() {\n\tdefer close(ch.fail)\n\t\/\/ Receive from sr; pass result to c Write.\n\tfor {\n\t\tselect {\n\t\tcase err := <-ch.errs:\n\t\t\t_, err = ch.Write([]byte(err.Error()))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ch.done:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tif bs, err := ch.Recv(); err != nil {\n\t\t\treturn\n\t\t} else if _, err = ch.Write(bs); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ ReadSend uses the given SocketReader to read bytes from the Conn. In\n\/\/ case of errors.Cause(err) == io.EOF, it will return silently.\n\/\/ Otherwise, it will Send the read bytes on its SendRecver.\nfunc (ch chans) ReadSend(read SocketReader) {\n\tdefer close(ch.done)\n\t\/\/ Receive from websocket; pass result to sr Send.\n\tfor {\n\t\tselect {\n\t\tcase <-ch.fail:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tif bs, ok, err := read(ch.Conn); errors.Cause(err) == io.EOF {\n\t\t\treturn\n\t\t} else if ok && err != nil {\n\t\t\t\/\/ Error, but not a parse error.\n\t\t\tlog.Printf(\"failed to read from socket: %s\", err.Error())\n\t\t\treturn\n\t\t} else if !ok && err != nil {\n\t\t\t\/\/ Content error. 
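The read reported a parse\n\t\t\t\/\/ failure, with details in err.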
Tell the frontend, then move on.\n\t\t\tch.errs <- errors.Errorf(\"malformed message: %s\", err.Error())\n\t\t} else if !ok {\n\t\t\t\/\/ Content error, but no specifics. Tell the\n\t\t\t\/\/ frontend, then move on.\n\t\t\tch.errs <- errors.Errorf(\"malformed message: %#q\", bs)\n\t\t} else if err = ch.Send(bs); err != nil {\n\t\t\t\/\/ Not bad content, and no error.\n\t\t\tlog.Printf(\"failed to send to Sender: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/configmapandsecret\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/constants\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/fs\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/loader\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/types\"\n)\n\nfunc newCmdAddConfigMap(fSys fs.FileSystem) *cobra.Command {\n\tvar flagsAndArgs cMapFlagsAndArgs\n\tcmd := &cobra.Command{\n\t\tUse: \"configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1]\",\n\t\tShort: \"Adds a configmap to the kustomization file.\",\n\t\tLong: \"\",\n\t\tExample: `\n\t# Adds a configmap to the kustomization file (with a specified key)\n\tkustomize edit add configmap my-configmap --from-file=my-key=file\/path --from-literal=my-literal=12345\n\n\t# Adds a configmap to the kustomization file (key is the filename)\n\tkustomize edit add configmap my-configmap --from-file=file\/path\n\n\t# Adds a configmap from env-file\n\tkustomize edit add configmap my-configmap --from-env-file=env\/path.env\n`,\n\t\tRunE: func(_ *cobra.Command, args []string) error {\n\t\t\terr := flagsAndArgs.Validate(args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = flagsAndArgs.ExpandFileSource(fSys)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Load the kustomization file.\n\t\t\tmf, err := newKustomizationFile(constants.KustomizationFileName, fSys)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tkustomization, err := mf.read()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Add the flagsAndArgs map to the kustomization file.\n\t\t\terr = addConfigMap(\n\t\t\t\tkustomization, flagsAndArgs,\n\t\t\t\tconfigmapandsecret.NewConfigMapFactory(\n\t\t\t\t\tfSys, loader.NewFileLoader(fSys)))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Write out the kustomization file with added configmap.\n\t\t\treturn mf.write(kustomization)\n\t\t},\n\t}\n\n\tcmd.Flags().StringSliceVar(\n\t\t&flagsAndArgs.FileSources,\n\t\t\"from-file\",\n\t\t[]string{},\n\t\t\"Key file can be specified using its file path, in which case file basename will be used as configmap \"+\n\t\t\t\"key, or optionally with a key and file path, in which case the given key will be used. 
Specifying a \"+\n\t\t\t\"directory will iterate each named file in the directory whose basename is a valid configmap key.\")\n\tcmd.Flags().StringArrayVar(\n\t\t&flagsAndArgs.LiteralSources,\n\t\t\"from-literal\",\n\t\t[]string{},\n\t\t\"Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)\")\n\tcmd.Flags().StringVar(\n\t\t&flagsAndArgs.EnvFileSource,\n\t\t\"from-env-file\",\n\t\t\"\",\n\t\t\"Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. a Docker .env file).\")\n\n\treturn cmd\n}\n\n\/\/ addConfigMap adds a configmap to a kustomization file.\n\/\/ Note: error may leave kustomization file in an undefined state.\n\/\/ Suggest passing a copy of kustomization file.\nfunc addConfigMap(\n\tk *types.Kustomization,\n\tflagsAndArgs cMapFlagsAndArgs,\n\tfactory *configmapandsecret.ConfigMapFactory) error {\n\tcmArgs := makeConfigMapArgs(k, flagsAndArgs.Name)\n\terr := mergeFlagsIntoCmArgs(&cmArgs.DataSources, flagsAndArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Validate by trying to create corev1.configmap.\n\t_, _, err = factory.MakeUnstructAndGenerateName(cmArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc makeConfigMapArgs(m *types.Kustomization, name string) *types.ConfigMapArgs {\n\tfor i, v := range m.ConfigMapGenerator {\n\t\tif name == v.Name {\n\t\t\treturn &m.ConfigMapGenerator[i]\n\t\t}\n\t}\n\t\/\/ config map not found, create new one and add it to the kustomization file.\n\tcm := &types.ConfigMapArgs{Name: name}\n\tm.ConfigMapGenerator = append(m.ConfigMapGenerator, *cm)\n\treturn &m.ConfigMapGenerator[len(m.ConfigMapGenerator)-1]\n}\n\nfunc mergeFlagsIntoCmArgs(src *types.DataSources, flags cMapFlagsAndArgs) error {\n\tsrc.LiteralSources = append(src.LiteralSources, flags.LiteralSources...)\n\tsrc.FileSources = append(src.FileSources, flags.FileSources...)\n\tif src.EnvSource != \"\" && src.EnvSource != flags.EnvFileSource {\n\t\treturn fmt.Errorf(\"updating existing env source '%s' not allowed\", src.EnvSource)\n\t}\n\tsrc.EnvSource = flags.EnvFileSource\n\treturn nil\n}\nChange the order of validate and expandFileSource in add configmap subcommand\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage commands\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/configmapandsecret\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/constants\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/fs\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/loader\"\n\t\"github.com\/kubernetes-sigs\/kustomize\/pkg\/types\"\n)\n\nfunc newCmdAddConfigMap(fSys fs.FileSystem) *cobra.Command {\n\tvar flagsAndArgs cMapFlagsAndArgs\n\tcmd := &cobra.Command{\n\t\tUse: \"configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1]\",\n\t\tShort: \"Adds a configmap to the kustomization file.\",\n\t\tLong: \"\",\n\t\tExample: `\n\t# Adds a configmap to the kustomization file (with a specified key)\n\tkustomize 
edit add configmap my-configmap --from-file=my-key=file\/path --from-literal=my-literal=12345\n\n\t# Adds a configmap to the kustomization file (key is the filename)\n\tkustomize edit add configmap my-configmap --from-file=file\/path\n\n\t# Adds a configmap from env-file\n\tkustomize edit add configmap my-configmap --from-env-file=env\/path.env\n`,\n\t\tRunE: func(_ *cobra.Command, args []string) error {\n\t\t\terr := flagsAndArgs.ExpandFileSource(fSys)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = flagsAndArgs.Validate(args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Load the kustomization file.\n\t\t\tmf, err := newKustomizationFile(constants.KustomizationFileName, fSys)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tkustomization, err := mf.read()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Add the flagsAndArgs map to the kustomization file.\n\t\t\terr = addConfigMap(\n\t\t\t\tkustomization, flagsAndArgs,\n\t\t\t\tconfigmapandsecret.NewConfigMapFactory(\n\t\t\t\t\tfSys, loader.NewFileLoader(fSys)))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Write out the kustomization file with added configmap.\n\t\t\treturn mf.write(kustomization)\n\t\t},\n\t}\n\n\tcmd.Flags().StringSliceVar(\n\t\t&flagsAndArgs.FileSources,\n\t\t\"from-file\",\n\t\t[]string{},\n\t\t\"Key file can be specified using its file path, in which case file basename will be used as configmap \"+\n\t\t\t\"key, or optionally with a key and file path, in which case the given key will be used. Specifying a \"+\n\t\t\t\"directory will iterate each named file in the directory whose basename is a valid configmap key.\")\n\tcmd.Flags().StringArrayVar(\n\t\t&flagsAndArgs.LiteralSources,\n\t\t\"from-literal\",\n\t\t[]string{},\n\t\t\"Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)\")\n\tcmd.Flags().StringVar(\n\t\t&flagsAndArgs.EnvFileSource,\n\t\t\"from-env-file\",\n\t\t\"\",\n\t\t\"Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. 
a Docker .env file).\")\n\n\treturn cmd\n}\n\n\/\/ addConfigMap adds a configmap to a kustomization file.\n\/\/ Note: error may leave kustomization file in an undefined state.\n\/\/ Suggest passing a copy of kustomization file.\nfunc addConfigMap(\n\tk *types.Kustomization,\n\tflagsAndArgs cMapFlagsAndArgs,\n\tfactory *configmapandsecret.ConfigMapFactory) error {\n\tcmArgs := makeConfigMapArgs(k, flagsAndArgs.Name)\n\terr := mergeFlagsIntoCmArgs(&cmArgs.DataSources, flagsAndArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Validate by trying to create corev1.configmap.\n\t_, _, err = factory.MakeUnstructAndGenerateName(cmArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc makeConfigMapArgs(m *types.Kustomization, name string) *types.ConfigMapArgs {\n\tfor i, v := range m.ConfigMapGenerator {\n\t\tif name == v.Name {\n\t\t\treturn &m.ConfigMapGenerator[i]\n\t\t}\n\t}\n\t\/\/ config map not found, create new one and add it to the kustomization file.\n\tcm := &types.ConfigMapArgs{Name: name}\n\tm.ConfigMapGenerator = append(m.ConfigMapGenerator, *cm)\n\treturn &m.ConfigMapGenerator[len(m.ConfigMapGenerator)-1]\n}\n\nfunc mergeFlagsIntoCmArgs(src *types.DataSources, flags cMapFlagsAndArgs) error {\n\tsrc.LiteralSources = append(src.LiteralSources, flags.LiteralSources...)\n\tsrc.FileSources = append(src.FileSources, flags.FileSources...)\n\tif src.EnvSource != \"\" && src.EnvSource != flags.EnvFileSource {\n\t\treturn fmt.Errorf(\"updating existing env source '%s' not allowed\", src.EnvSource)\n\t}\n\tsrc.EnvSource = flags.EnvFileSource\n\treturn nil\n}\n<|endoftext|>"} {"text":"package invdendpoint\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"strings\"\n)\n\nconst EventEndpoint = \"\/events\"\n\ntype Events []Event\n\ntype Event struct {\n\tId int64 `json:\"id,omitempty\"` \/\/ The event’s unique ID\n\tObject string `json:\"object,omitempty\"`\n\tType string `json:\"type,omitempty\"` \/\/ Event type\n\tTimestamp int64 `json:\"timestamp,omitempty\"`\n\tData json.RawMessage `json:\"data,omitempty\"` \/\/ Contains an object property with the object that was subject of the event and an optional previous property for object.updated events that is a hash of the old values that changed during the event\n}\n\ntype EventObject struct {\n\tObject *json.RawMessage `json:\"object,omitempty\"`\n\tPreviousObject *json.RawMessage `json:\"previous,omitempty\"`\n}\n\nfunc (e *Event) ParseEventObject() (*json.RawMessage, error) {\n\tdata := e.Data\n\n\teo := new(EventObject)\n\n\tb, err := data.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(b, eo)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eo.Object == nil {\n\t\treturn nil, errors.New(\"Could not parse event object\")\n\t}\n\n\treturn eo.Object, nil\n}\n\nfunc (e *Event) ParseEventPreviousObject() (*json.RawMessage, error) {\n\tdata := e.Data\n\n\teo := new(EventObject)\n\n\tb, err := data.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(b, eo)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eo.Object == nil {\n\t\treturn nil, errors.New(\"Could not parse event object\")\n\t}\n\n\treturn eo.PreviousObject, nil\n}\n\nfunc (e *Event) ParseInvoiceEvent() (*Invoice, error) {\n\teoData, err := e.ParseEventObject()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := eoData.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbClean := CleanMetaDataArray(b)\n\n\tie := new(Invoice)\n\n\terr = json.Unmarshal(bClean, ie)\n\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ie, nil\n}\n\nfunc (e *Event) ParseInvoicePreviousEvent() (*Invoice, error) {\n\teoData, err := e.ParseEventPreviousObject()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eoData == nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := eoData.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbClean := CleanMetaDataArray(b)\n\n\tie := new(Invoice)\n\n\terr = json.Unmarshal(bClean, ie)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ie, nil\n}\n\nfunc CleanMetaDataArray(b []byte) []byte {\n\ts := string(b)\n\ts1 := strings.Replace(s, `\"metadata\": []`, ` \"metadata\": null`, -1)\n\ts1 = strings.Replace(s1, `\"metadata\":[]`, ` \"metadata\": null`, -1)\n\treturn []byte(s1)\n}\nadd parsepayment in event objectpackage invdendpoint\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"strings\"\n)\n\nconst EventEndpoint = \"\/events\"\n\ntype Events []Event\n\ntype Event struct {\n\tId int64 `json:\"id,omitempty\"` \/\/ The event’s unique ID\n\tObject string `json:\"object,omitempty\"`\n\tType string `json:\"type,omitempty\"` \/\/ Event type\n\tTimestamp int64 `json:\"timestamp,omitempty\"`\n\tData json.RawMessage `json:\"data,omitempty\"` \/\/ Contains an object property with the object that was subject of the event and an optional previous property for object.updated events that is a hash of the old values that changed during the event\n}\n\ntype EventObject struct {\n\tObject *json.RawMessage `json:\"object,omitempty\"`\n\tPreviousObject *json.RawMessage `json:\"previous,omitempty\"`\n}\n\nfunc (e *Event) ParseEventObject() (*json.RawMessage, error) {\n\tdata := e.Data\n\n\teo := new(EventObject)\n\n\tb, err := data.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(b, eo)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eo.Object == nil {\n\t\treturn nil, errors.New(\"Could not parse event object\")\n\t}\n\n\treturn eo.Object, nil\n}\n\nfunc (e *Event) ParseEventPreviousObject() (*json.RawMessage, error) {\n\tdata := e.Data\n\n\teo := new(EventObject)\n\n\tb, err := data.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(b, eo)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eo.Object == nil {\n\t\treturn nil, errors.New(\"Could not parse event object\")\n\t}\n\n\treturn eo.PreviousObject, nil\n}\n\nfunc (e *Event) ParseInvoiceEvent() (*Invoice, error) {\n\teoData, err := e.ParseEventObject()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := eoData.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbClean := CleanMetaDataArray(b)\n\n\tie := new(Invoice)\n\n\terr = json.Unmarshal(bClean, ie)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ie, nil\n}\n\nfunc (e *Event) ParseInvoicePreviousEvent() (*Invoice, error) {\n\teoData, err := e.ParseEventPreviousObject()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif eoData == nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := eoData.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbClean := CleanMetaDataArray(b)\n\n\tie := new(Invoice)\n\n\terr = json.Unmarshal(bClean, ie)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ie, nil\n}\n\nfunc (e *Event) ParsePaymentEvent() (*Payment, error) {\n\teoData, err := e.ParseEventObject()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := eoData.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbClean := CleanMetaDataArray(b)\n\n\tie := new(Payment)\n\n\terr = 
json.Unmarshal(bClean, ie)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ie, nil\n}\n\nfunc CleanMetaDataArray(b []byte) []byte {\n\ts := string(b)\n\ts1 := strings.Replace(s, `\"metadata\": []`, ` \"metadata\": null`, -1)\n\ts1 = strings.Replace(s1, `\"metadata\":[]`, ` \"metadata\": null`, -1)\n\treturn []byte(s1)\n}\n<|endoftext|>"} {"text":"package fakedata\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype generator struct {\n\tf func(Column) string\n\tdesc string\n}\n\nvar generators map[string]generator\n\nfunc generate(column Column) string {\n\tif gen, ok := generators[column.Key]; ok {\n\t\treturn gen.f(column)\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Generators returns all the available generators\nfunc Generators() []string {\n\tgens := make([]string, 0)\n\n\tfor k := range generators {\n\t\tgens = append(gens, k)\n\t}\n\n\tsort.Strings(gens)\n\treturn gens\n}\n\nfunc date() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn strconv.FormatInt(time.Now().UnixNano(), 10)\n\t}\n}\n\nfunc withDictKey(key string) func(Column) string {\n\treturn func(column Column) string {\n\t\treturn dict[key][rand.Intn(len(dict[key]))]\n\t}\n}\n\nfunc withSep(left, right Column, sep string) func(column Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%s%s%s\", generate(left), sep, generate(right))\n\t}\n}\n\nfunc id() func(Column) string {\n\treturn func(column Column) string {\n\t\tchars := []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\")\n\t\tret := make([]rune, 10)\n\n\t\tfor i := range ret {\n\t\t\tret[i] = chars[rand.Intn(len(chars))]\n\t\t}\n\n\t\treturn string(ret)\n\t}\n}\n\nfunc ipv4() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%d.%d.%d.%d\", 1+rand.Intn(253), rand.Intn(255), rand.Intn(255), 1+rand.Intn(253))\n\t}\n\n}\n\nfunc ipv6() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"2001:cafe:%x:%x:%x:%x:%x:%x\", rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255))\n\t}\n\n}\n\nfunc mac() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%x:%x:%x:%x:%x:%x\", rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255))\n\t}\n}\n\nfunc latitute() func(Column) string {\n\treturn func(column Column) string {\n\t\tlattitude := (rand.Float64() * 180) - 90\n\t\treturn strconv.FormatFloat(lattitude, 'f', 6, 64)\n\t}\n}\n\nfunc longitude() func(Column) string {\n\treturn func(column Column) string {\n\t\tlongitude := (rand.Float64() * 360) - 180\n\t\treturn strconv.FormatFloat(longitude, 'f', 6, 64)\n\t}\n}\n\nfunc double() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn strconv.FormatFloat(rand.NormFloat64()*1000, 'f', 4, 64)\n\t}\n}\n\nfunc integer() func(Column) string {\n\treturn func(column Column) string {\n\t\tmin := 0\n\t\tmax := 1000\n\n\t\tif len(column.Range) > 0 {\n\t\t\trng := strings.Split(column.Range, \"..\")\n\n\t\t\tm, err := strconv.Atoi(rng[0])\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\tmin = m\n\n\t\t\tif len(rng) > 1 && len(rng[1]) > 0 {\n\t\t\t\tm, err := strconv.Atoi(rng[1])\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err.Error())\n\t\t\t\t}\n\t\t\t\tmax = m\n\t\t\t}\n\t\t}\n\n\t\tif min > max {\n\t\t\tlog.Fatalf(\"%d is smaller than %d in Column(%s=%s)\", max, min, 
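\/*\n\tThe code above parses Column.Range as \"min..max\", where max may be omitted\n\t(defaults: min 0, max 1000). A sketch of a column spec that exercises it,\n\tusing only types from this package:\n\n\t\tc := Column{Name: \"age\", Key: \"int\", Range: \"18..65\"}\n\t\t\/\/ generate(c) returns an integer in the half-open range [18, 65)\n*\/ 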
column.Name, column.Key)\n\t\t}\n\t\treturn strconv.Itoa(min + rand.Intn(max-min))\n\t}\n}\n\nfunc init() {\n\tgenerators = make(map[string]generator)\n\n\tgenerators[\"date\"] = generator{desc: \"date\", f: date()}\n\n\tfor key := range dict {\n\t\tgenerators[key] = generator{desc: key, f: withDictKey(key)}\n\t}\n\n\tgenerators[\"name\"] = generator{desc: \"name\", f: withSep(Column{Key: \"name.first\"}, Column{Key: \"name.last\"}, \" \")}\n\tgenerators[\"email\"] = generator{desc: \"email\", f: withSep(Column{Key: \"username\"}, Column{Key: \"domain\"}, \"@\")}\n\tgenerators[\"domain\"] = generator{desc: \"domain\", f: withSep(Column{Key: \"domain.name\"}, Column{Key: \"domain.tld\"}, \".\")}\n\n\tgenerators[\"id\"] = generator{desc: \"id\", f: id()}\n\n\tgenerators[\"ipv4\"] = generator{desc: \"ipv4\", f: ipv4()}\n\tgenerators[\"ipv6\"] = generator{desc: \"ipv6\", f: ipv6()}\n\n\tgenerators[\"mac.address\"] = generator{desc: \"mac address\", f: mac()}\n\n\tgenerators[\"latitute\"] = generator{desc: \"lat\", f: latitute()}\n\tgenerators[\"longitude\"] = generator{desc: \"longitude\", f: longitude()}\n\n\tgenerators[\"double\"] = generator{desc: \"double\", f: double()}\n\n\tgenerators[\"int\"] = generator{desc: \"integer generator\", f: integer()}\n}\nThis generator doesn't seem helpful and needs rewritingpackage fakedata\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype generator struct {\n\tf func(Column) string\n\tdesc string\n}\n\nvar generators map[string]generator\n\nfunc generate(column Column) string {\n\tif gen, ok := generators[column.Key]; ok {\n\t\treturn gen.f(column)\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Generators returns all the available generators\nfunc Generators() []string {\n\tgens := make([]string, 0)\n\n\tfor k := range generators {\n\t\tgens = append(gens, k)\n\t}\n\n\tsort.Strings(gens)\n\treturn gens\n}\n\nfunc date() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn strconv.FormatInt(time.Now().UnixNano(), 10)\n\t}\n}\n\nfunc withDictKey(key string) func(Column) string {\n\treturn func(column Column) string {\n\t\treturn dict[key][rand.Intn(len(dict[key]))]\n\t}\n}\n\nfunc withSep(left, right Column, sep string) func(column Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%s%s%s\", generate(left), sep, generate(right))\n\t}\n}\n\nfunc ipv4() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%d.%d.%d.%d\", 1+rand.Intn(253), rand.Intn(255), rand.Intn(255), 1+rand.Intn(253))\n\t}\n\n}\n\nfunc ipv6() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"2001:cafe:%x:%x:%x:%x:%x:%x\", rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255))\n\t}\n\n}\n\nfunc mac() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn fmt.Sprintf(\"%x:%x:%x:%x:%x:%x\", rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255), rand.Intn(255))\n\t}\n}\n\nfunc latitute() func(Column) string {\n\treturn func(column Column) string {\n\t\tlattitude := (rand.Float64() * 180) - 90\n\t\treturn strconv.FormatFloat(lattitude, 'f', 6, 64)\n\t}\n}\n\nfunc longitude() func(Column) string {\n\treturn func(column Column) string {\n\t\tlongitude := (rand.Float64() * 360) - 180\n\t\treturn strconv.FormatFloat(longitude, 'f', 6, 64)\n\t}\n}\n\nfunc double() func(Column) string {\n\treturn func(column Column) string {\n\t\treturn strconv.FormatFloat(rand.NormFloat64()*1000, 'f', 4, 64)\n\t}\n}\n\nfunc integer() func(Column) string {\n\treturn func(column Column) string {\n\t\tmin := 0\n\t\tmax := 1000\n\n\t\tif len(column.Range) > 0 {\n\t\t\trng := strings.Split(column.Range, \"..\")\n\n\t\t\tm, err := strconv.Atoi(rng[0])\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t}\n\t\t\tmin = m\n\n\t\t\tif len(rng) > 1 && len(rng[1]) > 0 {\n\t\t\t\tm, err := strconv.Atoi(rng[1])\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err.Error())\n\t\t\t\t}\n\t\t\t\tmax = m\n\t\t\t}\n\t\t}\n\n\t\tif min > max {\n\t\t\tlog.Fatalf(\"%d is smaller than %d in Column(%s=%s)\", max, min, column.Name, column.Key)\n\t\t}\n\t\treturn strconv.Itoa(min + rand.Intn(max-min))\n\t}\n}\n\nfunc init() {\n\tgenerators = make(map[string]generator)\n\n\tgenerators[\"date\"] = generator{desc: \"date\", f: date()}\n\n\tfor key := range dict {\n\t\tgenerators[key] = generator{desc: key, f: withDictKey(key)}\n\t}\n\n\tgenerators[\"name\"] = generator{desc: \"name\", f: withSep(Column{Key: \"name.first\"}, Column{Key: \"name.last\"}, \" \")}\n\tgenerators[\"email\"] = generator{desc: \"email\", f: withSep(Column{Key: \"username\"}, Column{Key: \"domain\"}, \"@\")}\n\tgenerators[\"domain\"] = generator{desc: \"domain\", f: withSep(Column{Key: \"domain.name\"}, Column{Key: \"domain.tld\"}, \".\")}\n\n\tgenerators[\"ipv4\"] = generator{desc: \"ipv4\", f: ipv4()}\n\tgenerators[\"ipv6\"] = generator{desc: \"ipv6\", f: ipv6()}\n\n\tgenerators[\"mac.address\"] = generator{desc: \"mac address\", f: mac()}\n\n\tgenerators[\"latitute\"] = generator{desc: \"lat\", f: latitute()}\n\tgenerators[\"longitude\"] = generator{desc: \"longitude\", f: longitude()}\n\n\tgenerators[\"double\"] = generator{desc: \"double\", f: double()}\n\n\tgenerators[\"int\"] = generator{desc: \"integer generator\", f: integer()}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2020 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kernel\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/context\"\n\t\"gvisor.dev\/gvisor\/pkg\/coverage\"\n\t\"gvisor.dev\/gvisor\/pkg\/safemem\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/memmap\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/mm\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/pgalloc\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/usage\"\n\t\"gvisor.dev\/gvisor\/pkg\/syserror\"\n\t\"gvisor.dev\/gvisor\/pkg\/usermem\"\n)\n\n\/\/ kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov\n\/\/ area. On Linux, the maximum is INT_MAX \/ 8.\nconst kcovAreaSizeMax = 10 * 1024 * 1024\n\n\/\/ Kcov provides kernel coverage data to userspace through a memory-mapped\n\/\/ region, as kcov does in Linux.\n\/\/\n\/\/ To give the illusion that the data is always up to date, we update the shared\n\/\/ memory every time before we return to userspace.\ntype Kcov struct {\n\t\/\/ mfp provides application memory. 
It is immutable after creation.\n\tmfp pgalloc.MemoryFileProvider\n\n\t\/\/ mu protects all of the fields below.\n\tmu sync.RWMutex\n\n\t\/\/ mode is the current kcov mode.\n\tmode uint8\n\n\t\/\/ size is the size of the mapping through which the kernel conveys coverage\n\t\/\/ information to userspace.\n\tsize uint64\n\n\t\/\/ owningTask is the task that currently owns coverage data on the system. The\n\t\/\/ interface for kcov essentially requires that coverage is only going to a\n\t\/\/ single task. Note that kcov should only generate coverage data for the\n\t\/\/ owning task, but we currently generate global coverage.\n\towningTask *Task\n\n\t\/\/ count is a locally cached version of the first uint64 in the kcov data,\n\t\/\/ which is the number of subsequent entries representing PCs.\n\t\/\/\n\t\/\/ It is used with kcovInode.countBlock(), to copy in\/out the first element of\n\t\/\/ the actual data in an efficient manner, avoid boilerplate, and prevent\n\t\/\/ accidental garbage escapes by the temporary counts.\n\tcount uint64\n\n\tmappable *mm.SpecialMappable\n}\n\n\/\/ NewKcov creates and returns a Kcov instance.\nfunc (k *Kernel) NewKcov() *Kcov {\n\treturn &Kcov{\n\t\tmfp: k,\n\t}\n}\n\nvar coveragePool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn make([]byte, 0)\n\t},\n}\n\n\/\/ TaskWork implements TaskWorker.TaskWork.\nfunc (kcov *Kcov) TaskWork(t *Task) {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_TRACE_PC {\n\t\treturn\n\t}\n\n\trw := &kcovReadWriter{\n\t\tmf: kcov.mfp.MemoryFile(),\n\t\tfr: kcov.mappable.FileRange(),\n\t}\n\n\t\/\/ Read in the PC count.\n\tif _, err := safemem.ReadFullToBlocks(rw, kcov.countBlock()); err != nil {\n\t\tpanic(fmt.Sprintf(\"Internal error reading count from kcov area: %v\", err))\n\t}\n\n\trw.off = 8 * (1 + kcov.count)\n\tn := coverage.ConsumeCoverageData(&kcovIOWriter{rw})\n\n\t\/\/ Update the pc count, based on the number of entries written. 
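\n\t\/\/ (Area layout, per the count field comment on the struct above: word 0\n\t\/\/ holds the number of recorded PCs and words 1..count hold the PCs, which\n\t\/\/ is why writing resumes at byte offset 8*(1+count).)\n\t\/\/ 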
Note that if\n\t\/\/ we reached the end of the kcov area, we may not have written everything in\n\t\/\/ output.\n\tkcov.count += uint64(n \/ 8)\n\trw.off = 0\n\tif _, err := safemem.WriteFullFromBlocks(rw, kcov.countBlock()); err != nil {\n\t\tpanic(fmt.Sprintf(\"Internal error writing count to kcov area: %v\", err))\n\t}\n\n\t\/\/ Re-register for future work.\n\tt.RegisterWork(kcov)\n}\n\n\/\/ InitTrace performs the KCOV_INIT_TRACE ioctl.\nfunc (kcov *Kcov) InitTrace(size uint64) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_DISABLED {\n\t\treturn syserror.EBUSY\n\t}\n\n\t\/\/ To simplify all the logic around mapping, we require that the length of the\n\t\/\/ shared region is a multiple of the system page size.\n\tif (8*size)&(usermem.PageSize-1) != 0 {\n\t\treturn syserror.EINVAL\n\t}\n\n\t\/\/ We need space for at least two uint64s to hold current position and a\n\t\/\/ single PC.\n\tif size < 2 || size > kcovAreaSizeMax {\n\t\treturn syserror.EINVAL\n\t}\n\n\tkcov.size = size\n\tkcov.mode = linux.KCOV_MODE_INIT\n\treturn nil\n}\n\n\/\/ EnableTrace performs the KCOV_ENABLE_TRACE ioctl.\nfunc (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {\n\tt := TaskFromContext(ctx)\n\tif t == nil {\n\t\tpanic(\"kcovInode.EnableTrace() cannot be used outside of a task goroutine\")\n\t}\n\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\t\/\/ KCOV_ENABLE must be preceded by KCOV_INIT_TRACE and an mmap call.\n\tif kcov.mode != linux.KCOV_MODE_INIT || kcov.mappable == nil {\n\t\treturn syserror.EINVAL\n\t}\n\n\tswitch traceKind {\n\tcase linux.KCOV_TRACE_PC:\n\t\tkcov.mode = linux.KCOV_MODE_TRACE_PC\n\tcase linux.KCOV_TRACE_CMP:\n\t\t\/\/ We do not support KCOV_MODE_TRACE_CMP.\n\t\treturn syserror.ENOTSUP\n\tdefault:\n\t\treturn syserror.EINVAL\n\t}\n\n\tif kcov.owningTask != nil && kcov.owningTask != t {\n\t\treturn syserror.EBUSY\n\t}\n\n\tkcov.owningTask = t\n\tt.SetKcov(kcov)\n\tt.RegisterWork(kcov)\n\n\t\/\/ Clear existing coverage data; the task expects to read only coverage data\n\t\/\/ from the time it is activated.\n\tcoverage.ClearCoverageData()\n\treturn nil\n}\n\n\/\/ DisableTrace performs the KCOV_DISABLE_TRACE ioctl.\nfunc (kcov *Kcov) DisableTrace(ctx context.Context) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tt := TaskFromContext(ctx)\n\tif t == nil {\n\t\tpanic(\"kcovInode.EnableTrace() cannot be used outside of a task goroutine\")\n\t}\n\n\tif t != kcov.owningTask {\n\t\treturn syserror.EINVAL\n\t}\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tkcov.mappable = nil\n\treturn nil\n}\n\n\/\/ Clear resets the mode and clears the owning task and memory mapping for kcov.\n\/\/ It is called when the fd corresponding to kcov is closed. Note that the mode\n\/\/ needs to be set so that the next call to kcov.TaskWork() will exit early.\nfunc (kcov *Kcov) Clear() {\n\tkcov.mu.Lock()\n\tkcov.clearLocked()\n\tkcov.mu.Unlock()\n}\n\nfunc (kcov *Kcov) clearLocked() {\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tif kcov.mappable != nil {\n\t\tkcov.mappable = nil\n\t}\n}\n\n\/\/ OnTaskExit is called when the owning task exits. 
It is similar to\n\/\/ kcov.Clear(), except the memory mapping is not cleared, so that the same\n\/\/ mapping can be used in the future if kcov is enabled again by another task.\nfunc (kcov *Kcov) OnTaskExit() {\n\tkcov.mu.Lock()\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tkcov.mu.Unlock()\n}\n\n\/\/ ConfigureMMap is called by the vfs.FileDescription for this kcov instance to\n\/\/ implement vfs.FileDescription.ConfigureMMap.\nfunc (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_INIT {\n\t\treturn syserror.EINVAL\n\t}\n\n\tif kcov.mappable == nil {\n\t\t\/\/ Set up the kcov area.\n\t\tfr, err := kcov.mfp.MemoryFile().Allocate(kcov.size*8, usage.Anonymous)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Get the thread id for the mmap name.\n\t\tt := TaskFromContext(ctx)\n\t\tif t == nil {\n\t\t\tpanic(\"ThreadFromContext returned nil\")\n\t\t}\n\t\t\/\/ For convenience, a special mappable is used here. Note that these mappings\n\t\t\/\/ will look different under \/proc\/[pid]\/maps than they do on Linux.\n\t\tkcov.mappable = mm.NewSpecialMappable(fmt.Sprintf(\"[kcov:%d]\", t.ThreadID()), kcov.mfp, fr)\n\t}\n\topts.Mappable = kcov.mappable\n\topts.MappingIdentity = kcov.mappable\n\treturn nil\n}\n\n\/\/ kcovReadWriter implements safemem.Reader and safemem.Writer.\ntype kcovReadWriter struct {\n\toff uint64\n\tmf *pgalloc.MemoryFile\n\tfr memmap.FileRange\n}\n\n\/\/ ReadToBlocks implements safemem.Reader.ReadToBlocks.\nfunc (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {\n\tif dsts.IsEmpty() {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Limit the read to the kcov range and check for overflow.\n\tif rw.fr.Length() <= rw.off {\n\t\treturn 0, io.EOF\n\t}\n\tstart := rw.fr.Start + rw.off\n\tend := rw.fr.Start + rw.fr.Length()\n\tif rend := start + dsts.NumBytes(); rend < end {\n\t\tend = rend\n\t}\n\n\t\/\/ Get internal mappings.\n\tbs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Read)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Copy from internal mappings.\n\tn, err := safemem.CopySeq(dsts, bs)\n\trw.off += n\n\treturn n, err\n}\n\n\/\/ WriteFromBlocks implements safemem.Writer.WriteFromBlocks.\nfunc (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {\n\tif srcs.IsEmpty() {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Limit the write to the kcov area and check for overflow.\n\tif rw.fr.Length() <= rw.off {\n\t\treturn 0, io.EOF\n\t}\n\tstart := rw.fr.Start + rw.off\n\tend := rw.fr.Start + rw.fr.Length()\n\tif wend := start + srcs.NumBytes(); wend < end {\n\t\tend = wend\n\t}\n\n\t\/\/ Get internal mapping.\n\tbs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Write)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Copy to internal mapping.\n\tn, err := safemem.CopySeq(bs, srcs)\n\trw.off += n\n\treturn n, err\n}\n\n\/\/ kcovIOWriter implements io.Writer as a basic wrapper over kcovReadWriter.\ntype kcovIOWriter struct {\n\trw *kcovReadWriter\n}\n\n\/\/ Write implements io.Writer.Write.\nfunc (w *kcovIOWriter) Write(p []byte) (int, error) {\n\tbs := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(p))\n\tn, err := safemem.WriteFullFromBlocks(w.rw, bs)\n\treturn int(n), err\n}\nSimplify nil assignment in kcov.\/\/ Copyright 2020 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with 
the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kernel\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/context\"\n\t\"gvisor.dev\/gvisor\/pkg\/coverage\"\n\t\"gvisor.dev\/gvisor\/pkg\/safemem\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/memmap\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/mm\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/pgalloc\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/usage\"\n\t\"gvisor.dev\/gvisor\/pkg\/syserror\"\n\t\"gvisor.dev\/gvisor\/pkg\/usermem\"\n)\n\n\/\/ kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov\n\/\/ area. On Linux, the maximum is INT_MAX \/ 8.\nconst kcovAreaSizeMax = 10 * 1024 * 1024\n\n\/\/ Kcov provides kernel coverage data to userspace through a memory-mapped\n\/\/ region, as kcov does in Linux.\n\/\/\n\/\/ To give the illusion that the data is always up to date, we update the shared\n\/\/ memory every time before we return to userspace.\ntype Kcov struct {\n\t\/\/ mfp provides application memory. It is immutable after creation.\n\tmfp pgalloc.MemoryFileProvider\n\n\t\/\/ mu protects all of the fields below.\n\tmu sync.RWMutex\n\n\t\/\/ mode is the current kcov mode.\n\tmode uint8\n\n\t\/\/ size is the size of the mapping through which the kernel conveys coverage\n\t\/\/ information to userspace.\n\tsize uint64\n\n\t\/\/ owningTask is the task that currently owns coverage data on the system. The\n\t\/\/ interface for kcov essentially requires that coverage is only going to a\n\t\/\/ single task. Note that kcov should only generate coverage data for the\n\t\/\/ owning task, but we currently generate global coverage.\n\towningTask *Task\n\n\t\/\/ count is a locally cached version of the first uint64 in the kcov data,\n\t\/\/ which is the number of subsequent entries representing PCs.\n\t\/\/\n\t\/\/ It is used with kcovInode.countBlock(), to copy in\/out the first element of\n\t\/\/ the actual data in an efficient manner, avoid boilerplate, and prevent\n\t\/\/ accidental garbage escapes by the temporary counts.\n\tcount uint64\n\n\tmappable *mm.SpecialMappable\n}\n\n\/\/ NewKcov creates and returns a Kcov instance.\nfunc (k *Kernel) NewKcov() *Kcov {\n\treturn &Kcov{\n\t\tmfp: k,\n\t}\n}\n\nvar coveragePool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn make([]byte, 0)\n\t},\n}\n\n\/\/ TaskWork implements TaskWorker.TaskWork.\nfunc (kcov *Kcov) TaskWork(t *Task) {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_TRACE_PC {\n\t\treturn\n\t}\n\n\trw := &kcovReadWriter{\n\t\tmf: kcov.mfp.MemoryFile(),\n\t\tfr: kcov.mappable.FileRange(),\n\t}\n\n\t\/\/ Read in the PC count.\n\tif _, err := safemem.ReadFullToBlocks(rw, kcov.countBlock()); err != nil {\n\t\tpanic(fmt.Sprintf(\"Internal error reading count from kcov area: %v\", err))\n\t}\n\n\trw.off = 8 * (1 + kcov.count)\n\tn := coverage.ConsumeCoverageData(&kcovIOWriter{rw})\n\n\t\/\/ Update the pc count, based on the number of entries written. 
Note that if\n\t\/\/ we reached the end of the kcov area, we may not have written everything in\n\t\/\/ output.\n\tkcov.count += uint64(n \/ 8)\n\trw.off = 0\n\tif _, err := safemem.WriteFullFromBlocks(rw, kcov.countBlock()); err != nil {\n\t\tpanic(fmt.Sprintf(\"Internal error writing count to kcov area: %v\", err))\n\t}\n\n\t\/\/ Re-register for future work.\n\tt.RegisterWork(kcov)\n}\n\n\/\/ InitTrace performs the KCOV_INIT_TRACE ioctl.\nfunc (kcov *Kcov) InitTrace(size uint64) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_DISABLED {\n\t\treturn syserror.EBUSY\n\t}\n\n\t\/\/ To simplify all the logic around mapping, we require that the length of the\n\t\/\/ shared region is a multiple of the system page size.\n\tif (8*size)&(usermem.PageSize-1) != 0 {\n\t\treturn syserror.EINVAL\n\t}\n\n\t\/\/ We need space for at least two uint64s to hold current position and a\n\t\/\/ single PC.\n\tif size < 2 || size > kcovAreaSizeMax {\n\t\treturn syserror.EINVAL\n\t}\n\n\tkcov.size = size\n\tkcov.mode = linux.KCOV_MODE_INIT\n\treturn nil\n}\n\n\/\/ EnableTrace performs the KCOV_ENABLE_TRACE ioctl.\nfunc (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {\n\tt := TaskFromContext(ctx)\n\tif t == nil {\n\t\tpanic(\"kcovInode.EnableTrace() cannot be used outside of a task goroutine\")\n\t}\n\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\t\/\/ KCOV_ENABLE must be preceded by KCOV_INIT_TRACE and an mmap call.\n\tif kcov.mode != linux.KCOV_MODE_INIT || kcov.mappable == nil {\n\t\treturn syserror.EINVAL\n\t}\n\n\tswitch traceKind {\n\tcase linux.KCOV_TRACE_PC:\n\t\tkcov.mode = linux.KCOV_MODE_TRACE_PC\n\tcase linux.KCOV_TRACE_CMP:\n\t\t\/\/ We do not support KCOV_MODE_TRACE_CMP.\n\t\treturn syserror.ENOTSUP\n\tdefault:\n\t\treturn syserror.EINVAL\n\t}\n\n\tif kcov.owningTask != nil && kcov.owningTask != t {\n\t\treturn syserror.EBUSY\n\t}\n\n\tkcov.owningTask = t\n\tt.SetKcov(kcov)\n\tt.RegisterWork(kcov)\n\n\t\/\/ Clear existing coverage data; the task expects to read only coverage data\n\t\/\/ from the time it is activated.\n\tcoverage.ClearCoverageData()\n\treturn nil\n}\n\n\/\/ DisableTrace performs the KCOV_DISABLE_TRACE ioctl.\nfunc (kcov *Kcov) DisableTrace(ctx context.Context) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tt := TaskFromContext(ctx)\n\tif t == nil {\n\t\tpanic(\"kcovInode.EnableTrace() cannot be used outside of a task goroutine\")\n\t}\n\n\tif t != kcov.owningTask {\n\t\treturn syserror.EINVAL\n\t}\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tkcov.mappable = nil\n\treturn nil\n}\n\n\/\/ Clear resets the mode and clears the owning task and memory mapping for kcov.\n\/\/ It is called when the fd corresponding to kcov is closed. Note that the mode\n\/\/ needs to be set so that the next call to kcov.TaskWork() will exit early.\nfunc (kcov *Kcov) Clear() {\n\tkcov.mu.Lock()\n\tkcov.clearLocked()\n\tkcov.mu.Unlock()\n}\n\nfunc (kcov *Kcov) clearLocked() {\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tkcov.mappable = nil\n}\n\n\/\/ OnTaskExit is called when the owning task exits. 
It is similar to\n\/\/ kcov.Clear(), except the memory mapping is not cleared, so that the same\n\/\/ mapping can be used in the future if kcov is enabled again by another task.\nfunc (kcov *Kcov) OnTaskExit() {\n\tkcov.mu.Lock()\n\tkcov.mode = linux.KCOV_MODE_INIT\n\tkcov.owningTask = nil\n\tkcov.mu.Unlock()\n}\n\n\/\/ ConfigureMMap is called by the vfs.FileDescription for this kcov instance to\n\/\/ implement vfs.FileDescription.ConfigureMMap.\nfunc (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\n\tkcov.mu.Lock()\n\tdefer kcov.mu.Unlock()\n\n\tif kcov.mode != linux.KCOV_MODE_INIT {\n\t\treturn syserror.EINVAL\n\t}\n\n\tif kcov.mappable == nil {\n\t\t\/\/ Set up the kcov area.\n\t\tfr, err := kcov.mfp.MemoryFile().Allocate(kcov.size*8, usage.Anonymous)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Get the thread id for the mmap name.\n\t\tt := TaskFromContext(ctx)\n\t\tif t == nil {\n\t\t\tpanic(\"ThreadFromContext returned nil\")\n\t\t}\n\t\t\/\/ For convenience, a special mappable is used here. Note that these mappings\n\t\t\/\/ will look different under \/proc\/[pid]\/maps than they do on Linux.\n\t\tkcov.mappable = mm.NewSpecialMappable(fmt.Sprintf(\"[kcov:%d]\", t.ThreadID()), kcov.mfp, fr)\n\t}\n\topts.Mappable = kcov.mappable\n\topts.MappingIdentity = kcov.mappable\n\treturn nil\n}\n\n\/\/ kcovReadWriter implements safemem.Reader and safemem.Writer.\ntype kcovReadWriter struct {\n\toff uint64\n\tmf *pgalloc.MemoryFile\n\tfr memmap.FileRange\n}\n\n\/\/ ReadToBlocks implements safemem.Reader.ReadToBlocks.\nfunc (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {\n\tif dsts.IsEmpty() {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Limit the read to the kcov range and check for overflow.\n\tif rw.fr.Length() <= rw.off {\n\t\treturn 0, io.EOF\n\t}\n\tstart := rw.fr.Start + rw.off\n\tend := rw.fr.Start + rw.fr.Length()\n\tif rend := start + dsts.NumBytes(); rend < end {\n\t\tend = rend\n\t}\n\n\t\/\/ Get internal mappings.\n\tbs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Read)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Copy from internal mappings.\n\tn, err := safemem.CopySeq(dsts, bs)\n\trw.off += n\n\treturn n, err\n}\n\n\/\/ WriteFromBlocks implements safemem.Writer.WriteFromBlocks.\nfunc (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {\n\tif srcs.IsEmpty() {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Limit the write to the kcov area and check for overflow.\n\tif rw.fr.Length() <= rw.off {\n\t\treturn 0, io.EOF\n\t}\n\tstart := rw.fr.Start + rw.off\n\tend := rw.fr.Start + rw.fr.Length()\n\tif wend := start + srcs.NumBytes(); wend < end {\n\t\tend = wend\n\t}\n\n\t\/\/ Get internal mapping.\n\tbs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Write)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ Copy to internal mapping.\n\tn, err := safemem.CopySeq(bs, srcs)\n\trw.off += n\n\treturn n, err\n}\n\n\/\/ kcovIOWriter implements io.Writer as a basic wrapper over kcovReadWriter.\ntype kcovIOWriter struct {\n\trw *kcovReadWriter\n}\n\n\/\/ Write implements io.Writer.Write.\nfunc (w *kcovIOWriter) Write(p []byte) (int, error) {\n\tbs := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(p))\n\tn, err := safemem.WriteFullFromBlocks(w.rw, bs)\n\treturn int(n), err\n}\n<|endoftext|>"} {"text":"package forge\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ Section struct holds a map of values\ntype Section struct {\n\tcomments 
[]string\n\tincludes []string\n\tparent *Section\n\tvalues map[string]Value\n}\n\n\/\/ NewSection will create and initialize a new Section\nfunc NewSection() *Section {\n\treturn &Section{\n\t\tcomments: make([]string, 0),\n\t\tincludes: make([]string, 0),\n\t\tvalues: make(map[string]Value),\n\t}\n}\n\nfunc newChildSection(parent *Section) *Section {\n\treturn &Section{\n\t\tcomments: make([]string, 0),\n\t\tincludes: make([]string, 0),\n\t\tparent: parent,\n\t\tvalues: make(map[string]Value),\n\t}\n}\n\n\/\/ AddComment will append a new comment into the section\nfunc (section *Section) AddComment(comment string) {\n\tsection.comments = append(section.comments, comment)\n}\n\n\/\/ AddInclude will append a new filename into the section\nfunc (section *Section) AddInclude(filename string) {\n\tsection.includes = append(section.includes, filename)\n}\n\n\/\/ GetComments will return all the comments were defined for this Section\nfunc (section *Section) GetComments() []string {\n\treturn section.comments\n}\n\n\/\/ GetIncludes will return the filenames of all the includes were parsed for this Section\nfunc (section *Section) GetIncludes() []string {\n\treturn section.includes\n}\n\n\/\/ GetType will respond with the ValueType of this Section (hint, always SECTION)\nfunc (section *Section) GetType() ValueType {\n\treturn SECTION\n}\n\n\/\/ GetValue retrieves the raw underlying value stored in this Section\nfunc (section *Section) GetValue() interface{} {\n\treturn section.values\n}\n\n\/\/ UpdateValue updates the raw underlying value stored in this Section\nfunc (section *Section) UpdateValue(value interface{}) error {\n\tswitch value.(type) {\n\tcase map[string]Value:\n\t\tsection.values = value.(map[string]Value)\n\t\treturn nil\n\t}\n\n\tmsg := fmt.Sprintf(\"unsupported type, %s must be of type `map[string]Value`\", value)\n\treturn errors.New(msg)\n}\n\n\/\/ AddSection adds a new child section to this Section with the provided name\nfunc (section *Section) AddSection(name string) *Section {\n\tchildSection := newChildSection(section)\n\tsection.values[name] = childSection\n\treturn childSection\n}\n\n\/\/ Exists returns true when a value stored under the key exists\nfunc (section *Section) Exists(name string) bool {\n\t_, err := section.Get(name)\n\treturn err == nil\n}\n\n\/\/ Get the value (Primative or Section) stored under the name\n\/\/ will respond with an error if the value does not exist\nfunc (section *Section) Get(name string) (Value, error) {\n\tvalue, ok := section.values[name]\n\tvar err error\n\tif ok == false {\n\t\terr = errors.New(\"value does not exist\")\n\t}\n\treturn value, err\n}\n\n\/\/ GetBoolean will try to get the value stored under name as a bool\n\/\/ will respond with an error if the value does not exist or cannot be converted to a bool\nfunc (section *Section) GetBoolean(name string) (bool, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsBoolean()\n\tcase *Section:\n\t\treturn true, nil\n\t}\n\n\treturn false, errors.New(\"could not convert unknown value to boolean\")\n}\n\n\/\/ GetFloat will try to get the value stored under name as a float64\n\/\/ will respond with an error if the value does not exist or cannot be converted to a float64\nfunc (section *Section) GetFloat(name string) (float64, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn float64(0), err\n\t}\n\n\tswitch value.(type) {\n\tcase 
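\/*\n\tA caller-side sketch of the typed getters and setters in this file (sec and\n\tratio are assumed names):\n\n\t\tsec := NewSection()\n\t\tsec.SetFloat(\"ratio\", 0.25)\n\t\tratio, err := sec.GetFloat(\"ratio\") \/\/ 0.25, nil\n\t\t_, _ = ratio, err\n\t\t_, err = sec.GetFloat(\"missing\") \/\/ err: value does not exist\n*\/ 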
*Primative:\n\t\treturn value.(*Primative).AsFloat()\n\t}\n\n\treturn float64(0), errors.New(\"could not convert non-primative value to float\")\n}\n\n\/\/ GetInteger will try to get the value stored under name as a int64\n\/\/ will respond with an error if the value does not exist or cannot be converted to a int64\nfunc (section *Section) GetInteger(name string) (int64, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn int64(0), err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsInteger()\n\t}\n\n\treturn int64(0), errors.New(\"could not convert non-primative value to integer\")\n}\n\n\/\/ GetList will try to get the value stored under name as a List\n\/\/ will respond with an error if the value does not exist or is not a List\nfunc (section *Section) GetList(name string) (*List, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif value.GetType() == LIST {\n\t\treturn value.(*List), nil\n\t}\n\n\treturn nil, errors.New(\"could not fetch value as list\")\n}\n\n\/\/ GetSection will try to get the value stored under name as a Section\n\/\/ will respond with an error if the value does not exist or is not a Section\nfunc (section *Section) GetSection(name string) (*Section, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif value.GetType() == SECTION {\n\t\treturn value.(*Section), nil\n\t}\n\treturn nil, errors.New(\"could not fetch value as section\")\n}\n\n\/\/ GetString will try to get the value stored under name as a string\n\/\/ will respond with an error if the value does not exist or cannot be converted to a string\nfunc (section *Section) GetString(name string) (string, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsString()\n\t}\n\n\treturn \"\", errors.New(\"could not convert non-primative value to string\")\n}\n\n\/\/ GetParent will get the parent section associated with this Section or nil\n\/\/ if it does not have one\nfunc (section *Section) GetParent() *Section {\n\treturn section.parent\n}\n\n\/\/ HasParent will return true if this Section has a parent\nfunc (section *Section) HasParent() bool {\n\treturn section.parent != nil\n}\n\n\/\/ Keys will return back a list of all setting names in this Section\nfunc (section *Section) Keys() []string {\n\tvar keys []string\n\tfor key := range section.values {\n\t\tkeys = append(keys, key)\n\t}\n\n\tsort.Strings(keys)\n\treturn keys\n}\n\n\/\/ Set will set a value (Primative or Section) to the provided name\nfunc (section *Section) Set(name string, value Value) {\n\tsection.values[name] = value\n}\n\n\/\/ SetBoolean will set the value for name as a bool\nfunc (section *Section) SetBoolean(name string, value bool) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewBoolean(value)\n\t}\n}\n\n\/\/ SetFloat will set the value for name as a float64\nfunc (section *Section) SetFloat(name string, value float64) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewFloat(value)\n\t}\n}\n\n\/\/ SetInteger will set the value for name as a int64\nfunc (section *Section) SetInteger(name string, value int64) {\n\tcurrent, err := 
section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewInteger(value)\n\t}\n}\n\n\/\/ SetNull will set the value for name as nil\nfunc (section *Section) SetNull(name string) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Already is a Null, nothing to do\n\tif err == nil && current.GetType() == NULL {\n\t\treturn\n\t}\n\tsection.Set(name, NewNull())\n}\n\n\/\/ SetString will set the value for name as a string\nfunc (section *Section) SetString(name string, value string) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.Set(name, NewString(value))\n\t}\n}\n\n\/\/ Resolve will recursively try to fetch the provided value and will respond\n\/\/ with an error if the name does not exist or tries to be resolved through\n\/\/ a non-section value\nfunc (section *Section) Resolve(name string) (Value, error) {\n\t\/\/ Used only in error state return value\n\tvar value Value\n\n\tparts := strings.Split(name, \".\")\n\tif len(parts) == 0 {\n\t\treturn value, errors.New(\"no name provided\")\n\t}\n\n\tvar current Value\n\tcurrent = section\n\tfor _, part := range parts {\n\t\tif current.GetType() != SECTION {\n\t\t\treturn value, errors.New(\"trying to resolve value from non-section\")\n\t\t}\n\n\t\tnextCurrent, err := current.(*Section).Get(part)\n\t\tif err != nil {\n\t\t\treturn value, errors.New(\"could not find value in section\")\n\t\t}\n\t\tcurrent = nextCurrent\n\t}\n\treturn current, nil\n}\n\n\/\/ Merge merges the given section to current section. Settings from source\n\/\/ section overwites the values in the current section\nfunc (section *Section) Merge(source *Section) error {\n\tfor _, key := range source.Keys() {\n\t\tsourceValue, _ := source.Get(key)\n\t\ttargetValue, err := section.Get(key)\n\n\t\t\/\/ not found, so add it\n\t\tif err != nil {\n\t\t\tsection.Set(key, sourceValue)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ found existing one and it's type SECTION, merge it\n\t\tif targetValue.GetType() == SECTION {\n\t\t\t\/\/ Source value have to be SECTION type here\n\t\t\tif sourceValue.GetType() != SECTION {\n\t\t\t\treturn fmt.Errorf(\"source (%v) and target (%v) type doesn't match: %v\",\n\t\t\t\t\tsourceValue.GetType(),\n\t\t\t\t\ttargetValue.GetType(),\n\t\t\t\t\tkey)\n\t\t\t}\n\n\t\t\tif err = targetValue.(*Section).Merge(sourceValue.(*Section)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ found existing one, update it\n\t\tif err = targetValue.UpdateValue(sourceValue.GetValue()); err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", err, key)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ToJSON will convert this Section and all it's underlying values and Sections\n\/\/ into JSON as a []byte\nfunc (section *Section) ToJSON() ([]byte, error) {\n\tdata := section.ToMap()\n\treturn json.Marshal(data)\n}\n\n\/\/ ToMap will convert this Section and all it's underlying values and Sections into\n\/\/ a map[string]interface{}\nfunc (section *Section) ToMap() map[string]interface{} {\n\toutput := make(map[string]interface{})\n\n\tfor key, value := range section.values {\n\t\tif value.GetType() == SECTION {\n\t\t\toutput[key] = value.(*Section).ToMap()\n\t\t} else {\n\t\t\toutput[key] = value.GetValue()\n\t\t}\n\t}\n\treturn output\n}\nExpose 'value does not exists' errorpackage forge\n\nimport 
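\/*\n\tThis revision exports the lookup failure as the sentinel ErrNotExists\n\t(declared below), so callers can tell a missing key apart from other\n\tfailures. A caller-side sketch, with sec an assumed *Section:\n\n\t\tif _, err := sec.Get(\"timeout\"); err == ErrNotExists {\n\t\t\tsec.SetInteger(\"timeout\", 30)\n\t\t}\n*\/ 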
(\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ ErrNotExists represents a nonexistent value error\n\tErrNotExists = errors.New(\"value does not exist\")\n)\n\n\/\/ Section struct holds a map of values\ntype Section struct {\n\tcomments []string\n\tincludes []string\n\tparent *Section\n\tvalues map[string]Value\n}\n\n\/\/ NewSection will create and initialize a new Section\nfunc NewSection() *Section {\n\treturn &Section{\n\t\tcomments: make([]string, 0),\n\t\tincludes: make([]string, 0),\n\t\tvalues: make(map[string]Value),\n\t}\n}\n\nfunc newChildSection(parent *Section) *Section {\n\treturn &Section{\n\t\tcomments: make([]string, 0),\n\t\tincludes: make([]string, 0),\n\t\tparent: parent,\n\t\tvalues: make(map[string]Value),\n\t}\n}\n\n\/\/ AddComment will append a new comment into the section\nfunc (section *Section) AddComment(comment string) {\n\tsection.comments = append(section.comments, comment)\n}\n\n\/\/ AddInclude will append a new filename into the section\nfunc (section *Section) AddInclude(filename string) {\n\tsection.includes = append(section.includes, filename)\n}\n\n\/\/ GetComments will return all the comments were defined for this Section\nfunc (section *Section) GetComments() []string {\n\treturn section.comments\n}\n\n\/\/ GetIncludes will return the filenames of all the includes were parsed for this Section\nfunc (section *Section) GetIncludes() []string {\n\treturn section.includes\n}\n\n\/\/ GetType will respond with the ValueType of this Section (hint, always SECTION)\nfunc (section *Section) GetType() ValueType {\n\treturn SECTION\n}\n\n\/\/ GetValue retrieves the raw underlying value stored in this Section\nfunc (section *Section) GetValue() interface{} {\n\treturn section.values\n}\n\n\/\/ UpdateValue updates the raw underlying value stored in this Section\nfunc (section *Section) UpdateValue(value interface{}) error {\n\tswitch value.(type) {\n\tcase map[string]Value:\n\t\tsection.values = value.(map[string]Value)\n\t\treturn nil\n\t}\n\n\tmsg := fmt.Sprintf(\"unsupported type, %s must be of type `map[string]Value`\", value)\n\treturn errors.New(msg)\n}\n\n\/\/ AddSection adds a new child section to this Section with the provided name\nfunc (section *Section) AddSection(name string) *Section {\n\tchildSection := newChildSection(section)\n\tsection.values[name] = childSection\n\treturn childSection\n}\n\n\/\/ Exists returns true when a value stored under the key exists\nfunc (section *Section) Exists(name string) bool {\n\t_, err := section.Get(name)\n\treturn err == nil\n}\n\n\/\/ Get the value (Primative or Section) stored under the name\n\/\/ will respond with an error if the value does not exist\nfunc (section *Section) Get(name string) (Value, error) {\n\tvalue, ok := section.values[name]\n\tvar err error\n\tif ok == false {\n\t\terr = ErrNotExists\n\t}\n\treturn value, err\n}\n\n\/\/ GetBoolean will try to get the value stored under name as a bool\n\/\/ will respond with an error if the value does not exist or cannot be converted to a bool\nfunc (section *Section) GetBoolean(name string) (bool, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsBoolean()\n\tcase *Section:\n\t\treturn true, nil\n\t}\n\n\treturn false, errors.New(\"could not convert unknown value to boolean\")\n}\n\n\/\/ GetFloat will try to get the value stored under name as a float64\n\/\/ will respond with an error if the 
value does not exist or cannot be converted to a float64\nfunc (section *Section) GetFloat(name string) (float64, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn float64(0), err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsFloat()\n\t}\n\n\treturn float64(0), errors.New(\"could not convert non-primative value to float\")\n}\n\n\/\/ GetInteger will try to get the value stored under name as a int64\n\/\/ will respond with an error if the value does not exist or cannot be converted to a int64\nfunc (section *Section) GetInteger(name string) (int64, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn int64(0), err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsInteger()\n\t}\n\n\treturn int64(0), errors.New(\"could not convert non-primative value to integer\")\n}\n\n\/\/ GetList will try to get the value stored under name as a List\n\/\/ will respond with an error if the value does not exist or is not a List\nfunc (section *Section) GetList(name string) (*List, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif value.GetType() == LIST {\n\t\treturn value.(*List), nil\n\t}\n\n\treturn nil, errors.New(\"could not fetch value as list\")\n}\n\n\/\/ GetSection will try to get the value stored under name as a Section\n\/\/ will respond with an error if the value does not exist or is not a Section\nfunc (section *Section) GetSection(name string) (*Section, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif value.GetType() == SECTION {\n\t\treturn value.(*Section), nil\n\t}\n\treturn nil, errors.New(\"could not fetch value as section\")\n}\n\n\/\/ GetString will try to get the value stored under name as a string\n\/\/ will respond with an error if the value does not exist or cannot be converted to a string\nfunc (section *Section) GetString(name string) (string, error) {\n\tvalue, err := section.Get(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch value.(type) {\n\tcase *Primative:\n\t\treturn value.(*Primative).AsString()\n\t}\n\n\treturn \"\", errors.New(\"could not convert non-primative value to string\")\n}\n\n\/\/ GetParent will get the parent section associated with this Section or nil\n\/\/ if it does not have one\nfunc (section *Section) GetParent() *Section {\n\treturn section.parent\n}\n\n\/\/ HasParent will return true if this Section has a parent\nfunc (section *Section) HasParent() bool {\n\treturn section.parent != nil\n}\n\n\/\/ Keys will return back a list of all setting names in this Section\nfunc (section *Section) Keys() []string {\n\tvar keys []string\n\tfor key := range section.values {\n\t\tkeys = append(keys, key)\n\t}\n\n\tsort.Strings(keys)\n\treturn keys\n}\n\n\/\/ Set will set a value (Primative or Section) to the provided name\nfunc (section *Section) Set(name string, value Value) {\n\tsection.values[name] = value\n}\n\n\/\/ SetBoolean will set the value for name as a bool\nfunc (section *Section) SetBoolean(name string, value bool) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewBoolean(value)\n\t}\n}\n\n\/\/ SetFloat will set the value for name as a float64\nfunc (section *Section) SetFloat(name string, value float64) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == 
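\/*\n\tThe setters in this file follow an upsert pattern: when the key exists the\n\tstored value is updated in place via UpdateValue, otherwise a fresh value is\n\tcreated. A caller-side sketch:\n\n\t\tsec.SetFloat(\"ratio\", 0.5)  \/\/ creates the value\n\t\tsec.SetFloat(\"ratio\", 0.75) \/\/ updates it in place\n*\/ 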
nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewFloat(value)\n\t}\n}\n\n\/\/ SetInteger will set the value for name as a int64\nfunc (section *Section) SetInteger(name string, value int64) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.values[name] = NewInteger(value)\n\t}\n}\n\n\/\/ SetNull will set the value for name as nil\nfunc (section *Section) SetNull(name string) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Already is a Null, nothing to do\n\tif err == nil && current.GetType() == NULL {\n\t\treturn\n\t}\n\tsection.Set(name, NewNull())\n}\n\n\/\/ SetString will set the value for name as a string\nfunc (section *Section) SetString(name string, value string) {\n\tcurrent, err := section.Get(name)\n\n\t\/\/ Exists just update the value\/type\n\tif err == nil {\n\t\tcurrent.UpdateValue(value)\n\t} else {\n\t\tsection.Set(name, NewString(value))\n\t}\n}\n\n\/\/ Resolve will recursively try to fetch the provided value and will respond\n\/\/ with an error if the name does not exist or tries to be resolved through\n\/\/ a non-section value\nfunc (section *Section) Resolve(name string) (Value, error) {\n\t\/\/ Used only in error state return value\n\tvar value Value\n\n\tparts := strings.Split(name, \".\")\n\tif len(parts) == 0 {\n\t\treturn value, errors.New(\"no name provided\")\n\t}\n\n\tvar current Value\n\tcurrent = section\n\tfor _, part := range parts {\n\t\tif current.GetType() != SECTION {\n\t\t\treturn value, errors.New(\"trying to resolve value from non-section\")\n\t\t}\n\n\t\tnextCurrent, err := current.(*Section).Get(part)\n\t\tif err != nil {\n\t\t\treturn value, errors.New(\"could not find value in section\")\n\t\t}\n\t\tcurrent = nextCurrent\n\t}\n\treturn current, nil\n}\n\n\/\/ Merge merges the given section to current section. 
Settings from source\n\/\/ section overwites the values in the current section\nfunc (section *Section) Merge(source *Section) error {\n\tfor _, key := range source.Keys() {\n\t\tsourceValue, _ := source.Get(key)\n\t\ttargetValue, err := section.Get(key)\n\n\t\t\/\/ not found, so add it\n\t\tif err != nil {\n\t\t\tsection.Set(key, sourceValue)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ found existing one and it's type SECTION, merge it\n\t\tif targetValue.GetType() == SECTION {\n\t\t\t\/\/ Source value have to be SECTION type here\n\t\t\tif sourceValue.GetType() != SECTION {\n\t\t\t\treturn fmt.Errorf(\"source (%v) and target (%v) type doesn't match: %v\",\n\t\t\t\t\tsourceValue.GetType(),\n\t\t\t\t\ttargetValue.GetType(),\n\t\t\t\t\tkey)\n\t\t\t}\n\n\t\t\tif err = targetValue.(*Section).Merge(sourceValue.(*Section)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ found existing one, update it\n\t\tif err = targetValue.UpdateValue(sourceValue.GetValue()); err != nil {\n\t\t\treturn fmt.Errorf(\"%v: %v\", err, key)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ ToJSON will convert this Section and all it's underlying values and Sections\n\/\/ into JSON as a []byte\nfunc (section *Section) ToJSON() ([]byte, error) {\n\tdata := section.ToMap()\n\treturn json.Marshal(data)\n}\n\n\/\/ ToMap will convert this Section and all it's underlying values and Sections into\n\/\/ a map[string]interface{}\nfunc (section *Section) ToMap() map[string]interface{} {\n\toutput := make(map[string]interface{})\n\n\tfor key, value := range section.values {\n\t\tif value.GetType() == SECTION {\n\t\t\toutput[key] = value.(*Section).ToMap()\n\t\t} else {\n\t\t\toutput[key] = value.GetValue()\n\t\t}\n\t}\n\treturn output\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/andelf\/go-curl\"\n\t\"github.com\/nu7hatch\/gouuid\"\n)\n\ntype Config struct {\n\tLocation string\n\tChecksUrl string\n\tMeasurementsUrl string\n\tMeasurementsUser string\n\tMeasurementsPass string\n}\n\ntype Check struct {\n\tId string `json:\"id\"`\n\tUrl string `json:\"url\"`\n}\n\ntype Measurement struct {\n\tCheck Check `json:\"check\"`\n\tId string `json:\"id\"`\n\tLocation string `json:\"location\"`\n\tT int `json:\"t\"`\n\tExitStatus int `json:\"exit_status\"`\n\tConnectTime float64 `json:\"connect_time,omitempty\"`\n\tStartTransferTime float64 `json:\"starttransfer_time,omitempty\"`\n\tLocalIp string `json:\"local_ip,omitempty\"`\n\tPrimaryIp string `json:\"primary_ip,omitempty\"`\n\tTotalTime float64 `json:\"total_time,omitempty\"`\n\tHttpStatus int `json:\"http_status,omitempty\"`\n\tNameLookupTime float64 `json:\"namelookup_time,omitempty\"`\n}\n\nfunc GetEnvWithDefault(env string, def string) string {\n\ttmp := os.Getenv(env)\n\n\tif tmp == \"\" {\n\t\treturn def\n\t}\n\n\treturn tmp\n}\n\nfunc (c *Check) Measure(config Config) Measurement {\n\tvar m Measurement\n\n\tid, _ := uuid.NewV4()\n\tm.Id = id.String()\n\tm.Check = *c\n\tm.Location = config.Location\n\n\teasy := curl.EasyInit()\n\tdefer easy.Cleanup()\n\n\teasy.Setopt(curl.OPT_URL, c.Url)\n\n\t\/\/ dummy func for curl output\n\tnoOut := func(buf []byte, userdata interface{}) bool {\n\t\treturn true\n\t}\n\n\teasy.Setopt(curl.OPT_WRITEFUNCTION, noOut)\n\teasy.Setopt(curl.OPT_CONNECTTIMEOUT, 10)\n\teasy.Setopt(curl.OPT_TIMEOUT, 10)\n\n\tnow := time.Now()\n\tm.T = 
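\/*\n\tHow the loops below fit together (all names are from this file):\n\tScheduleLoop pushes each Check onto the checks channel once per second,\n\tMeasureLoop runs the curl-based Measure and emits Measurements, and\n\tRecordLoop batches them and POSTs JSON to MeasurementsUrl. A wiring sketch,\n\tmatching main below:\n\n\t\tchecks := make(chan Check)\n\t\tmeasurements := make(chan Measurement)\n\t\tgo MeasureLoop(config, checks, measurements)\n\t\tgo RecordLoop(config, measurements)\n\t\tgo ScheduleLoop(Check{Id: \"1\", Url: \"https:\/\/example.com\"}, checks)\n*\/ 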
int(now.Unix())\n\n\tif err := easy.Perform(); err != nil {\n\t\tif e, ok := err.(curl.CurlError); ok {\n\t\t\tm.ExitStatus = (int(e))\n\t\t\treturn m\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tm.ExitStatus = 0\n\thttp_status, _ := easy.Getinfo(curl.INFO_RESPONSE_CODE)\n\tm.HttpStatus = http_status.(int)\n\n\tconnect_time, _ := easy.Getinfo(curl.INFO_CONNECT_TIME)\n\tm.ConnectTime = connect_time.(float64)\n\n\tnamelookup_time, _ := easy.Getinfo(curl.INFO_NAMELOOKUP_TIME)\n\tm.NameLookupTime = namelookup_time.(float64)\n\n\tstarttransfer_time, _ := easy.Getinfo(curl.INFO_STARTTRANSFER_TIME)\n\tm.StartTransferTime = starttransfer_time.(float64)\n\n\ttotal_time, _ := easy.Getinfo(curl.INFO_TOTAL_TIME)\n\tm.TotalTime = total_time.(float64)\n\n\tlocal_ip, _ := easy.Getinfo(curl.INFO_LOCAL_IP)\n\tm.LocalIp = local_ip.(string)\n\n\tprimary_ip, _ := easy.Getinfo(curl.INFO_PRIMARY_IP)\n\tm.PrimaryIp = primary_ip.(string)\n\n\treturn m\n}\n\nfunc MeasureLoop(config Config, checks chan Check, measurements chan Measurement) {\n\tfor {\n\t\tc := <-checks\n\t\tm := c.Measure(config)\n\n\t\tmeasurements <- m\n\t}\n}\n\nfunc Record(config Config, payload []Measurement) {\n\ts, err := json.Marshal(&payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbody := bytes.NewBuffer(s)\n\treq, err := http.NewRequest(\"POST\", config.MeasurementsUrl, body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tif config.MeasurementsUser != \"\" {\n\t\treq.SetBasicAuth(config.MeasurementsUser, config.MeasurementsPass)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"fn=Record http_code=%d\\n\", resp.StatusCode)\n\tresp.Body.Close()\n}\n\nfunc RecordLoop(config Config, measurements chan Measurement) {\n\ttickChan := time.NewTicker(time.Millisecond * 1000).C\n\tpayload := make([]Measurement, 0, 100)\n\n\tfor {\n\t\tselect {\n\t\tcase m := <-measurements:\n\t\t\tpayload = append(payload, m)\n\t\tcase <-tickChan:\n\t\t\tl := len(payload)\n\t\t\tfmt.Printf(\"fn=RecordLoop payload_size=%d\\n\", l)\n\n\t\t\tif l > 0 {\n\t\t\t\tRecord(config, payload)\n\t\t\t\tpayload = make([]Measurement, 0, 100)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc GetChecks(config Config) []Check {\n\turl := config.ChecksUrl\n\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar checks []Check\n\terr = json.Unmarshal(body, &checks)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn checks\n}\n\nfunc ScheduleLoop(check Check, checks chan Check) {\n\tfor {\n\t\tchecks <- check\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t}\n}\n\nfunc main() {\n\tvar config Config\n\tconfig.Location = GetEnvWithDefault(\"LOCATION\", \"undefined\")\n\tconfig.ChecksUrl = GetEnvWithDefault(\"CHECKS_URL\", \"https:\/\/s3.amazonaws.com\/canary-public-data\/data.json\")\n\tconfig.MeasurementsUrl = GetEnvWithDefault(\"MEASUREMENTS_URL\", \"http:\/\/localhost:5000\/measurements\")\n\n\tu, err := url.Parse(config.MeasurementsUrl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif u.User != nil {\n\t\tconfig.MeasurementsUser = u.User.Username()\n\t\tconfig.MeasurementsPass, _ = u.User.Password()\n\t}\n\n\tmeasurerCount, err := strconv.Atoi(GetEnvWithDefault(\"MEASURER_COUNT\", \"1\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trecorderCount, err := strconv.Atoi(GetEnvWithDefault(\"RECORDER_COUNT\", \"1\"))\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n\tcheck_list := GetChecks(config)\n\n\tchecks := make(chan Check)\n\tmeasurements := make(chan Measurement)\n\n\tfor i := 0; i < measurerCount; i++ {\n\t\tgo MeasureLoop(config, checks, measurements)\n\t}\n\tfor i := 0; i < recorderCount; i++ {\n\t\tgo RecordLoop(config, measurements)\n\t}\n\n\tfor _, c := range check_list {\n\t\tgo ScheduleLoop(c, checks)\n\t}\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\n\t<-sigs\n}\nuse flag to get config, use select {} instead of sigs to block at the endpackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/andelf\/go-curl\"\n\t\"github.com\/nu7hatch\/gouuid\"\n)\n\ntype Config struct {\n\tLocation string\n\tChecksUrl string\n\tMeasurementsUrl string\n\tMeasurementsUser string\n\tMeasurementsPass string\n\tMeasurerCount int\n\tRecorderCount int\n}\n\ntype Check struct {\n\tId string `json:\"id\"`\n\tUrl string `json:\"url\"`\n}\n\ntype Measurement struct {\n\tCheck Check `json:\"check\"`\n\tId string `json:\"id\"`\n\tLocation string `json:\"location\"`\n\tT int `json:\"t\"`\n\tExitStatus int `json:\"exit_status\"`\n\tConnectTime float64 `json:\"connect_time,omitempty\"`\n\tStartTransferTime float64 `json:\"starttransfer_time,omitempty\"`\n\tLocalIp string `json:\"local_ip,omitempty\"`\n\tPrimaryIp string `json:\"primary_ip,omitempty\"`\n\tTotalTime float64 `json:\"total_time,omitempty\"`\n\tHttpStatus int `json:\"http_status,omitempty\"`\n\tNameLookupTime float64 `json:\"namelookup_time,omitempty\"`\n}\n\nfunc (c *Check) Measure(config Config) Measurement {\n\tvar m Measurement\n\n\tid, _ := uuid.NewV4()\n\tm.Id = id.String()\n\tm.Check = *c\n\tm.Location = config.Location\n\n\teasy := curl.EasyInit()\n\tdefer easy.Cleanup()\n\n\teasy.Setopt(curl.OPT_URL, c.Url)\n\n\t\/\/ dummy func for curl output\n\tnoOut := func(buf []byte, userdata interface{}) bool {\n\t\treturn true\n\t}\n\n\teasy.Setopt(curl.OPT_WRITEFUNCTION, noOut)\n\teasy.Setopt(curl.OPT_CONNECTTIMEOUT, 10)\n\teasy.Setopt(curl.OPT_TIMEOUT, 10)\n\n\tnow := time.Now()\n\tm.T = int(now.Unix())\n\n\tif err := easy.Perform(); err != nil {\n\t\tif e, ok := err.(curl.CurlError); ok {\n\t\t\tm.ExitStatus = (int(e))\n\t\t\treturn m\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tm.ExitStatus = 0\n\thttp_status, _ := easy.Getinfo(curl.INFO_RESPONSE_CODE)\n\tm.HttpStatus = http_status.(int)\n\n\tconnect_time, _ := easy.Getinfo(curl.INFO_CONNECT_TIME)\n\tm.ConnectTime = connect_time.(float64)\n\n\tnamelookup_time, _ := easy.Getinfo(curl.INFO_NAMELOOKUP_TIME)\n\tm.NameLookupTime = namelookup_time.(float64)\n\n\tstarttransfer_time, _ := easy.Getinfo(curl.INFO_STARTTRANSFER_TIME)\n\tm.StartTransferTime = starttransfer_time.(float64)\n\n\ttotal_time, _ := easy.Getinfo(curl.INFO_TOTAL_TIME)\n\tm.TotalTime = total_time.(float64)\n\n\tlocal_ip, _ := easy.Getinfo(curl.INFO_LOCAL_IP)\n\tm.LocalIp = local_ip.(string)\n\n\tprimary_ip, _ := easy.Getinfo(curl.INFO_PRIMARY_IP)\n\tm.PrimaryIp = primary_ip.(string)\n\n\treturn m\n}\n\nfunc MeasureLoop(config Config, checks chan Check, measurements chan Measurement) {\n\tfor {\n\t\tc := <-checks\n\t\tm := c.Measure(config)\n\n\t\tmeasurements <- m\n\t}\n}\n\nfunc Record(config Config, payload []Measurement) {\n\ts, err := json.Marshal(&payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbody := bytes.NewBuffer(s)\n\treq, err := http.NewRequest(\"POST\", config.MeasurementsUrl, body)\n\tif err 
!= nil {\n\t\tpanic(err)\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tif config.MeasurementsUser != \"\" {\n\t\treq.SetBasicAuth(config.MeasurementsUser, config.MeasurementsPass)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"fn=Record http_code=%d\\n\", resp.StatusCode)\n\tresp.Body.Close()\n}\n\nfunc RecordLoop(config Config, measurements chan Measurement) {\n\ttickChan := time.NewTicker(time.Millisecond * 1000).C\n\tpayload := make([]Measurement, 0, 100)\n\n\tfor {\n\t\tselect {\n\t\tcase m := <-measurements:\n\t\t\tpayload = append(payload, m)\n\t\tcase <-tickChan:\n\t\t\tl := len(payload)\n\t\t\tfmt.Printf(\"fn=RecordLoop payload_size=%d\\n\", l)\n\n\t\t\tif l > 0 {\n\t\t\t\tRecord(config, payload)\n\t\t\t\tpayload = make([]Measurement, 0, 100)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc GetChecks(config Config) []Check {\n\turl := config.ChecksUrl\n\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar checks []Check\n\terr = json.Unmarshal(body, &checks)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn checks\n}\n\nfunc ScheduleLoop(check Check, checks chan Check) {\n\tfor {\n\t\tchecks <- check\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t}\n}\n\nfunc main() {\n\tconfig := Config{}\n\tflag.StringVar(&config.Location, \"location\", \"undefined\", \"location of this sensor\")\n\tflag.StringVar(&config.ChecksUrl, \"checks_url\", \"https:\/\/s3.amazonaws.com\/canary-public-data\/checks.json\", \"URL for check data\")\n\tflag.StringVar(&config.MeasurementsUrl, \"measurements_url\", \"http:\/\/localhost:5000\/measurements\", \"URL to POST measurements to\")\n\tflag.IntVar(&config.MeasurerCount, \"measurer_count\", 1, \"number of measurers to run\")\n\tflag.IntVar(&config.RecorderCount, \"recorder_count\", 1, \"number of recorders to run\")\n\tflag.Parse()\n\n\tu, err := url.Parse(config.MeasurementsUrl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif u.User != nil {\n\t\tconfig.MeasurementsUser = u.User.Username()\n\t\tconfig.MeasurementsPass, _ = u.User.Password()\n\t}\n\n\tcheck_list := GetChecks(config)\n\n\tchecks := make(chan Check)\n\tmeasurements := make(chan Measurement)\n\n\tfor i := 0; i < config.MeasurerCount; i++ {\n\t\tgo MeasureLoop(config, checks, measurements)\n\t}\n\n\tfor i := 0; i < config.RecorderCount; i++ {\n\t\tgo RecordLoop(config, measurements)\n\t}\n\n\tfor _, c := range check_list {\n\t\tgo ScheduleLoop(c, checks)\n\t}\n\n\tselect {}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/stripe\/sequins\/backend\"\n\t\"github.com\/stripe\/sequins\/index\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype sequinsOptions struct {\n\tLocalPath string\n\tCheckForSuccessFile bool\n}\n\ntype sequins struct {\n\toptions sequinsOptions\n\tbackend backend.Backend\n\tindexMonitor index.IndexReference\n\thttp *http.Server\n\tstarted time.Time\n\tupdated time.Time\n\treloadLock sync.Mutex\n}\n\ntype status struct {\n\tPath string `json:\"path\"`\n\tStarted int64 `json:\"started\"`\n\tUpdated int64 `json:\"updated\"`\n\tCount int `json:\"count\"`\n}\n\nfunc newSequins(backend backend.Backend, options sequinsOptions) *sequins {\n\treturn &sequins{\n\t\toptions: options,\n\t\tbackend: backend,\n\t\treloadLock: sync.Mutex{},\n\t}\n}\n\nfunc (s 
*sequins) init() error {\n\terr := s.refresh()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnow := time.Now()\n\ts.started = now\n\ts.updated = now\n\n\treturn nil\n}\n\nfunc (s *sequins) start(address string) error {\n\t\/\/ TODO: we may need a more graceful way of shutting down, since this will\n\t\/\/ cause requests that start processing after this runs to 500\n\t\/\/ However, this may not be a problem, since you have to shift traffic to\n\t\/\/ another instance before shutting down anyway, otherwise you'd have downtime\n\n\tdefer s.indexMonitor.Replace(nil).Close()\n\n\tlog.Printf(\"Listening on %s\", address)\n\treturn http.ListenAndServe(address, s)\n}\n\nfunc (s *sequins) reloadLatest() error {\n\terr := s.refresh()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.updated = time.Now()\n\n\treturn nil\n}\n\nfunc (s *sequins) refresh() error {\n\ts.reloadLock.Lock()\n\tdefer s.reloadLock.Unlock()\n\n\tversion, err := s.backend.LatestVersion(s.options.CheckForSuccessFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ We can use unsafe ref, since closing the index would not affect the version string\n\tvar currentVersion string\n\tcurrentIndex := s.indexMonitor.UnsafeGet()\n\tif currentIndex != nil {\n\t\tcurrentVersion = currentIndex.Version\n\t}\n\n\tif version != currentVersion {\n\t\tpath := filepath.Join(s.options.LocalPath, version)\n\n\t\terr := os.Mkdir(path, 0700|os.ModeDir)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tif os.IsExist(err) {\n\t\t\tlog.Printf(\"Version %s is already downloaded\", version)\n\t\t} else {\n\t\t\tlog.Printf(\"Downloading version %s from %s\", version, s.backend.DisplayPath(version))\n\t\t\terr = s.backend.Download(version, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Preparing version %s at %s\", version, path)\n\t\tindex := index.New(path, version)\n\t\terr = index.Load()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error while indexing: %s\", err)\n\t\t}\n\n\t\tlog.Printf(\"Switching to version %s!\", version)\n\n\t\toldIndex := s.indexMonitor.Replace(index)\n\t\tif oldIndex != nil {\n\t\t\toldIndex.Close()\n\t\t}\n\t} else {\n\t\tlog.Printf(\"%s is already the newest version, so not reloading.\", version)\n\t}\n\n\treturn nil\n}\n\nfunc (s *sequins) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/\" {\n\t\tindex := s.indexMonitor.Get()\n\t\tcount, err := index.Count()\n\t\tcurrentVersion := index.Version\n\t\ts.indexMonitor.Release(index)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tstatus := status{\n\t\t\tPath: s.backend.DisplayPath(currentVersion),\n\t\t\tStarted: s.started.Unix(),\n\t\t\tUpdated: s.updated.Unix(),\n\t\t\tCount: count,\n\t\t}\n\n\t\tjsonBytes, err := json.Marshal(status)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(jsonBytes)\n\t\treturn\n\t}\n\n\tkey := strings.TrimPrefix(r.URL.Path, \"\/\")\n\n\tcurrentIndex := s.indexMonitor.Get()\n\tres, err := currentIndex.Get(key)\n\ts.indexMonitor.Release(currentIndex)\n\n\tif err == index.ErrNotFound {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t} else if err != nil {\n\t\tlog.Fatal(fmt.Errorf(\"Error fetching value for %s: %s\", key, err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t} else {\n\t\t\/\/ Explicitly unset Content-Type, so ServeContent doesn't try to do any\n\t\t\/\/ 
sniffing.\n\t\tw.Header()[\"Content-Type\"] = nil\n\n\t\thttp.ServeContent(w, r, key, s.updated, bytes.NewReader(res))\n\t}\n}\nFix minor naming inconsistencypackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/stripe\/sequins\/backend\"\n\t\"github.com\/stripe\/sequins\/index\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype sequinsOptions struct {\n\tLocalPath string\n\tCheckForSuccessFile bool\n}\n\ntype sequins struct {\n\toptions sequinsOptions\n\tbackend backend.Backend\n\tindexReference index.IndexReference\n\thttp *http.Server\n\tstarted time.Time\n\tupdated time.Time\n\treloadLock sync.Mutex\n}\n\ntype status struct {\n\tPath string `json:\"path\"`\n\tStarted int64 `json:\"started\"`\n\tUpdated int64 `json:\"updated\"`\n\tCount int `json:\"count\"`\n}\n\nfunc newSequins(backend backend.Backend, options sequinsOptions) *sequins {\n\treturn &sequins{\n\t\toptions: options,\n\t\tbackend: backend,\n\t\treloadLock: sync.Mutex{},\n\t}\n}\n\nfunc (s *sequins) init() error {\n\terr := s.refresh()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnow := time.Now()\n\ts.started = now\n\ts.updated = now\n\n\treturn nil\n}\n\nfunc (s *sequins) start(address string) error {\n\t\/\/ TODO: we may need a more graceful way of shutting down, since this will\n\t\/\/ cause requests that start processing after this runs to 500\n\t\/\/ However, this may not be a problem, since you have to shift traffic to\n\t\/\/ another instance before shutting down anyway, otherwise you'd have downtime\n\n\tdefer s.indexReference.Replace(nil).Close()\n\n\tlog.Printf(\"Listening on %s\", address)\n\treturn http.ListenAndServe(address, s)\n}\n\nfunc (s *sequins) reloadLatest() error {\n\terr := s.refresh()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.updated = time.Now()\n\n\treturn nil\n}\n\nfunc (s *sequins) refresh() error {\n\ts.reloadLock.Lock()\n\tdefer s.reloadLock.Unlock()\n\n\tversion, err := s.backend.LatestVersion(s.options.CheckForSuccessFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ We can use unsafe ref, since closing the index would not affect the version string\n\tvar currentVersion string\n\tcurrentIndex := s.indexReference.UnsafeGet()\n\tif currentIndex != nil {\n\t\tcurrentVersion = currentIndex.Version\n\t}\n\n\tif version != currentVersion {\n\t\tpath := filepath.Join(s.options.LocalPath, version)\n\n\t\terr := os.Mkdir(path, 0700|os.ModeDir)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tif os.IsExist(err) {\n\t\t\tlog.Printf(\"Version %s is already downloaded\", version)\n\t\t} else {\n\t\t\tlog.Printf(\"Downloading version %s from %s\", version, s.backend.DisplayPath(version))\n\t\t\terr = s.backend.Download(version, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tlog.Printf(\"Preparing version %s at %s\", version, path)\n\t\tindex := index.New(path, version)\n\t\terr = index.Load()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error while indexing: %s\", err)\n\t\t}\n\n\t\tlog.Printf(\"Switching to version %s!\", version)\n\n\t\toldIndex := s.indexReference.Replace(index)\n\t\tif oldIndex != nil {\n\t\t\toldIndex.Close()\n\t\t}\n\t} else {\n\t\tlog.Printf(\"%s is already the newest version, so not reloading.\", version)\n\t}\n\n\treturn nil\n}\n\nfunc (s *sequins) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/\" {\n\t\tindex := s.indexReference.Get()\n\t\tcount, err := index.Count()\n\t\tcurrentVersion := 
index.Version\n\t\ts.indexReference.Release(index)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tstatus := status{\n\t\t\tPath: s.backend.DisplayPath(currentVersion),\n\t\t\tStarted: s.started.Unix(),\n\t\t\tUpdated: s.updated.Unix(),\n\t\t\tCount: count,\n\t\t}\n\n\t\tjsonBytes, err := json.Marshal(status)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(jsonBytes)\n\t\treturn\n\t}\n\n\tkey := strings.TrimPrefix(r.URL.Path, \"\/\")\n\n\tcurrentIndex := s.indexReference.Get()\n\tres, err := currentIndex.Get(key)\n\ts.indexReference.Release(currentIndex)\n\n\tif err == index.ErrNotFound {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t} else if err != nil {\n\t\tlog.Fatal(fmt.Errorf(\"Error fetching value for %s: %s\", key, err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t} else {\n\t\t\/\/ Explicitly unset Content-Type, so ServeContent doesn't try to do any\n\t\t\/\/ sniffing.\n\t\tw.Header()[\"Content-Type\"] = nil\n\n\t\thttp.ServeContent(w, r, key, s.updated, bytes.NewReader(res))\n\t}\n}\n<|endoftext|>"} {"text":"package micro\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/cmd\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\/http\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\/pprof\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/service\/handler\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/stats\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/trace\"\n\t\"github.com\/micro\/go-micro\/v2\/plugin\"\n\t\"github.com\/micro\/go-micro\/v2\/server\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/log\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/wrapper\"\n)\n\ntype service struct {\n\topts Options\n\n\tonce sync.Once\n}\n\nfunc newService(opts ...Option) Service {\n\toptions := newOptions(opts...)\n\n\t\/\/ service name\n\tserviceName := options.Server.Options().Name\n\n\t\/\/ wrap client to inject From-Service header on any calls\n\toptions.Client = wrapper.FromService(serviceName, options.Client)\n\toptions.Client = wrapper.TraceCall(serviceName, trace.DefaultTracer, options.Client)\n\n\t\/\/ wrap the server to provide handler stats\n\toptions.Server.Init(\n\t\tserver.WrapHandler(wrapper.HandlerStats(stats.DefaultStats)),\n\t\tserver.WrapHandler(wrapper.TraceHandler(trace.DefaultTracer)),\n\t)\n\n\treturn &service{\n\t\topts: options,\n\t}\n}\n\nfunc (s *service) Name() string {\n\treturn s.opts.Server.Options().Name\n}\n\n\/\/ Init initialises options. Additionally it calls cmd.Init\n\/\/ which parses command line flags. 
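\n\n\/\/ Editor's note: a minimal illustrative sketch of the functional-options\n\/\/ pattern Init relies on below; every name here (exampleOptions, withExampleName,\n\/\/ exampleService) is hypothetical and exists only for this aside. An Option is\n\/\/ simply a function that mutates the Options struct, and sync.Once keeps the\n\/\/ one-time setup from running more than once however often Init is called.\ntype exampleOptions struct {\n\tname string\n}\n\ntype exampleOption func(*exampleOptions)\n\nfunc withExampleName(n string) exampleOption {\n\treturn func(o *exampleOptions) { o.name = n }\n}\n\ntype exampleService struct {\n\topts exampleOptions\n\tonce sync.Once\n}\n\nfunc (s *exampleService) Init(opts ...exampleOption) {\n\t\/\/ options are applied on every call...\n\tfor _, o := range opts {\n\t\to(&s.opts)\n\t}\n\t\/\/ ...but the expensive setup (flag parsing, in the real Init) runs once.\n\ts.once.Do(func() {\n\t\tlog.Logf("one-time setup for %s", s.opts.name)\n\t})\n}\n\n\/\/ 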
cmd.Init is only called\n\/\/ on first Init.\nfunc (s *service) Init(opts ...Option) {\n\t\/\/ process options\n\tfor _, o := range opts {\n\t\to(&s.opts)\n\t}\n\n\ts.once.Do(func() {\n\t\t\/\/ setup the plugins\n\t\tfor _, p := range strings.Split(os.Getenv(\"MICRO_PLUGIN\"), \",\") {\n\t\t\tif len(p) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ load the plugin\n\t\t\tc, err := plugin.Load(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t\/\/ initialise the plugin\n\t\t\tif err := plugin.Init(c); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ set cmd name\n\t\tif len(s.opts.Cmd.App().Name) == 0 {\n\t\t\ts.opts.Cmd.App().Name = s.Server().Options().Name\n\t\t}\n\n\t\t\/\/ Initialise the command flags, overriding new service\n\t\t_ = s.opts.Cmd.Init(\n\t\t\tcmd.Broker(&s.opts.Broker),\n\t\t\tcmd.Registry(&s.opts.Registry),\n\t\t\tcmd.Transport(&s.opts.Transport),\n\t\t\tcmd.Client(&s.opts.Client),\n\t\t\tcmd.Server(&s.opts.Server),\n\t\t)\n\t})\n}\n\nfunc (s *service) Options() Options {\n\treturn s.opts\n}\n\nfunc (s *service) Client() client.Client {\n\treturn s.opts.Client\n}\n\nfunc (s *service) Server() server.Server {\n\treturn s.opts.Server\n}\n\nfunc (s *service) String() string {\n\treturn \"micro\"\n}\n\nfunc (s *service) Start() error {\n\tfor _, fn := range s.opts.BeforeStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Stop() error {\n\tvar gerr error\n\n\tfor _, fn := range s.opts.BeforeStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\treturn gerr\n}\n\nfunc (s *service) Run() error {\n\t\/\/ register the debug handler\n\ts.opts.Server.Handle(\n\t\ts.opts.Server.NewHandler(\n\t\t\thandler.NewHandler(),\n\t\t\tserver.InternalHandler(true),\n\t\t),\n\t)\n\n\t\/\/ start the profiler\n\t\/\/ TODO: set as an option to the service, don't just use pprof\n\tif prof := os.Getenv(\"MICRO_DEBUG_PROFILE\"); len(prof) > 0 {\n\t\tvar profiler profile.Profile\n\n\t\t\/\/ to view mutex contention\n\t\truntime.SetMutexProfileFraction(5)\n\t\t\/\/ to view blocking profile\n\t\truntime.SetBlockProfileRate(1)\n\n\t\tswitch prof {\n\t\tcase \"http\":\n\t\t\tprofiler = http.NewProfile()\n\t\tdefault:\n\t\t\tservice := s.opts.Server.Options().Name\n\t\t\tversion := s.opts.Server.Options().Version\n\t\t\tid := s.opts.Server.Options().Id\n\t\t\tprofiler = pprof.NewProfile(\n\t\t\t\tprofile.Name(service + \".\" + version + \".\" + id),\n\t\t\t)\n\t\t}\n\n\t\tif err := profiler.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer profiler.Stop()\n\t}\n\n\tlog.Logf(\"Starting [service] %s\", s.Name())\n\n\tif err := s.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tch := make(chan os.Signal, 1)\n\tif s.opts.Signal {\n\t\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\t}\n\n\tselect {\n\t\/\/ wait on kill signal\n\tcase <-ch:\n\t\/\/ wait on context cancel\n\tcase <-s.opts.Context.Done():\n\t}\n\n\treturn s.Stop()\n}\nfatal on command errorpackage micro\n\nimport 
(\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/config\/cmd\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\/http\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/profile\/pprof\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/service\/handler\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/stats\"\n\t\"github.com\/micro\/go-micro\/v2\/debug\/trace\"\n\t\"github.com\/micro\/go-micro\/v2\/plugin\"\n\t\"github.com\/micro\/go-micro\/v2\/server\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/log\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/wrapper\"\n)\n\ntype service struct {\n\topts Options\n\n\tonce sync.Once\n}\n\nfunc newService(opts ...Option) Service {\n\toptions := newOptions(opts...)\n\n\t\/\/ service name\n\tserviceName := options.Server.Options().Name\n\n\t\/\/ wrap client to inject From-Service header on any calls\n\toptions.Client = wrapper.FromService(serviceName, options.Client)\n\toptions.Client = wrapper.TraceCall(serviceName, trace.DefaultTracer, options.Client)\n\n\t\/\/ wrap the server to provide handler stats\n\toptions.Server.Init(\n\t\tserver.WrapHandler(wrapper.HandlerStats(stats.DefaultStats)),\n\t\tserver.WrapHandler(wrapper.TraceHandler(trace.DefaultTracer)),\n\t)\n\n\treturn &service{\n\t\topts: options,\n\t}\n}\n\nfunc (s *service) Name() string {\n\treturn s.opts.Server.Options().Name\n}\n\n\/\/ Init initialises options. Additionally it calls cmd.Init\n\/\/ which parses command line flags. cmd.Init is only called\n\/\/ on first Init.\nfunc (s *service) Init(opts ...Option) {\n\t\/\/ process options\n\tfor _, o := range opts {\n\t\to(&s.opts)\n\t}\n\n\ts.once.Do(func() {\n\t\t\/\/ setup the plugins\n\t\tfor _, p := range strings.Split(os.Getenv(\"MICRO_PLUGIN\"), \",\") {\n\t\t\tif len(p) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ load the plugin\n\t\t\tc, err := plugin.Load(p)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t\/\/ initialise the plugin\n\t\t\tif err := plugin.Init(c); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ set cmd name\n\t\tif len(s.opts.Cmd.App().Name) == 0 {\n\t\t\ts.opts.Cmd.App().Name = s.Server().Options().Name\n\t\t}\n\n\t\t\/\/ Initialise the command flags, overriding new service\n\t\tif err := s.opts.Cmd.Init(\n\t\t\tcmd.Broker(&s.opts.Broker),\n\t\t\tcmd.Registry(&s.opts.Registry),\n\t\t\tcmd.Transport(&s.opts.Transport),\n\t\t\tcmd.Client(&s.opts.Client),\n\t\t\tcmd.Server(&s.opts.Server),\n\t\t); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n}\n\nfunc (s *service) Options() Options {\n\treturn s.opts\n}\n\nfunc (s *service) Client() client.Client {\n\treturn s.opts.Client\n}\n\nfunc (s *service) Server() server.Server {\n\treturn s.opts.Server\n}\n\nfunc (s *service) String() string {\n\treturn \"micro\"\n}\n\nfunc (s *service) Start() error {\n\tfor _, fn := range s.opts.BeforeStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Stop() error {\n\tvar gerr error\n\n\tfor _, fn := range s.opts.BeforeStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range 
s.opts.AfterStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\treturn gerr\n}\n\nfunc (s *service) Run() error {\n\t\/\/ register the debug handler\n\ts.opts.Server.Handle(\n\t\ts.opts.Server.NewHandler(\n\t\t\thandler.NewHandler(),\n\t\t\tserver.InternalHandler(true),\n\t\t),\n\t)\n\n\t\/\/ start the profiler\n\t\/\/ TODO: set as an option to the service, don't just use pprof\n\tif prof := os.Getenv(\"MICRO_DEBUG_PROFILE\"); len(prof) > 0 {\n\t\tvar profiler profile.Profile\n\n\t\t\/\/ to view mutex contention\n\t\truntime.SetMutexProfileFraction(5)\n\t\t\/\/ to view blocking profile\n\t\truntime.SetBlockProfileRate(1)\n\n\t\tswitch prof {\n\t\tcase \"http\":\n\t\t\tprofiler = http.NewProfile()\n\t\tdefault:\n\t\t\tservice := s.opts.Server.Options().Name\n\t\t\tversion := s.opts.Server.Options().Version\n\t\t\tid := s.opts.Server.Options().Id\n\t\t\tprofiler = pprof.NewProfile(\n\t\t\t\tprofile.Name(service + \".\" + version + \".\" + id),\n\t\t\t)\n\t\t}\n\n\t\tif err := profiler.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer profiler.Stop()\n\t}\n\n\tlog.Logf(\"Starting [service] %s\", s.Name())\n\n\tif err := s.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tch := make(chan os.Signal, 1)\n\tif s.opts.Signal {\n\t\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)\n\t}\n\n\tselect {\n\t\/\/ wait on kill signal\n\tcase <-ch:\n\t\/\/ wait on context cancel\n\tcase <-s.opts.Context.Done():\n\t}\n\n\treturn s.Stop()\n}\n<|endoftext|>"} {"text":"package siesta\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Registered services keyed by base URI.\nvar services = map[string]*Service{}\n\n\/\/ A Service is a container for routes with a common base URI.\n\/\/ It also has two middleware chains, named \"pre\" and \"post\".\n\/\/\n\/\/ The \"pre\" chain is run before the main handler. The first\n\/\/ handler in the \"pre\" chain is guaranteed to run, but execution\n\/\/ may quit anywhere else in the chain.\n\/\/\n\/\/ If the \"pre\" chain executes completely, the main handler is executed.\n\/\/ It is skipped otherwise.\n\/\/\n\/\/ The \"post\" chain runs after the main handler, whether it is skipped\n\/\/ or not. 
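\n\n\/\/ Editor's note: an illustrative sketch of the chain semantics described\n\/\/ above; the names exampleHandler and runExampleChain are hypothetical and\n\/\/ exist only for this aside. Each handler receives a quit func; the first\n\/\/ handler always runs, and once any handler calls quit the rest of that\n\/\/ chain is skipped, mirroring the loops in ServeHTTPInContext.\ntype exampleHandler func(quit func())\n\nfunc runExampleChain(chain []exampleHandler) bool {\n\tquit := false\n\tfor _, h := range chain {\n\t\th(func() { quit = true })\n\t\tif quit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn quit\n}\n\n\/\/ 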
The first handler in the \"post\" chain is guaranteed to run, but\n\/\/ execution may quit anywhere else in the chain.\ntype Service struct {\n\tbaseURI string\n\n\tpre []contextHandler\n\tpost []contextHandler\n\n\thandlers map[*regexp.Regexp]contextHandler\n\n\troutes map[string]*node\n}\n\n\/\/ NewService returns a new Service with the given base URI\n\/\/ or panics if the base URI has already been registered.\nfunc NewService(baseURI string) *Service {\n\tif services[baseURI] != nil {\n\t\tpanic(\"service already registered\")\n\t}\n\n\treturn &Service{\n\t\tbaseURI: path.Join(\"\/\", baseURI, \"\/\"),\n\t\thandlers: make(map[*regexp.Regexp]contextHandler),\n\t\troutes: map[string]*node{},\n\t}\n}\n\nfunc addToChain(f interface{}, chain []contextHandler) []contextHandler {\n\tm := toContextHandler(f)\n\treturn append(chain, m)\n}\n\n\/\/ AddPre adds f to the end of the \"pre\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPre(f interface{}) {\n\ts.pre = addToChain(f, s.pre)\n}\n\n\/\/ AddPost adds f to the end of the \"post\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPost(f interface{}) {\n\ts.post = addToChain(f, s.post)\n}\n\n\/\/ Service satisfies the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.ServeHTTPInContext(NewSiestaContext(), w, r)\n}\n\n\/\/ ServiceHTTPInContext serves an HTTP request within the Context c.\n\/\/ A Service will run through both of its internal chains, quitting\n\/\/ when requested.\nfunc (s *Service) ServeHTTPInContext(c Context, w http.ResponseWriter, r *http.Request) {\n\tquit := false\n\tfor _, m := range s.pre {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\t\/\/ Break out of the \"pre\" loop, but\n\t\t\t\/\/ continue on.\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !quit {\n\t\t\/\/ The main handler is only run if we have not\n\t\t\/\/ been signaled to quit.\n\n\t\tif r.URL.Path != \"\/\" {\n\t\t\tr.URL.Path = strings.TrimRight(r.URL.Path, \"\/\")\n\t\t}\n\n\t\tvar (\n\t\t\thandler contextHandler\n\t\t\tparams routeParams\n\t\t)\n\n\t\t\/\/ Lookup the tree for this method\n\t\trouteNode, ok := s.routes[r.Method]\n\n\t\tif ok {\n\t\t\thandler, params, _ = routeNode.getValue(r.URL.Path)\n\t\t}\n\n\t\tif handler == nil {\n\t\t\thttp.NotFoundHandler().ServeHTTP(w, r)\n\t\t} else {\n\t\t\tr.ParseForm()\n\t\t\tfor _, p := range params {\n\t\t\t\tr.Form.Set(p.Key, p.Value)\n\t\t\t}\n\n\t\t\thandler(c, w, r, func() {\n\t\t\t\tquit = true\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, m := range s.post {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Route adds a new route to the Service.\n\/\/ f must be a function with one of the following signatures:\n\/\/\n\/\/ func(http.ResponseWriter, *http.Request)\n\/\/ func(http.ResponseWriter, *http.Request, func())\n\/\/ func(Context, http.ResponseWriter, *http.Request)\n\/\/ func(Context, http.ResponseWriter, *http.Request, func())\n\/\/\n\/\/ Note that Context is an interface type defined in this package.\n\/\/ The last argument is a function which is called to signal the\n\/\/ quitting of the current execution sequence.\nfunc (s *Service) Route(verb, uriPath, usage string, f interface{}) {\n\thandler := toContextHandler(f)\n\n\tif n := s.routes[verb]; n == nil {\n\t\ts.routes[verb] = &node{}\n\t}\n\n\ts.routes[verb].addRoute(path.Join(s.baseURI, 
strings.TrimRight(uriPath, \"\/\")), handler)\n}\n\n\/\/ Register registers s by adding it as a handler to the\n\/\/ DefaultServeMux in the net\/http package.\nfunc (s *Service) Register() {\n\thttp.Handle(s.baseURI, s)\n}\nsupport custom \"not found\" handlers; #35package siesta\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Registered services keyed by base URI.\nvar services = map[string]*Service{}\n\n\/\/ A Service is a container for routes with a common base URI.\n\/\/ It also has two middleware chains, named \"pre\" and \"post\".\n\/\/\n\/\/ The \"pre\" chain is run before the main handler. The first\n\/\/ handler in the \"pre\" chain is guaranteed to run, but execution\n\/\/ may quit anywhere else in the chain.\n\/\/\n\/\/ If the \"pre\" chain executes completely, the main handler is executed.\n\/\/ It is skipped otherwise.\n\/\/\n\/\/ The \"post\" chain runs after the main handler, whether it is skipped\n\/\/ or not. The first handler in the \"post\" chain is guaranteed to run, but\n\/\/ execution may quit anywhere else in the chain.\ntype Service struct {\n\tbaseURI string\n\n\tpre []contextHandler\n\tpost []contextHandler\n\n\thandlers map[*regexp.Regexp]contextHandler\n\n\troutes map[string]*node\n\n\tnotFound contextHandler\n}\n\n\/\/ NewService returns a new Service with the given base URI\n\/\/ or panics if the base URI has already been registered.\nfunc NewService(baseURI string) *Service {\n\tif services[baseURI] != nil {\n\t\tpanic(\"service already registered\")\n\t}\n\n\treturn &Service{\n\t\tbaseURI: path.Join(\"\/\", baseURI, \"\/\"),\n\t\thandlers: make(map[*regexp.Regexp]contextHandler),\n\t\troutes: map[string]*node{},\n\t}\n}\n\nfunc addToChain(f interface{}, chain []contextHandler) []contextHandler {\n\tm := toContextHandler(f)\n\treturn append(chain, m)\n}\n\n\/\/ AddPre adds f to the end of the \"pre\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPre(f interface{}) {\n\ts.pre = addToChain(f, s.pre)\n}\n\n\/\/ AddPost adds f to the end of the \"post\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPost(f interface{}) {\n\ts.post = addToChain(f, s.post)\n}\n\n\/\/ Service satisfies the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.ServeHTTPInContext(NewSiestaContext(), w, r)\n}\n\n\/\/ ServiceHTTPInContext serves an HTTP request within the Context c.\n\/\/ A Service will run through both of its internal chains, quitting\n\/\/ when requested.\nfunc (s *Service) ServeHTTPInContext(c Context, w http.ResponseWriter, r *http.Request) {\n\tquit := false\n\tfor _, m := range s.pre {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\t\/\/ Break out of the \"pre\" loop, but\n\t\t\t\/\/ continue on.\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !quit {\n\t\t\/\/ The main handler is only run if we have not\n\t\t\/\/ been signaled to quit.\n\n\t\tif r.URL.Path != \"\/\" {\n\t\t\tr.URL.Path = strings.TrimRight(r.URL.Path, \"\/\")\n\t\t}\n\n\t\tvar (\n\t\t\thandler contextHandler\n\t\t\tparams routeParams\n\t\t)\n\n\t\t\/\/ Lookup the tree for this method\n\t\trouteNode, ok := s.routes[r.Method]\n\n\t\tif ok {\n\t\t\thandler, params, _ = routeNode.getValue(r.URL.Path)\n\t\t}\n\n\t\tif handler == nil {\n\t\t\tif s.notFound != nil {\n\t\t\t\ts.notFound(c, w, r, func() {})\n\t\t\t}\n\t\t} else {\n\t\t\tr.ParseForm()\n\t\t\tfor _, p := range params 
{\n\t\t\t\tr.Form.Set(p.Key, p.Value)\n\t\t\t}\n\n\t\t\thandler(c, w, r, func() {\n\t\t\t\tquit = true\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, m := range s.post {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Route adds a new route to the Service.\n\/\/ f must be a function with one of the following signatures:\n\/\/\n\/\/ func(http.ResponseWriter, *http.Request)\n\/\/ func(http.ResponseWriter, *http.Request, func())\n\/\/ func(Context, http.ResponseWriter, *http.Request)\n\/\/ func(Context, http.ResponseWriter, *http.Request, func())\n\/\/\n\/\/ Note that Context is an interface type defined in this package.\n\/\/ The last argument is a function which is called to signal the\n\/\/ quitting of the current execution sequence.\nfunc (s *Service) Route(verb, uriPath, usage string, f interface{}) {\n\thandler := toContextHandler(f)\n\n\tif n := s.routes[verb]; n == nil {\n\t\ts.routes[verb] = &node{}\n\t}\n\n\ts.routes[verb].addRoute(path.Join(s.baseURI, strings.TrimRight(uriPath, \"\/\")), handler)\n}\n\n\/\/ SetNotFound sets the handler for all paths that do not\n\/\/ match any existing routes. It accepts the same function\n\/\/ signatures that Route does.\nfunc (s *Service) SetNotFound(f interface{}) {\n\thandler := toContextHandler(f)\n\ts.notFound = handler\n}\n\n\/\/ Register registers s by adding it as a handler to the\n\/\/ DefaultServeMux in the net\/http package.\nfunc (s *Service) Register() {\n\thttp.Handle(s.baseURI, s)\n}\n<|endoftext|>"} {"text":"package micro\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\tlog \"github.com\/micro\/go-log\"\n\t\"github.com\/micro\/go-micro\/client\"\n\t\"github.com\/micro\/go-micro\/cmd\"\n\t\"github.com\/micro\/go-micro\/metadata\"\n\t\"github.com\/micro\/go-micro\/server\"\n)\n\ntype service struct {\n\topts Options\n\n\tonce sync.Once\n}\n\nfunc newService(opts ...Option) Service {\n\toptions := newOptions(opts...)\n\n\toptions.Client = &clientWrapper{\n\t\toptions.Client,\n\t\tmetadata.Metadata{\n\t\t\tHeaderPrefix + \"From-Service\": options.Server.Options().Name,\n\t\t},\n\t}\n\n\treturn &service{\n\t\topts: options,\n\t}\n}\n\nfunc (s *service) run(exit chan bool) {\n\tif s.opts.RegisterInterval <= time.Duration(0) {\n\t\treturn\n\t}\n\n\tt := time.NewTicker(s.opts.RegisterInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\terr := s.opts.Server.Register()\n\t\t\tif err != nil {\n\t\t\t\tlog.Log(\"service run Server.Register err : \", err)\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tt.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Init initialises options. Additionally it calls cmd.Init\n\/\/ which parses command line flags. 
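\n\n\/\/ Editor's note: an illustrative, hypothetical helper (exampleHeartbeat is\n\/\/ not part of go-micro) distilling the pattern run() implements above: a\n\/\/ time.Ticker drives the periodic re-registration, and closing the exit\n\/\/ channel stops both the ticker and the goroutine.\nfunc exampleHeartbeat(interval time.Duration, work func(), exit <-chan bool) {\n\tif interval <= 0 {\n\t\treturn\n\t}\n\tt := time.NewTicker(interval)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\twork()\n\t\tcase <-exit:\n\t\t\tt.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ 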
cmd.Init is only called\n\/\/ on first Init.\nfunc (s *service) Init(opts ...Option) {\n\t\/\/ process options\n\tfor _, o := range opts {\n\t\to(&s.opts)\n\t}\n\n\ts.once.Do(func() {\n\t\t\/\/ Initialise the command flags, overriding new service\n\t\ts.opts.Cmd.Init(\n\t\t\tcmd.Broker(&s.opts.Broker),\n\t\t\tcmd.Registry(&s.opts.Registry),\n\t\t\tcmd.Transport(&s.opts.Transport),\n\t\t\tcmd.Client(&s.opts.Client),\n\t\t\tcmd.Server(&s.opts.Server),\n\t\t)\n\t})\n}\n\nfunc (s *service) Options() Options {\n\treturn s.opts\n}\n\nfunc (s *service) Client() client.Client {\n\treturn s.opts.Client\n}\n\nfunc (s *service) Server() server.Server {\n\treturn s.opts.Server\n}\n\nfunc (s *service) String() string {\n\treturn \"go-micro\"\n}\n\nfunc (s *service) Start() error {\n\tfor _, fn := range s.opts.BeforeStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.opts.Server.Register(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Stop() error {\n\tvar gerr error\n\n\tfor _, fn := range s.opts.BeforeStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Deregister(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.opts.Server.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\treturn gerr\n}\n\nfunc (s *service) Run() error {\n\tif err := s.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ start reg loop\n\tex := make(chan bool)\n\tgo s.run(ex)\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)\n\n\tselect {\n\t\/\/ wait on kill signal\n\tcase <-ch:\n\t\/\/ wait on context cancel\n\tcase <-s.opts.Context.Done():\n\t}\n\n\t\/\/ exit reg loop\n\tclose(ex)\n\n\tif err := s.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\nRemove whitespacepackage micro\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\tlog \"github.com\/micro\/go-log\"\n\t\"github.com\/micro\/go-micro\/client\"\n\t\"github.com\/micro\/go-micro\/cmd\"\n\t\"github.com\/micro\/go-micro\/metadata\"\n\t\"github.com\/micro\/go-micro\/server\"\n)\n\ntype service struct {\n\topts Options\n\n\tonce sync.Once\n}\n\nfunc newService(opts ...Option) Service {\n\toptions := newOptions(opts...)\n\n\toptions.Client = &clientWrapper{\n\t\toptions.Client,\n\t\tmetadata.Metadata{\n\t\t\tHeaderPrefix + \"From-Service\": options.Server.Options().Name,\n\t\t},\n\t}\n\n\treturn &service{\n\t\topts: options,\n\t}\n}\n\nfunc (s *service) run(exit chan bool) {\n\tif s.opts.RegisterInterval <= time.Duration(0) {\n\t\treturn\n\t}\n\n\tt := time.NewTicker(s.opts.RegisterInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\terr := s.opts.Server.Register()\n\t\t\tif err != nil {\n\t\t\t\tlog.Log(\"service run Server.Register error: \", err)\n\t\t\t}\n\t\tcase <-exit:\n\t\t\tt.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Init initialises options. Additionally it calls cmd.Init\n\/\/ which parses command line flags. 
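\n\n\/\/ Editor's note: an illustrative, hypothetical helper (exampleAwaitShutdown\n\/\/ is not part of go-micro) showing the shutdown wait Run() performs below:\n\/\/ block until either a termination signal arrives or the done channel is\n\/\/ closed. SIGKILL is deliberately left out here, since it cannot be caught,\n\/\/ so asking signal.Notify for it has no effect.\nfunc exampleAwaitShutdown(done <-chan struct{}) {\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)\n\tselect {\n\tcase <-ch:\n\tcase <-done:\n\t}\n}\n\n\/\/ 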
cmd.Init is only called\n\/\/ on first Init.\nfunc (s *service) Init(opts ...Option) {\n\t\/\/ process options\n\tfor _, o := range opts {\n\t\to(&s.opts)\n\t}\n\n\ts.once.Do(func() {\n\t\t\/\/ Initialise the command flags, overriding new service\n\t\ts.opts.Cmd.Init(\n\t\t\tcmd.Broker(&s.opts.Broker),\n\t\t\tcmd.Registry(&s.opts.Registry),\n\t\t\tcmd.Transport(&s.opts.Transport),\n\t\t\tcmd.Client(&s.opts.Client),\n\t\t\tcmd.Server(&s.opts.Server),\n\t\t)\n\t})\n}\n\nfunc (s *service) Options() Options {\n\treturn s.opts\n}\n\nfunc (s *service) Client() client.Client {\n\treturn s.opts.Client\n}\n\nfunc (s *service) Server() server.Server {\n\treturn s.opts.Server\n}\n\nfunc (s *service) String() string {\n\treturn \"go-micro\"\n}\n\nfunc (s *service) Start() error {\n\tfor _, fn := range s.opts.BeforeStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.opts.Server.Register(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStart {\n\t\tif err := fn(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *service) Stop() error {\n\tvar gerr error\n\n\tfor _, fn := range s.opts.BeforeStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\tif err := s.opts.Server.Deregister(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.opts.Server.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range s.opts.AfterStop {\n\t\tif err := fn(); err != nil {\n\t\t\tgerr = err\n\t\t}\n\t}\n\n\treturn gerr\n}\n\nfunc (s *service) Run() error {\n\tif err := s.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ start reg loop\n\tex := make(chan bool)\n\tgo s.run(ex)\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)\n\n\tselect {\n\t\/\/ wait on kill signal\n\tcase <-ch:\n\t\/\/ wait on context cancel\n\tcase <-s.opts.Context.Done():\n\t}\n\n\t\/\/ exit reg loop\n\tclose(ex)\n\n\tif err := s.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package common\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\n\/\/ Service interface\ntype Service interface {\n\tStart() error\n\tStop() error\n}\n\n\/\/ Serve starts a service, and stops it if it receives an INT or TERM signal.\nfunc Serve(s Service) {\n\tsignalCh := make(chan os.Signal, 1)\n\texitCh := make(chan bool)\n\n\tgo func() {\n\t\tsig := <-signalCh\n\t\tlog.Printf(\"received signal: %s\", sig)\n\t\texitCh <- true\n\t}()\n\n\t\/\/ listen for INT & TERM signals\n\tsignal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\tlog.Printf(\"start service\")\n\t\terr := s.Start()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"service ended with error: %s\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"service ended\")\n\t\t}\n\t\texitCh <- true\n\t}()\n\n\t<-exitCh\n\n\tlog.Printf(\"stopping service...\")\n\tif err := s.Stop(); err != nil {\n\t\tlog.Fatalf(\"service ended with error: %s\", err)\n\t}\n\tlog.Printf(\"Bye-bye!\")\n}\nAdded Name methodpackage common\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\nvar logger *log.Logger\n\n\/\/ Service interface\ntype Service interface {\n\tName() string\n\tStart() error\n\tStop() error\n}\n\n\/\/ Serve starts a service, and stops it if it receives an INT or TERM signal.\nfunc Serve(s Service) {\n\n\tlogf := func(format string, v ...interface{}) {\n\t\tname := s.Name()\n\t\tmsg := fmt.Sprintf(format, v...)\n\t\tmsg = fmt.Sprintf(\"[Service %s] %s\", name, msg)\n\t\tif logger != nil {\n\t\t\tlogger.Println(msg)\n\t\t} else {\n\t\t\tlog.Println(msg)\n\t\t}\n\t}\n\n\tsignalCh := make(chan os.Signal, 1)\n\texitCh := make(chan bool)\n\n\tgo func() {\n\t\tsig := <-signalCh\n\t\tlogf(\"received signal: %s\", sig)\n\t\texitCh <- true\n\t}()\n\n\t\/\/ listen for INT & TERM signals\n\tsignal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)\n\n\tgo func() {\n\t\tlogf(\"started\")\n\t\terr := s.Start()\n\t\tif err != nil {\n\t\t\tlogf(\"ended unexpectedly: %s\", err)\n\t\t} else {\n\t\t\tlogf(\"ended\")\n\t\t}\n\t\texitCh <- true\n\t}()\n\n\t<-exitCh\n\n\tlogf(\"stopping...\")\n\tif err := s.Stop(); err != nil {\n\t\tlogf(\"stopped with error: %s\", err)\n\t}\n\tlogf(\"Bye-bye!\")\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/cheggaaa\/pb\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar subdivisions *int\nvar tolerance *int\n\nvar scratchDir string\n\ntype pictable [][][]uint64\n\nfunc MkPictable(dx int, dy int) pictable {\n\tpic := make([][][]uint64, dx) \/* type declaration *\/\n\tfor i := range pic {\n\t\tpic[i] = make([][]uint64, dy) \/* again the type? *\/\n\t\tfor j := range pic[i] {\n\t\t\tpic[i][j] = []uint64{0, 0, 0}\n\t\t}\n\t}\n\treturn pic\n}\n\nfunc absdiff(a uint64, b uint64) uint64 {\n\treturn uint64(math.Abs(float64(a) - float64(b)))\n}\n\nfunc init() {\n\tsubdivisions = flag.Int(\"subdivisions\", 10, \"Slices per axis\")\n\ttolerance = flag.Int(\"tolerance\", 100, \"Color delta tolerance, higher = more tolerant\")\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tfmt.Println(\"usage: imgdedup [options] [\/files]\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\n\timage.RegisterFormat(\"png\", \"png\", png.Decode, png.DecodeConfig)\n\timage.RegisterFormat(\"jpeg\", \"jpeg\", jpeg.Decode, jpeg.DecodeConfig)\n\timage.RegisterFormat(\"gif\", \"gif\", gif.Decode, gif.DecodeConfig)\n}\n\nfunc init() {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscratchDir = path.Join(usr.HomeDir, \".imgdedup\")\n\n\tif _, err := os.Stat(scratchDir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tos.Mkdir(scratchDir, 0700)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n
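\n\/\/ Editor's note: an illustrative helper, not part of the original program.\n\/\/ It shows the coordinate-to-cell mapping scanImg performs below: a pixel at\n\/\/ x in an image of width w lands in cell floor(x\/w * cells), so each cell\n\/\/ covers an equal slice of the image. For example, with w = 100 and\n\/\/ cells = 10, x = 99 maps to cell 9, the last one.\nfunc exampleCellIndex(x, w, cells int) int {\n\treturn int(math.Floor((float64(x) \/ float64(w)) * float64(cells)))\n}\n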
\nfunc scanImg(file *os.File) (pictable, error) {\n\tm, _, err := image.Decode(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbounds := m.Bounds()\n\n\tavgdata := MkPictable(*subdivisions, *subdivisions)\n\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\trX := int64(math.Floor((float64(x) \/ float64(bounds.Max.X)) * float64(*subdivisions)))\n\t\t\trY := int64(math.Floor((float64(y) \/ float64(bounds.Max.Y)) * float64(*subdivisions)))\n\n\t\t\tr, g, b, _ := m.At(x, y).RGBA()\n\t\t\tavgdata[rX][rY][0] += uint64((float32(r) \/ 65535) * 255)\n\t\t\tavgdata[rX][rY][1] += uint64((float32(g) \/ 65535) * 255)\n\t\t\tavgdata[rX][rY][2] += uint64((float32(b) \/ 65535) * 255)\n\t\t}\n\t}\n\n\tdivisor := uint64((bounds.Max.X \/ *subdivisions) * (bounds.Max.Y \/ *subdivisions))\n\n\tfor rX := 0; rX < *subdivisions; rX++ {\n\t\tfor rY := 0; rY < *subdivisions; rY++ {\n\t\t\tavgdata[rX][rY][0] = avgdata[rX][rY][0] \/ divisor\n\t\t\tavgdata[rX][rY][1] = avgdata[rX][rY][1] \/ divisor\n\t\t\tavgdata[rX][rY][2] = avgdata[rX][rY][2] \/ divisor\n\t\t}\n\t}\n\n\treturn avgdata, nil\n}\n\nfunc loadCache(cachename string) (pictable, error) {\n\n\tfile, err := os.Open(cachename)\n\tdefer file.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := bufio.NewReader(file)\n\n\tvar avgdata pictable\n\n\tdec := json.NewDecoder(r)\n\n\terr = dec.Decode(&avgdata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn avgdata, nil\n}\n\nfunc storeCache(cachename string, avgdata *pictable) {\n\tfo, err := os.Create(cachename)\n\tdefer fo.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tenc := json.NewEncoder(fo)\n\tenc.Encode(avgdata)\n}\n\nfunc main() {\n\timgdata := make(map[string]pictable)\n\n\tfileList := getFiles(flag.Args())\n\n\tbar := pb.StartNew(len(fileList))\n\n\tfor _, imgpath := range fileList {\n\n\t\tbar.Increment()\n\n\t\tfile, err := os.Open(imgpath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfExt := strings.ToLower(filepath.Ext(imgpath))\n\t\tif fExt == \".png\" || fExt == \".jpg\" || fExt == \".jpeg\" || fExt == \".gif\" {\n\n\t\t\tfi, err := file.Stat()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\th := md5.New()\n\n\t\t\t\/\/ fmt.Sprintf renders the numbers as decimal digits; string(n) on an\n\t\t\t\/\/ integer would yield the rune with that code point instead.\n\t\t\tcacheUnit := fmt.Sprintf(\"%s|%d|%d|%d\", imgpath, *subdivisions, fi.Size(), fi.ModTime().Unix())\n\n\t\t\tio.WriteString(h, cacheUnit)\n\t\t\tcachename := path.Join(scratchDir, fmt.Sprintf(\"%x\", h.Sum(nil))+\".tmp\")\n\n\t\t\tvar avgdata pictable\n\n\t\t\tavgdata, err = loadCache(cachename)\n\t\t\tif err != nil {\n\n\t\t\t\tavgdata, err = scanImg(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(imgpath, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstoreCache(cachename, &avgdata)\n\t\t\t}\n\n\t\t\timgdata[imgpath] = avgdata\n\n\t\t\tfile.Close()\n\n\t\t} else {\n\t\t\tfile.Close()\n\t\t}\n\t}\n\n\tbar.Finish()\n\n\tfileLength := len(fileList)\n\n\tfor i := 0; i < fileLength-1; i++ {\n\t\tfor j := i + 1; j < fileLength; j++ { \/\/ include the final file in comparisons\n\n\t\t\tfilename1 := fileList[i]\n\t\t\tfilename2 := fileList[j]\n\n\t\t\tavgdata1, ok1 := imgdata[filename1]\n\t\t\tavgdata2, ok2 := imgdata[filename2]\n\n\t\t\tif ok1 && ok2 {\n\n\t\t\t\tif filename1 == filename2 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar xdiff uint64 = 0\n\n\t\t\t\tfor rX := 0; rX < *subdivisions; rX++ {\n\t\t\t\t\tfor rY := 0; rY < *subdivisions; rY++ {\n\t\t\t\t\t\taa := avgdata1[rX][rY]\n\t\t\t\t\t\tbb := avgdata2[rX][rY]\n\n\t\t\t\t\t\txdiff += absdiff(absdiff(absdiff(aa[0], bb[0]), absdiff(aa[1], bb[1])), absdiff(aa[2], bb[2]))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif xdiff < uint64(*tolerance) {\n\t\t\t\t\tfmt.Println(filename1, filename2)\n\t\t\t\t\tfmt.Println(xdiff)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n}\n\nfunc getFiles(paths []string) []string {\n\tvar fileList []string\n\n\tfor _, imgpath := range paths {\n\n\t\tfile, err := os.Open(imgpath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfi, err := file.Stat()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\t\/\/ fmt.Println(\"directory\")\n\t\t\tfilepath.Walk(imgpath, func(path string, f os.FileInfo, err error) error {\n\n\t\t\t\tsubmode := f.Mode()\n\t\t\t\tif submode.IsRegular() {\n\t\t\t\t\tfpath, _ := filepath.Abs(path)\n\t\t\t\t\tfileList = append(fileList, fpath)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase mode.IsRegular():\n\t\t\t\/\/ fmt.Println(\"file\")\n\t\t\tfpath, _ := filepath.Abs(imgpath)\n\t\t\tfileList = append(fileList, fpath)\n\t\t}\n\n\t\tfile.Close()\n\n\t}\n\n\treturn fileList\n}\n
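\n\/\/ Editor's note: an illustrative alternative, not part of the original\n\/\/ program. A plain L1 colour distance sums the per-channel deltas, whereas\n\/\/ the nested absdiff expression in main lets large channel differences\n\/\/ cancel each other out (||a0-b0| - |a1-b1|| can be tiny even when both\n\/\/ channel deltas are huge).\nfunc exampleL1Dist(a, b []uint64) uint64 {\n\treturn absdiff(a[0], b[0]) + absdiff(a[1], b[1]) + absdiff(a[2], b[2])\n}\nDivide by zero error 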
on bad image fixedpackage main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/md5\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/cheggaaa\/pb\"\n\t\"image\"\n\t\"image\/gif\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar subdivisions *int\nvar tolerance *int\n\nvar scratchDir string\n\ntype pictable [][][]uint64\n\nfunc MkPictable(dx int, dy int) pictable {\n\tpic := make([][][]uint64, dx) \/* type declaration *\/\n\tfor i := range pic {\n\t\tpic[i] = make([][]uint64, dy) \/* again the type? *\/\n\t\tfor j := range pic[i] {\n\t\t\tpic[i][j] = []uint64{0, 0, 0}\n\t\t}\n\t}\n\treturn pic\n}\n\nfunc absdiff(a uint64, b uint64) uint64 {\n\treturn uint64(math.Abs(float64(a) - float64(b)))\n}\n\nfunc init() {\n\tsubdivisions = flag.Int(\"subdivisions\", 10, \"Slices per axis\")\n\ttolerance = flag.Int(\"tolerance\", 100, \"Color delta tolerance, higher = more tolerant\")\n\tflag.Parse()\n\n\tif flag.NArg() < 1 {\n\t\tfmt.Println(\"usage: imgdedup [options] [\/files]\")\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\n\timage.RegisterFormat(\"png\", \"png\", png.Decode, png.DecodeConfig)\n\timage.RegisterFormat(\"jpeg\", \"jpeg\", jpeg.Decode, jpeg.DecodeConfig)\n\timage.RegisterFormat(\"gif\", \"gif\", gif.Decode, gif.DecodeConfig)\n}\n\nfunc init() {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscratchDir = path.Join(usr.HomeDir, \".imgdedup\")\n\n\tif _, err := os.Stat(scratchDir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tos.Mkdir(scratchDir, 0700)\n\t\t} else {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc scanImg(file *os.File) (pictable, error) {\n\tm, _, err := image.Decode(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbounds := m.Bounds()\n\n\tavgdata := MkPictable(*subdivisions, *subdivisions)\n\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\trX := int64(math.Floor((float64(x) \/ float64(bounds.Max.X)) * float64(*subdivisions)))\n\t\t\trY := int64(math.Floor((float64(y) \/ float64(bounds.Max.Y)) * float64(*subdivisions)))\n\n\t\t\tr, g, b, _ := m.At(x, y).RGBA()\n\t\t\tavgdata[rX][rY][0] += uint64((float32(r) \/ 65535) * 255)\n\t\t\tavgdata[rX][rY][1] += uint64((float32(g) \/ 65535) * 255)\n\t\t\tavgdata[rX][rY][2] += uint64((float32(b) \/ 65535) * 255)\n\t\t}\n\t}\n\n\tdivisor := uint64((bounds.Max.X \/ *subdivisions) * (bounds.Max.Y \/ *subdivisions))\n\tif divisor == 0 {\n\t\treturn nil, fmt.Errorf(\"Image dimensions %d x %d invalid\", bounds.Max.X, bounds.Max.Y)\n\t}\n\n\tfor rX := 0; rX < *subdivisions; rX++ {\n\t\tfor rY := 0; rY < *subdivisions; rY++ {\n\t\t\tavgdata[rX][rY][0] = avgdata[rX][rY][0] \/ divisor\n\t\t\tavgdata[rX][rY][1] = avgdata[rX][rY][1] \/ divisor\n\t\t\tavgdata[rX][rY][2] = avgdata[rX][rY][2] \/ divisor\n\t\t}\n\t}\n\n\treturn avgdata, nil\n}\n\nfunc loadCache(cachename string) (pictable, error) {\n\n\tfile, err := os.Open(cachename)\n\tdefer file.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := bufio.NewReader(file)\n\n\tvar avgdata pictable\n\n\tdec := json.NewDecoder(r)\n\n\terr = dec.Decode(&avgdata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn avgdata, nil\n}\n\nfunc storeCache(cachename string, avgdata *pictable) {\n\tfo, err := os.Create(cachename)\n\tdefer fo.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tenc := json.NewEncoder(fo)\n\tenc.Encode(avgdata)\n}\n\nfunc main() {\n\n\timgdata := 
make(map[string]pictable)\n\n\tfileList := getFiles(flag.Args())\n\n\tbar := pb.StartNew(len(fileList))\n\n\tfor _, imgpath := range fileList {\n\n\t\tbar.Increment()\n\n\t\tfile, err := os.Open(imgpath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfExt := strings.ToLower(filepath.Ext(imgpath))\n\t\tif fExt == \".png\" || fExt == \".jpg\" || fExt == \".jpeg\" || fExt == \".gif\" {\n\n\t\t\tfi, err := file.Stat()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\th := md5.New()\n\n\t\t\t\/\/ fmt.Sprintf renders the numbers as decimal digits; string(n) on an\n\t\t\t\/\/ integer would yield the rune with that code point instead.\n\t\t\tcacheUnit := fmt.Sprintf(\"%s|%d|%d|%d\", imgpath, *subdivisions, fi.Size(), fi.ModTime().Unix())\n\n\t\t\tio.WriteString(h, cacheUnit)\n\t\t\tcachename := path.Join(scratchDir, fmt.Sprintf(\"%x\", h.Sum(nil))+\".tmp\")\n\n\t\t\tvar avgdata pictable\n\n\t\t\tavgdata, err = loadCache(cachename)\n\t\t\tif err != nil {\n\n\t\t\t\tavgdata, err = scanImg(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(imgpath, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstoreCache(cachename, &avgdata)\n\t\t\t}\n\n\t\t\timgdata[imgpath] = avgdata\n\n\t\t\tfile.Close()\n\n\t\t} else {\n\t\t\tfile.Close()\n\t\t}\n\t}\n\n\tbar.Finish()\n\n\tfileLength := len(fileList)\n\n\tfor i := 0; i < fileLength-1; i++ {\n\t\tfor j := i + 1; j < fileLength; j++ { \/\/ include the final file in comparisons\n\n\t\t\tfilename1 := fileList[i]\n\t\t\tfilename2 := fileList[j]\n\n\t\t\tavgdata1, ok1 := imgdata[filename1]\n\t\t\tavgdata2, ok2 := imgdata[filename2]\n\n\t\t\tif ok1 && ok2 {\n\n\t\t\t\tif filename1 == filename2 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tvar xdiff uint64 = 0\n\n\t\t\t\tfor rX := 0; rX < *subdivisions; rX++ {\n\t\t\t\t\tfor rY := 0; rY < *subdivisions; rY++ {\n\t\t\t\t\t\taa := avgdata1[rX][rY]\n\t\t\t\t\t\tbb := avgdata2[rX][rY]\n\n\t\t\t\t\t\txdiff += absdiff(absdiff(absdiff(aa[0], bb[0]), absdiff(aa[1], bb[1])), absdiff(aa[2], bb[2]))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif xdiff < uint64(*tolerance) {\n\t\t\t\t\tfmt.Println(filename1, filename2)\n\t\t\t\t\tfmt.Println(xdiff)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n}\n\nfunc getFiles(paths []string) []string {\n\tvar fileList []string\n\n\tfor _, imgpath := range paths {\n\n\t\tfile, err := os.Open(imgpath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfi, err := file.Stat()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\t\/\/ fmt.Println(\"directory\")\n\t\t\tfilepath.Walk(imgpath, func(path string, f os.FileInfo, err error) error {\n\n\t\t\t\tsubmode := f.Mode()\n\t\t\t\tif submode.IsRegular() {\n\t\t\t\t\tfpath, _ := filepath.Abs(path)\n\t\t\t\t\tfileList = append(fileList, fpath)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase mode.IsRegular():\n\t\t\t\/\/ fmt.Println(\"file\")\n\t\t\tfpath, _ := filepath.Abs(imgpath)\n\t\t\tfileList = append(fileList, fpath)\n\t\t}\n\n\t\tfile.Close()\n\n\t}\n\n\treturn fileList\n}\n<|endoftext|>"} {"text":"package coordinator\n\nimport (\n\t\"bytes\"\n\tlog \"code.google.com\/p\/log4go\"\n\t\"configuration\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"protocol\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_ROOT_PWD = \"root\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath string\n\trouter 
*mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tmutex sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n\tconfig *configuration.Configuration\n}\n\nvar registeredCommands bool\nvar replicateWrite = protocol.Request_REPLICATION_WRITE\nvar replicateDelete = protocol.Request_REPLICATION_DELETE\n\n\/\/ Creates a new server.\nfunc NewRaftServer(config *configuration.Configuration, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\tregisteredCommands = true\n\t\tfor _, command := range internalRaftCommands {\n\t\t\traft.RegisterCommand(command)\n\t\t}\n\t}\n\n\ts := &RaftServer{\n\t\thost: config.HostnameOrDetect(),\n\t\tport: config.RaftServerPort,\n\t\tpath: config.RaftDir,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t\tconfig: config,\n\t}\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(s.path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\tvar i uint64\n\t\tif _, err := os.Stat(\"\/dev\/random\"); err == nil {\n\t\t\tlog.Info(\"Using \/dev\/random to initialize the raft server name\")\n\t\t\tf, err := os.Open(\"\/dev\/random\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tb := make([]byte, 8)\n\t\t\t_, err = f.Read(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\ti, err = binary.ReadUvarint(bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Info(\"Using rand package to generate raft server name\")\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\ti = uint64(rand.Int())\n\t\t}\n\t\ts.name = fmt.Sprintf(\"%07x\", i)[0:7]\n\t\tlog.Info(\"Setting raft name to %s\", s.name)\n\t\tif err = ioutil.WriteFile(filepath.Join(s.path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tvalue, err := s.raftServer.Do(command)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot run command %#v. 
%s\", command, err)\n\t\t}\n\t\treturn value, err\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string, replicationFactor uint8) error {\n\tif replicationFactor == 0 {\n\t\treplicationFactor = 1\n\t}\n\tcommand := NewCreateDatabaseCommand(name, replicationFactor)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) ChangeDbUserPassword(db, username string, hash []byte) error {\n\tcommand := NewChangeDbUserPasswordCommand(db, username, string(hash))\n\t_, err := s.doOrProxyCommand(command, \"change_db_user_password\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\thash, _ := hashPassword(DEFAULT_ROOT_PWD)\n\tu.changePassword(string(hash))\n\treturn s.SaveClusterAdminUser(u)\n}\n\nfunc (s *RaftServer) ActivateServer(server *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) AddServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) MovePotentialServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) ReplaceServer(oldServer *ClusterServer, replacement *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft() error {\n\tlog.Info(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif !s.raftServer.IsLogEmpty() {\n\t\tlog.Info(\"Recovered from log\")\n\t\treturn nil\n\t}\n\n\tpotentialLeaders := s.config.SeedServers\n\n\tif len(potentialLeaders) == 0 {\n\t\tlog.Info(\"Starting as new Raft leader...\")\n\t\tname := s.raftServer.Name()\n\t\tconnectionString := s.connectionString()\n\t\t_, err := s.raftServer.Do(&InfluxJoinCommand{\n\t\t\tName: name,\n\t\t\tConnectionString: connectionString,\n\t\t\tProtobufConnectionString: 
s.config.ProtobufConnectionString(),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{\n\t\t\tRaftName: name,\n\t\t\tRaftConnectionString: connectionString,\n\t\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t\t})\n\t\t_, err = s.doOrProxyCommand(command, \"add_server\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = s.CreateRootUser()\n\t\treturn err\n\t}\n\n\tfor {\n\t\tfor _, leader := range potentialLeaders {\n\t\t\tlog.Info(\"(raft:%s) Attempting to join leader: %s\", s.raftServer.Name(), leader)\n\n\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\tlog.Info(\"Joined: %s\", leader)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tlog.Warn(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) ListenAndServe() error {\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s.Serve(l)\n}\n\nfunc (s *RaftServer) Serve(l net.Listener) error {\n\ts.port = l.Addr().(*net.TCPAddr).Port\n\ts.listener = l\n\n\tlog.Info(\"Initializing Raft HTTP server\")\n\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Info(\"Raft Server Listening at %s\", s.connectionString())\n\n\tgo func() {\n\t\ts.httpServer.Serve(l)\n\t}()\n\tstarted := make(chan error)\n\tgo func() {\n\t\tstarted <- s.startRaft()\n\t}()\n\terr := <-started\n\t\/\/\ttime.Sleep(3 * time.Second)\n\treturn err\n}\n\nfunc (self *RaftServer) Close() {\n\t\/\/ only close once, and never call Stop on a raft server that was never started\n\tif !self.closing && self.raftServer != nil {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &InfluxJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tlog.Debug(\"(raft:%s) Posting to seed server %s\", s.raftServer.Name(), connectUrl)\n\ttr := &http.Transport{\n\t\tResponseHeaderTimeout: time.Second,\n\t}\n\tclient := &http.Client{Transport: tr}\n\tresp, err := client.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Debug(\"Redirected to %s to join leader\\\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor retries = retries; 
retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &InfluxJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{RaftName: command.Name, RaftConnectionString: command.ConnectionString, ProtobufConnectionString: command.ProtobufConnectionString})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\tlog.Error(\"Error joining raft server: \", err, command)\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Debug(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseReplicationFactors {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Error(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tcommand := internalRaftCommands[value]\n\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Error(\"command %T failed: %s\", command, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\nformattingpackage coordinator\n\nimport (\n\t\"bytes\"\n\tlog \"code.google.com\/p\/log4go\"\n\t\"configuration\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"protocol\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_ROOT_PWD = 
\"root\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath string\n\trouter *mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tmutex sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n\tconfig *configuration.Configuration\n}\n\nvar registeredCommands bool\nvar replicateWrite = protocol.Request_REPLICATION_WRITE\nvar replicateDelete = protocol.Request_REPLICATION_DELETE\n\n\/\/ Creates a new server.\nfunc NewRaftServer(config *configuration.Configuration, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\tregisteredCommands = true\n\t\tfor _, command := range internalRaftCommands {\n\t\t\traft.RegisterCommand(command)\n\t\t}\n\t}\n\n\ts := &RaftServer{\n\t\thost: config.HostnameOrDetect(),\n\t\tport: config.RaftServerPort,\n\t\tpath: config.RaftDir,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t\tconfig: config,\n\t}\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(s.path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\tvar i uint64\n\t\tif _, err := os.Stat(\"\/dev\/random\"); err == nil {\n\t\t\tlog.Info(\"Using \/dev\/random to initialize the raft server name\")\n\t\t\tf, err := os.Open(\"\/dev\/random\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tb := make([]byte, 8)\n\t\t\t_, err = f.Read(b)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\ti, err = binary.ReadUvarint(bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Info(\"Using rand package to generate raft server name\")\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\ti = uint64(rand.Int())\n\t\t}\n\t\ts.name = fmt.Sprintf(\"%07x\", i)[0:7]\n\t\tlog.Info(\"Setting raft name to %s\", s.name)\n\t\tif err = ioutil.WriteFile(filepath.Join(s.path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tvalue, err := s.raftServer.Do(command)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot run command %#v. 
%s\", command, err)\n\t\t}\n\t\treturn value, err\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string, replicationFactor uint8) error {\n\tif replicationFactor == 0 {\n\t\treplicationFactor = 1\n\t}\n\tcommand := NewCreateDatabaseCommand(name, replicationFactor)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) ChangeDbUserPassword(db, username string, hash []byte) error {\n\tcommand := NewChangeDbUserPasswordCommand(db, username, string(hash))\n\t_, err := s.doOrProxyCommand(command, \"change_db_user_password\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\thash, _ := hashPassword(DEFAULT_ROOT_PWD)\n\tu.changePassword(string(hash))\n\treturn s.SaveClusterAdminUser(u)\n}\n\nfunc (s *RaftServer) ActivateServer(server *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) AddServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) MovePotentialServer(server *ClusterServer, insertIndex int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) ReplaceServer(oldServer *ClusterServer, replacement *ClusterServer) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft() error {\n\tlog.Info(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif !s.raftServer.IsLogEmpty() {\n\t\tlog.Info(\"Recovered from log\")\n\t\treturn nil\n\t}\n\n\tpotentialLeaders := s.config.SeedServers\n\n\tif len(potentialLeaders) == 0 {\n\t\tlog.Info(\"Starting as new Raft leader...\")\n\t\tname := s.raftServer.Name()\n\t\tconnectionString := s.connectionString()\n\t\t_, err := s.raftServer.Do(&InfluxJoinCommand{\n\t\t\tName: name,\n\t\t\tConnectionString: connectionString,\n\t\t\tProtobufConnectionString: 
s.config.ProtobufConnectionString(),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{\n\t\t\tRaftName: name,\n\t\t\tRaftConnectionString: connectionString,\n\t\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t\t})\n\t\t_, err = s.doOrProxyCommand(command, \"add_server\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = s.CreateRootUser()\n\t\treturn err\n\t}\n\n\tfor {\n\t\tfor _, leader := range potentialLeaders {\n\t\t\tlog.Info(\"(raft:%s) Attempting to join leader: %s\", s.raftServer.Name(), leader)\n\n\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\tlog.Info(\"Joined: %s\", leader)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tlog.Warn(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) ListenAndServe() error {\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s.Serve(l)\n}\n\nfunc (s *RaftServer) Serve(l net.Listener) error {\n\ts.port = l.Addr().(*net.TCPAddr).Port\n\ts.listener = l\n\n\tlog.Info(\"Initializing Raft HTTP server\")\n\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Info(\"Raft Server Listening at %s\", s.connectionString())\n\n\tgo func() {\n\t\ts.httpServer.Serve(l)\n\t}()\n\tstarted := make(chan error)\n\tgo func() {\n\t\tstarted <- s.startRaft()\n\t}()\n\terr := <-started\n\t\/\/\ttime.Sleep(3 * time.Second)\n\treturn err\n}\n\nfunc (self *RaftServer) Close() {\n\t\/\/ only close once, and never call Stop on a raft server that was never started\n\tif !self.closing && self.raftServer != nil {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &InfluxJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t\tProtobufConnectionString: s.config.ProtobufConnectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tlog.Debug(\"(raft:%s) Posting to seed server %s\", s.raftServer.Name(), connectUrl)\n\ttr := &http.Transport{\n\t\tResponseHeaderTimeout: time.Second,\n\t}\n\tclient := &http.Client{Transport: tr}\n\tresp, err := client.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Debug(\"Redirected to %s to join leader\\\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor retries = retries; 
retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &InfluxJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\tlog.Info(\"Adding new server to the cluster config %s\", command.Name)\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{\n\t\t\t\tRaftName: command.Name,\n\t\t\t\tRaftConnectionString: command.ConnectionString,\n\t\t\t\tProtobufConnectionString: command.ProtobufConnectionString,\n\t\t\t})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\tlog.Error(\"Error joining raft server: \", err, command)\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Debug(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseReplicationFactors {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Error(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tcommand := internalRaftCommands[value]\n\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Error(\"command %T failed: %s\", command, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package hdfs\n\nimport (\n\t\"fmt\"\n\t. 
\"github.com\/eaciit\/hdc\/hdfs\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc killApp(code int) {\n\tos.Exit(code)\n}\n\nvar h *WebHdfs\nvar e error\n\nfunc TestConnect(t *testing.T) {\n\th, e = NewWebHdfs(NewHdfsConfig(\"http:\/\/192.168.0.223:50070\", \"hdfs\"))\n\tif e != nil {\n\t\tt.Fatalf(e.Error())\n\t\tdefer killApp(1000)\n\t}\n\th.Config.TimeOut = 2 * time.Millisecond\n\th.Config.PoolSize = 100\n}\n\nfunc TestList(t *testing.T) {\n\tlist, err := h.List(\"\/\")\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t\tdefer killApp(1000)\n\t}\n\tlog.Println(list)\n}\n\nfunc TestDelete(t *testing.T) {\n\tif es := h.Delete(true, \"\/user\/ariefdarmawan\"); es != nil {\n\t\tt.Errorf(\"%s\", func() string {\n\t\t\ts := \"\"\n\t\t\tfor k, e := range es {\n\t\t\t\ts += fmt.Sprintf(\"%s = %s\", k, e.Error())\n\t\t\t}\n\t\t\treturn s\n\t\t}())\n\t}\n}\n\nfunc TestCreateDir(t *testing.T) {\n\tes := h.MakeDirs([]string{\"\/user\/ariefdarmawan\/inbox\", \"\/user\/ariefdarmawan\/temp\", \"\/user\/ariefdarmawan\/outbox\"}, \"\")\n\tif es != nil {\n\t\tfor k, v := range es {\n\t\t\tt.Error(fmt.Sprintf(\"Error when create %v : %v \\n\", k, v))\n\t\t}\n\t}\n}\n\nfunc TestChangeOwner(t *testing.T) {\n\tif e = h.SetOwner(\"\/user\/ariefdarmawan\", \"ariefdarmawan\", \"\"); e != nil {\n\t\tt.Error(e.Error())\n\t}\n}\n\n\/*\n\tfmt.Println(\">>>> TEST COPY DIR <<<<\")\n\te, es = h.PutDir(\"\/Users\/ariefdarmawan\/Temp\/ECFZ\/TempVisa\/JSON\", \"\/user\/ariefdarmawan\/inbox\/ecfz\/json\")\n\tif es != nil {\n\t\tfor k, v := range es {\n\t\t\tt.Error(fmt.Sprintf(\"Error when create %v : %v \\n\", k, v))\n\t\t}\n\t}\n*\/\n\nfunc TestPutFile(t *testing.T) {\n\te = h.Put(\"d:\/\/test.txt\", \"\/user\/ariefdarmawan\/inbox\/test.txt\", \"\", nil)\n\tif e != nil {\n\t\tt.Error(e.Error())\n\t}\n}\n\nfunc TestGetStatus(t *testing.T) {\n\thdata, e := h.List(\"\/user\/ariefdarmawan\")\n\tif e != nil {\n\t\tt.Error(e.Error())\n\t} else {\n\t\tfmt.Printf(\"Data Processed :\\n%v\\n\", len(hdata.FileStatuses.FileStatus))\n\t}\n}\nupdatepackage hdfs\n\nimport (\n\t\"fmt\"\n\t. 
\"github.com\/eaciit\/hdc\/hdfs\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc killApp(code int) {\n\tos.Exit(code)\n}\n\nvar h *WebHdfs\nvar e error\n\nfunc TestConnect(t *testing.T) {\n\th, e = NewWebHdfs(NewHdfsConfig(\"http:\/\/192.168.0.223:50070\", \"hdfs\"))\n\tif e != nil {\n\t\tt.Fatalf(e.Error())\n\t\tdefer killApp(1000)\n\t}\n\th.Config.TimeOut = 2 * time.Millisecond\n\th.Config.PoolSize = 100\n}\n\nfunc TestList(t *testing.T) {\n\tlist, err := h.List(\"http:\/\/192.168.0.223:50070\/\")\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t\tdefer killApp(1000)\n\t}\n\tlog.Println(list)\n}\n\nfunc TestDelete(t *testing.T) {\n\tif es := h.Delete(true, \"\/user\/ariefdarmawan\"); es != nil {\n\t\tt.Errorf(\"%s\", func() string {\n\t\t\ts := \"\"\n\t\t\tfor k, e := range es {\n\t\t\t\ts += fmt.Sprintf(\"%s = %s\", k, e.Error())\n\t\t\t}\n\t\t\treturn s\n\t\t}())\n\t}\n}\n\nfunc TestCreateDir(t *testing.T) {\n\tes := h.MakeDirs([]string{\"\/user\/ariefdarmawan\/inbox\", \"\/user\/ariefdarmawan\/temp\", \"\/user\/ariefdarmawan\/outbox\"}, \"\")\n\tif es != nil {\n\t\tfor k, v := range es {\n\t\t\tt.Error(fmt.Sprintf(\"Error when create %v : %v \\n\", k, v))\n\t\t}\n\t}\n}\n\nfunc TestChangeOwner(t *testing.T) {\n\tif e = h.SetOwner(\"\/user\/ariefdarmawan\", \"ariefdarmawan\", \"\"); e != nil {\n\t\tt.Error(e.Error())\n\t}\n}\n\n\/*\n\tfmt.Println(\">>>> TEST COPY DIR <<<<\")\n\te, es = h.PutDir(\"\/Users\/ariefdarmawan\/Temp\/ECFZ\/TempVisa\/JSON\", \"\/user\/ariefdarmawan\/inbox\/ecfz\/json\")\n\tif es != nil {\n\t\tfor k, v := range es {\n\t\t\tt.Error(fmt.Sprintf(\"Error when create %v : %v \\n\", k, v))\n\t\t}\n\t}\n*\/\n\nfunc TestPutFile(t *testing.T) {\n\te = h.Put(\"d:\/\/test.txt\", \"\/user\/ariefdarmawan\/inbox\/test.txt\", \"\", nil)\n\tif e != nil {\n\t\tt.Error(e.Error())\n\t}\n}\n\nfunc TestGetStatus(t *testing.T) {\n\thdata, e := h.List(\"\/user\/ariefdarmawan\")\n\tif e != nil {\n\t\tt.Error(e.Error())\n\t} else {\n\t\tfmt.Printf(\"Data Processed :\\n%v\\n\", len(hdata.FileStatuses.FileStatus))\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage keymanager_test\n\nimport (\n\t\"strings\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"fmt\"\n\tjujutesting \"launchpad.net\/juju-core\/juju\/testing\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/common\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/keymanager\"\n\tapiservertesting \"launchpad.net\/juju-core\/state\/apiserver\/testing\"\n\tstatetesting \"launchpad.net\/juju-core\/state\/testing\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n\tsshtesting \"launchpad.net\/juju-core\/utils\/ssh\/testing\"\n)\n\ntype keyManagerSuite struct {\n\tjujutesting.JujuConnSuite\n\n\tkeymanager *keymanager.KeyManagerAPI\n\tresources *common.Resources\n\tauthoriser apiservertesting.FakeAuthorizer\n}\n\nvar _ = gc.Suite(&keyManagerSuite{})\n\nfunc (s *keyManagerSuite) SetUpTest(c *gc.C) {\n\ts.JujuConnSuite.SetUpTest(c)\n\ts.resources = common.NewResources()\n\ts.AddCleanup(func(_ *gc.C) { s.resources.StopAll() })\n\n\ts.authoriser = apiservertesting.FakeAuthorizer{\n\t\tTag: \"user-admin\",\n\t\tLoggedIn: true,\n\t\tClient: true,\n\t}\n\tvar err error\n\ts.keymanager, err = keymanager.NewKeyManagerAPI(s.State, s.resources, s.authoriser)\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *keyManagerSuite) TestNewKeyManagerAPIAcceptsClient(c *gc.C) {\n\tendPoint, err := 
keymanager.NewKeyManagerAPI(s.State, s.resources, s.authoriser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(endPoint, gc.NotNil)\n}\n\nfunc (s *keyManagerSuite) TestNewKeyManagerAPIRefusesNonClient(c *gc.C) {\n\tanAuthoriser := s.authoriser\n\tanAuthoriser.Client = false\n\tendPoint, err := keymanager.NewKeyManagerAPI(s.State, s.resources, anAuthoriser)\n\tc.Assert(endPoint, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"permission denied\")\n}\n\nfunc (s *keyManagerSuite) setAuthorisedKeys(c *gc.C, keys string) {\n\terr := statetesting.UpdateConfig(s.State, map[string]interface{}{\"authorized-keys\": keys})\n\tc.Assert(err, gc.IsNil)\n\tenvConfig, err := s.State.EnvironConfig()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(envConfig.AuthorizedKeys(), gc.Equals, keys)\n}\n\nfunc (s *keyManagerSuite) TestListKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\ts.setAuthorisedKeys(c, strings.Join([]string{key1, key2, \"bad key\"}, \"\\n\"))\n\n\targs := params.ListSSHKeys{\n\t\tEntities: params.Entities{[]params.Entity{\n\t\t\t{Tag: \"admin\"},\n\t\t\t{Tag: \"invalid\"},\n\t\t}},\n\t\tMode: ssh.FullKeys,\n\t}\n\tresults, err := s.keymanager.ListKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.StringsResults{\n\t\tResults: []params.StringsResult{\n\t\t\t{Result: []string{key1, key2, \"Invalid key: bad key\"}},\n\t\t\t{Error: apiservertesting.ErrUnauthorized},\n\t\t},\n\t})\n}\n\nfunc (s *keyManagerSuite) assertEnvironKeys(c *gc.C, expected []string) {\n\tenvConfig, err := s.State.EnvironConfig()\n\tc.Assert(err, gc.IsNil)\n\tkeys := envConfig.AuthorizedKeys()\n\tc.Assert(keys, gc.Equals, strings.Join(expected, \"\\n\"))\n}\n\nfunc (s *keyManagerSuite) TestAddKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2, \"bad key\"}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\tnewKey := sshtesting.ValidKeyThree.Key + \" newuser@host\"\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{key2, newKey, \"invalid-key\"},\n\t}\n\tresults, err := s.keymanager.AddKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.ErrorResults{\n\t\tResults: []params.ErrorResult{\n\t\t\t{Error: apiservertesting.ServerError(fmt.Sprintf(\"duplicate ssh key: %s\", key2))},\n\t\t\t{Error: nil},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: invalid-key\")},\n\t\t},\n\t})\n\ts.assertEnvironKeys(c, append(initialKeys, newKey))\n}\n\nfunc (s *keyManagerSuite) TestDeleteKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2, \"bad key\"}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{sshtesting.ValidKeyTwo.Fingerprint, sshtesting.ValidKeyThree.Fingerprint, \"invalid-key\"},\n\t}\n\tresults, err := s.keymanager.DeleteKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.ErrorResults{\n\t\tResults: []params.ErrorResult{\n\t\t\t{Error: nil},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: \" + sshtesting.ValidKeyThree.Fingerprint)},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: invalid-key\")},\n\t\t},\n\t})\n\ts.assertEnvironKeys(c, []string{\"bad key\", key1})\n}\n\nfunc (s *keyManagerSuite) TestCannotDeleteAllKeys(c *gc.C) 
{\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{sshtesting.ValidKeyTwo.Fingerprint, \"user@host\"},\n\t}\n\t_, err := s.keymanager.DeleteKeys(args)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete all keys\")\n\ts.assertEnvironKeys(c, initialKeys)\n}\n\nfunc (s *keyManagerSuite) assertInvalidUserOperation(c *gc.C, test func(args params.ModifyUserSSHKeys) error) {\n\tinitialKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\ts.setAuthorisedKeys(c, initialKey)\n\n\t\/\/ Set up the params.\n\tnewKey := sshtesting.ValidKeyThree.Key + \" newuser@host\"\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"invalid\",\n\t\tKeys: []string{newKey},\n\t}\n\t\/\/ Run the required test code and check the error.\n\terr := test(args)\n\tc.Assert(err, gc.DeepEquals, apiservertesting.ErrUnauthorized)\n\n\t\/\/ No environ changes.\n\ts.assertEnvironKeys(c, []string{initialKey})\n}\n\nfunc (s *keyManagerSuite) TestAddKeysInvalidUser(c *gc.C) {\n\ts.assertInvalidUserOperation(c, func(args params.ModifyUserSSHKeys) error {\n\t\t_, err := s.keymanager.AddKeys(args)\n\t\treturn err\n\t})\n}\n\nfunc (s *keyManagerSuite) TestDeleteKeysInvalidUser(c *gc.C) {\n\ts.assertInvalidUserOperation(c, func(args params.ModifyUserSSHKeys) error {\n\t\t_, err := s.keymanager.DeleteKeys(args)\n\t\treturn err\n\t})\n}\nTweak func name\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage keymanager_test\n\nimport (\n\t\"strings\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"fmt\"\n\tjujutesting \"launchpad.net\/juju-core\/juju\/testing\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/common\"\n\t\"launchpad.net\/juju-core\/state\/apiserver\/keymanager\"\n\tapiservertesting \"launchpad.net\/juju-core\/state\/apiserver\/testing\"\n\tstatetesting \"launchpad.net\/juju-core\/state\/testing\"\n\t\"launchpad.net\/juju-core\/utils\/ssh\"\n\tsshtesting \"launchpad.net\/juju-core\/utils\/ssh\/testing\"\n)\n\ntype keyManagerSuite struct {\n\tjujutesting.JujuConnSuite\n\n\tkeymanager *keymanager.KeyManagerAPI\n\tresources *common.Resources\n\tauthoriser apiservertesting.FakeAuthorizer\n}\n\nvar _ = gc.Suite(&keyManagerSuite{})\n\nfunc (s *keyManagerSuite) SetUpTest(c *gc.C) {\n\ts.JujuConnSuite.SetUpTest(c)\n\ts.resources = common.NewResources()\n\ts.AddCleanup(func(_ *gc.C) { s.resources.StopAll() })\n\n\ts.authoriser = apiservertesting.FakeAuthorizer{\n\t\tTag: \"user-admin\",\n\t\tLoggedIn: true,\n\t\tClient: true,\n\t}\n\tvar err error\n\ts.keymanager, err = keymanager.NewKeyManagerAPI(s.State, s.resources, s.authoriser)\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (s *keyManagerSuite) TestNewKeyManagerAPIAcceptsClient(c *gc.C) {\n\tendPoint, err := keymanager.NewKeyManagerAPI(s.State, s.resources, s.authoriser)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(endPoint, gc.NotNil)\n}\n\nfunc (s *keyManagerSuite) TestNewKeyManagerAPIRefusesNonClient(c *gc.C) {\n\tanAuthoriser := s.authoriser\n\tanAuthoriser.Client = false\n\tendPoint, err := keymanager.NewKeyManagerAPI(s.State, s.resources, anAuthoriser)\n\tc.Assert(endPoint, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"permission denied\")\n}\n\nfunc (s *keyManagerSuite) setAuthorisedKeys(c *gc.C, keys string) {\n\terr := statetesting.UpdateConfig(s.State, 
map[string]interface{}{\"authorized-keys\": keys})\n\tc.Assert(err, gc.IsNil)\n\tenvConfig, err := s.State.EnvironConfig()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(envConfig.AuthorizedKeys(), gc.Equals, keys)\n}\n\nfunc (s *keyManagerSuite) TestListKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\ts.setAuthorisedKeys(c, strings.Join([]string{key1, key2, \"bad key\"}, \"\\n\"))\n\n\targs := params.ListSSHKeys{\n\t\tEntities: params.Entities{[]params.Entity{\n\t\t\t{Tag: \"admin\"},\n\t\t\t{Tag: \"invalid\"},\n\t\t}},\n\t\tMode: ssh.FullKeys,\n\t}\n\tresults, err := s.keymanager.ListKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.StringsResults{\n\t\tResults: []params.StringsResult{\n\t\t\t{Result: []string{key1, key2, \"Invalid key: bad key\"}},\n\t\t\t{Error: apiservertesting.ErrUnauthorized},\n\t\t},\n\t})\n}\n\nfunc (s *keyManagerSuite) assertEnvironKeys(c *gc.C, expected []string) {\n\tenvConfig, err := s.State.EnvironConfig()\n\tc.Assert(err, gc.IsNil)\n\tkeys := envConfig.AuthorizedKeys()\n\tc.Assert(keys, gc.Equals, strings.Join(expected, \"\\n\"))\n}\n\nfunc (s *keyManagerSuite) TestAddKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2, \"bad key\"}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\tnewKey := sshtesting.ValidKeyThree.Key + \" newuser@host\"\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{key2, newKey, \"invalid-key\"},\n\t}\n\tresults, err := s.keymanager.AddKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.ErrorResults{\n\t\tResults: []params.ErrorResult{\n\t\t\t{Error: apiservertesting.ServerError(fmt.Sprintf(\"duplicate ssh key: %s\", key2))},\n\t\t\t{Error: nil},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: invalid-key\")},\n\t\t},\n\t})\n\ts.assertEnvironKeys(c, append(initialKeys, newKey))\n}\n\nfunc (s *keyManagerSuite) TestDeleteKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2, \"bad key\"}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{sshtesting.ValidKeyTwo.Fingerprint, sshtesting.ValidKeyThree.Fingerprint, \"invalid-key\"},\n\t}\n\tresults, err := s.keymanager.DeleteKeys(args)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(results, gc.DeepEquals, params.ErrorResults{\n\t\tResults: []params.ErrorResult{\n\t\t\t{Error: nil},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: \" + sshtesting.ValidKeyThree.Fingerprint)},\n\t\t\t{Error: apiservertesting.ServerError(\"invalid ssh key: invalid-key\")},\n\t\t},\n\t})\n\ts.assertEnvironKeys(c, []string{\"bad key\", key1})\n}\n\nfunc (s *keyManagerSuite) TestCannotDeleteAllKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key\n\tinitialKeys := []string{key1, key2}\n\ts.setAuthorisedKeys(c, strings.Join(initialKeys, \"\\n\"))\n\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"admin\",\n\t\tKeys: []string{sshtesting.ValidKeyTwo.Fingerprint, \"user@host\"},\n\t}\n\t_, err := s.keymanager.DeleteKeys(args)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete all keys\")\n\ts.assertEnvironKeys(c, initialKeys)\n}\n\nfunc (s *keyManagerSuite) assertInvalidUserOperation(c *gc.C, runTestLogic 
func(args params.ModifyUserSSHKeys) error) {\n\tinitialKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\ts.setAuthorisedKeys(c, initialKey)\n\n\t\/\/ Set up the params.\n\tnewKey := sshtesting.ValidKeyThree.Key + \" newuser@host\"\n\targs := params.ModifyUserSSHKeys{\n\t\tUser: \"invalid\",\n\t\tKeys: []string{newKey},\n\t}\n\t\/\/ Run the required test code and check the error.\n\terr := runTestLogic(args)\n\tc.Assert(err, gc.DeepEquals, apiservertesting.ErrUnauthorized)\n\n\t\/\/ No environ changes.\n\ts.assertEnvironKeys(c, []string{initialKey})\n}\n\nfunc (s *keyManagerSuite) TestAddKeysInvalidUser(c *gc.C) {\n\ts.assertInvalidUserOperation(c, func(args params.ModifyUserSSHKeys) error {\n\t\t_, err := s.keymanager.AddKeys(args)\n\t\treturn err\n\t})\n}\n\nfunc (s *keyManagerSuite) TestDeleteKeysInvalidUser(c *gc.C) {\n\ts.assertInvalidUserOperation(c, func(args params.ModifyUserSSHKeys) error {\n\t\t_, err := s.keymanager.DeleteKeys(args)\n\t\treturn err\n\t})\n}\n<|endoftext|>"} {"text":"package coordinator\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath string\n\trouter *mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tclusterServer *ClusterServer\n\tmutex sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n}\n\n\/\/ const (\n\/\/ \tElectionTimeout = 200 * time.Millisecond\n\/\/ \tHeartbeatTimeout = 50 * time.Millisecond\n\/\/ )\n\nvar registeredCommands bool\n\n\/\/ Creates a new server.\nfunc NewRaftServer(path string, host string, port int, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\tregisteredCommands = true\n\t\traft.RegisterCommand(&AddPotentialServerCommand{})\n\t\traft.RegisterCommand(&UpdateServerStateCommand{})\n\t\traft.RegisterCommand(&CreateDatabaseCommand{})\n\t\traft.RegisterCommand(&DropDatabaseCommand{})\n\t\traft.RegisterCommand(&SaveDbUserCommand{})\n\t\traft.RegisterCommand(&SaveClusterAdminCommand{})\n\t}\n\ts := &RaftServer{\n\t\thost: host,\n\t\tport: port,\n\t\tpath: path,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t}\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) ClusterServer() *ClusterServer {\n\tif s.clusterServer != nil {\n\t\treturn s.clusterServer\n\t}\n\ts.clusterServer = s.clusterConfig.GetServerByRaftName(s.name)\n\treturn s.clusterServer\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\treturn s.raftServer.Do(command)\n\t} else 
{\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string) error {\n\tcommand := NewCreateDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\tu.changePassword(\"root\")\n\treturn s.SaveClusterAdminUser(u)\n}\n\n\/*\n\twhen a cluster is started up for the first time, all servers are listed in Potential state.\n\tWhen this call is made they're all switched over to Running state so they can accept reads and writes.\n*\/\nfunc (s *RaftServer) ActivateCluster() error {\n\tfor _, server := range s.clusterConfig.servers {\n\t\tcommand := NewUpdateServerStateCommand(server.Id, Running)\n\t\tif _, err := s.doOrProxyCommand(command, \"update_state\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft(potentialLeaders []string, retryUntilJoin bool) {\n\tlog.Printf(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts.raftServer.SetElectionTimeout(300 * time.Millisecond)\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif s.raftServer.IsLogEmpty() {\n\t\tfor {\n\t\t\tjoined := false\n\t\t\tfor _, leader := range potentialLeaders {\n\t\t\t\tlog.Println(\"Attempting to join leader: \", leader, s.port)\n\n\t\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\t\tjoined = true\n\t\t\t\t\tlog.Println(\"Joined: \", leader)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ couldn't join a leader so we must be the first one up\n\t\t\tif joined {\n\t\t\t\tbreak\n\t\t\t} else if !joined && !retryUntilJoin {\n\t\t\t\tlog.Println(\"Couldn't contact a leader so initializing new cluster for server on port: \", s.port)\n\n\t\t\t\tname := s.raftServer.Name()\n\t\t\t\tconnectionString := s.connectionString()\n\t\t\t\t_, err := s.raftServer.Do(&raft.DefaultJoinCommand{\n\t\t\t\t\tName: name,\n\t\t\t\t\tConnectionString: 
connectionString,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{RaftName: name, RaftConnectionString: connectionString})\n\t\t\t\ts.doOrProxyCommand(command, \"add_server\")\n\t\t\t\ts.CreateRootUser()\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t\/\/ sleep for a little bit and retry it\n\t\t\t\tlog.Println(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlog.Println(\"Recovered from log\")\n\t}\n}\n\nfunc (s *RaftServer) ListenAndServe(potentialLeaders []string, retryUntilJoin bool) error {\n\tgo s.startRaft(potentialLeaders, retryUntilJoin)\n\n\tlog.Println(\"Initializing Raft HTTP server\")\n\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Println(\"Listening at:\", s.connectionString())\n\n\ts.listener = l\n\treturn s.httpServer.Serve(l)\n}\n\nfunc (self *RaftServer) Close() {\n\tif !self.closing {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &raft.DefaultJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tresp, err := http.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: \", err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Printf(\"Redirected to %s to join leader\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor retries = retries; retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &raft.DefaultJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. 
just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{RaftName: command.Name, RaftConnectionString: command.ConnectionString})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Println(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseNames {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Println(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tvar command raft.Command\n\tif value == \"create_db\" {\n\t\tcommand = &CreateDatabaseCommand{}\n\t} else if value == \"drop_db\" {\n\t\tcommand = &DropDatabaseCommand{}\n\t} else if value == \"save_db_user\" {\n\t\tcommand = &SaveDbUserCommand{}\n\t} else if value == \"save_cluster_admin_user\" {\n\t\tcommand = &SaveClusterAdminCommand{}\n\t} else if value == \"update_state\" {\n\t\tcommand = &UpdateServerStateCommand{}\n\t} else if value == \"add_server\" {\n\t\tfmt.Println(\"add_server: \", s.name)\n\t\tcommand = &AddPotentialServerCommand{}\n\t}\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Println(\"ERROR processCommandHandler\", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\nadd more debug info.package coordinator\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ The raftd server is a combination of the Raft server and an HTTP\n\/\/ server which acts as the transport.\ntype RaftServer struct {\n\tname string\n\thost string\n\tport int\n\tpath string\n\trouter *mux.Router\n\traftServer raft.Server\n\thttpServer *http.Server\n\tclusterConfig *ClusterConfiguration\n\tclusterServer *ClusterServer\n\tmutex 
sync.RWMutex\n\tlistener net.Listener\n\tclosing bool\n}\n\n\/\/ const (\n\/\/ \tElectionTimeout = 200 * time.Millisecond\n\/\/ \tHeartbeatTimeout = 50 * time.Millisecond\n\/\/ )\n\nvar registeredCommands bool\n\n\/\/ Creates a new server.\nfunc NewRaftServer(path string, host string, port int, clusterConfig *ClusterConfiguration) *RaftServer {\n\tif !registeredCommands {\n\t\traft.SetLogLevel(raft.Trace)\n\t\tregisteredCommands = true\n\t\traft.RegisterCommand(&AddPotentialServerCommand{})\n\t\traft.RegisterCommand(&UpdateServerStateCommand{})\n\t\traft.RegisterCommand(&CreateDatabaseCommand{})\n\t\traft.RegisterCommand(&DropDatabaseCommand{})\n\t\traft.RegisterCommand(&SaveDbUserCommand{})\n\t\traft.RegisterCommand(&SaveClusterAdminCommand{})\n\t}\n\ts := &RaftServer{\n\t\thost: host,\n\t\tport: port,\n\t\tpath: path,\n\t\tclusterConfig: clusterConfig,\n\t\trouter: mux.NewRouter(),\n\t}\n\t\/\/ Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) ClusterServer() *ClusterServer {\n\tif s.clusterServer != nil {\n\t\treturn s.clusterServer\n\t}\n\ts.clusterServer = s.clusterConfig.GetServerByRaftName(s.name)\n\treturn s.clusterServer\n}\n\nfunc (s *RaftServer) leaderConnectString() (string, bool) {\n\tleader := s.raftServer.Leader()\n\tpeers := s.raftServer.Peers()\n\tif peer, ok := peers[leader]; !ok {\n\t\treturn \"\", false\n\t} else {\n\t\treturn peer.ConnectionString, true\n\t}\n}\n\nfunc (s *RaftServer) doOrProxyCommand(command raft.Command, commandType string) (interface{}, error) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tvalue, err := s.raftServer.Do(command)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error: Cannot run command %#v. 
%s\", command, err)\n\t\t}\n\t\treturn value, err\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); !ok {\n\t\t\treturn nil, errors.New(\"Couldn't connect to the cluster leader...\")\n\t\t} else {\n\t\t\tvar b bytes.Buffer\n\t\t\tjson.NewEncoder(&b).Encode(command)\n\t\t\tresp, err := http.Post(leader+\"\/process_command\/\"+commandType, \"application\/json\", &b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err2 := ioutil.ReadAll(resp.Body)\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn nil, errors.New(strings.TrimSpace(string(body)))\n\t\t\t}\n\n\t\t\tvar js interface{}\n\t\t\tjson.Unmarshal(body, &js)\n\t\t\treturn js, err2\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *RaftServer) CreateDatabase(name string) error {\n\tcommand := NewCreateDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"create_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) DropDatabase(name string) error {\n\tcommand := NewDropDatabaseCommand(name)\n\t_, err := s.doOrProxyCommand(command, \"drop_db\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveDbUser(u *dbUser) error {\n\tcommand := NewSaveDbUserCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_db_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) SaveClusterAdminUser(u *clusterAdmin) error {\n\tcommand := NewSaveClusterAdminCommand(u)\n\t_, err := s.doOrProxyCommand(command, \"save_cluster_admin_user\")\n\treturn err\n}\n\nfunc (s *RaftServer) CreateRootUser() error {\n\tu := &clusterAdmin{CommonUser{\"root\", \"\", false}}\n\tu.changePassword(\"root\")\n\treturn s.SaveClusterAdminUser(u)\n}\n\n\/*\n\twhen a cluster is started up for the first time, all servers are listed in Potential state.\n\tWhen this call is made they're all switched over to Running state so they can accept reads and writes.\n*\/\nfunc (s *RaftServer) ActivateCluster() error {\n\tfor _, server := range s.clusterConfig.servers {\n\t\tcommand := NewUpdateServerStateCommand(server.Id, Running)\n\t\tif _, err := s.doOrProxyCommand(command, \"update_state\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *RaftServer) connectionString() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", s.host, s.port)\n}\n\nfunc (s *RaftServer) startRaft(potentialLeaders []string, retryUntilJoin bool) {\n\tlog.Printf(\"Initializing Raft Server: %s %d\", s.path, s.port)\n\n\t\/\/ Initialize and start Raft server.\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\tvar err error\n\ts.raftServer, err = raft.NewServer(s.name, s.path, transporter, nil, s.clusterConfig, \"\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.Start()\n\n\tif s.raftServer.IsLogEmpty() {\n\t\tfor {\n\t\t\tjoined := false\n\t\t\tfor _, leader := range potentialLeaders {\n\t\t\t\tlog.Println(\"Attempting to join leader: \", leader, s.port)\n\n\t\t\t\tif err := s.Join(leader); err == nil {\n\t\t\t\t\tjoined = true\n\t\t\t\t\tlog.Println(\"Joined: \", leader)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ couldn't join a leader so we must be the first one up\n\t\t\tif joined {\n\t\t\t\tbreak\n\t\t\t} else if !joined && !retryUntilJoin {\n\t\t\t\tlog.Println(\"Couldn't contact a leader so initializing new cluster for server on port: \", s.port)\n\n\t\t\t\tname := s.raftServer.Name()\n\t\t\t\tconnectionString := s.connectionString()\n\t\t\t\t_, err := s.raftServer.Do(&raft.DefaultJoinCommand{\n\t\t\t\t\tName: name,\n\t\t\t\t\tConnectionString: 
connectionString,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tcommand := NewAddPotentialServerCommand(&ClusterServer{RaftName: name, RaftConnectionString: connectionString})\n\t\t\t\ts.doOrProxyCommand(command, \"add_server\")\n\t\t\t\ts.CreateRootUser()\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t\/\/ sleep for a little bit and retry it\n\t\t\t\tlog.Println(\"Couldn't join any of the seeds, sleeping and retrying...\")\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlog.Println(\"Recovered from log\")\n\t}\n}\n\nfunc (s *RaftServer) ListenAndServe(potentialLeaders []string, retryUntilJoin bool) error {\n\tgo s.startRaft(potentialLeaders, retryUntilJoin)\n\n\tlog.Println(\"Initializing Raft HTTP server\")\n\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", s.port))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ Initialize and start HTTP server.\n\ts.httpServer = &http.Server{\n\t\tHandler: s.router,\n\t}\n\n\ts.router.HandleFunc(\"\/cluster_config\", s.configHandler).Methods(\"GET\")\n\ts.router.HandleFunc(\"\/join\", s.joinHandler).Methods(\"POST\")\n\ts.router.HandleFunc(\"\/process_command\/{command_type}\", s.processCommandHandler).Methods(\"POST\")\n\n\tlog.Println(\"Listening at:\", s.connectionString())\n\n\ts.listener = l\n\treturn s.httpServer.Serve(l)\n}\n\nfunc (self *RaftServer) Close() {\n\tif !self.closing {\n\t\tself.closing = true\n\t\tself.raftServer.Stop()\n\t\tself.listener.Close()\n\t}\n}\n\n\/\/ This is a hack around Gorilla mux not providing the correct net\/http\n\/\/ HandleFunc() interface.\nfunc (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}\n\n\/\/ Joins to the leader of an existing cluster.\nfunc (s *RaftServer) Join(leader string) error {\n\tcommand := &raft.DefaultJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: s.connectionString(),\n\t}\n\tconnectUrl := leader\n\tif !strings.HasPrefix(connectUrl, \"http:\/\/\") {\n\t\tconnectUrl = \"http:\/\/\" + connectUrl\n\t}\n\tif !strings.HasSuffix(connectUrl, \"\/join\") {\n\t\tconnectUrl = connectUrl + \"\/join\"\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\tresp, err := http.Post(connectUrl, \"application\/json\", &b)\n\tif err != nil {\n\t\tlog.Println(\"ERROR: \", err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusTemporaryRedirect {\n\t\taddress := resp.Header.Get(\"Location\")\n\t\tlog.Printf(\"Redirected to %s to join leader\\n\", address)\n\t\treturn s.Join(address)\n\t}\n\n\treturn nil\n}\n\nfunc (s *RaftServer) retryCommand(command raft.Command, retries int) (ret interface{}, err error) {\n\tfor retries = retries; retries > 0; retries-- {\n\t\tret, err = s.raftServer.Do(command)\n\t\tif err == nil {\n\t\t\treturn ret, nil\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tfmt.Println(\"Retrying RAFT command...\")\n\t}\n\treturn\n}\n\nfunc (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {\n\tif s.raftServer.State() == raft.Leader {\n\t\tcommand := &raft.DefaultJoinCommand{}\n\t\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t\/\/ during the test suite the join command will sometimes time out.. 
just retry a few times\n\t\tif _, err := s.raftServer.Do(command); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tserver := s.clusterConfig.GetServerByRaftName(command.Name)\n\t\t\/\/ it's a new server the cluster has never seen, make it a potential\n\t\tif server == nil {\n\t\t\taddServer := NewAddPotentialServerCommand(&ClusterServer{RaftName: command.Name, RaftConnectionString: command.ConnectionString})\n\t\t\tif _, err := s.raftServer.Do(addServer); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif leader, ok := s.leaderConnectString(); ok {\n\t\t\tlog.Println(\"redirecting to leader to join...\")\n\t\t\thttp.Redirect(w, req, leader+\"\/join\", http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\thttp.Error(w, errors.New(\"Couldn't find leader of the cluster to join\").Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}\n\nfunc (s *RaftServer) configHandler(w http.ResponseWriter, req *http.Request) {\n\tjsonObject := make(map[string]interface{})\n\tdbs := make([]string, 0)\n\tfor db, _ := range s.clusterConfig.databaseNames {\n\t\tdbs = append(dbs, db)\n\t}\n\tjsonObject[\"databases\"] = dbs\n\tjsonObject[\"cluster_admins\"] = s.clusterConfig.clusterAdmins\n\tjsonObject[\"database_users\"] = s.clusterConfig.dbUsers\n\tjs, err := json.Marshal(jsonObject)\n\tif err != nil {\n\t\tlog.Println(\"ERROR marshalling config: \", err)\n\t}\n\tw.Write(js)\n}\n\nfunc (s *RaftServer) marshalAndDoCommandFromBody(command raft.Command, req *http.Request) (interface{}, error) {\n\tif err := json.NewDecoder(req.Body).Decode(&command); err != nil {\n\t\treturn nil, err\n\t}\n\tif result, err := s.raftServer.Do(command); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn result, nil\n\t}\n}\n\nfunc (s *RaftServer) processCommandHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tvalue := vars[\"command_type\"]\n\tvar command raft.Command\n\tif value == \"create_db\" {\n\t\tcommand = &CreateDatabaseCommand{}\n\t} else if value == \"drop_db\" {\n\t\tcommand = &DropDatabaseCommand{}\n\t} else if value == \"save_db_user\" {\n\t\tcommand = &SaveDbUserCommand{}\n\t} else if value == \"save_cluster_admin_user\" {\n\t\tcommand = &SaveClusterAdminCommand{}\n\t} else if value == \"update_state\" {\n\t\tcommand = &UpdateServerStateCommand{}\n\t} else if value == \"add_server\" {\n\t\tfmt.Println(\"add_server: \", s.name)\n\t\tcommand = &AddPotentialServerCommand{}\n\t}\n\tif result, err := s.marshalAndDoCommandFromBody(command, req); err != nil {\n\t\tlog.Println(\"ERROR processCommandHanlder\", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tif result != nil {\n\t\t\tjs, _ := json.Marshal(result)\n\t\t\tw.Write(js)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package goscrape\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n)\n\ntype Session struct {\n\tConn *net.UDPConn\n\tConnID uint64\n\tURL string\n}\n\nfunc NewConn(url string) Session {\n\tconn, id, err := UDPConnect(url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn Session{conn, id, url}\n}\n\nfunc (sess Session) Scrape(btih string) (int, int, int, error) {\n\tif sess.Conn == nil {\n\t\treturn 0, 0, 0, errors.New(\"Session uninitialized.\")\n\t}\n\treturn UDPScrape(sess.Conn, sess.ConnID, btih)\n}\nRemove this debug.package goscrape\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\ntype Session struct {\n\tConn *net.UDPConn\n\tConnID 
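// The if/else chain in processCommandHandler above maps a URL parameter to a
// concrete raft.Command; a table of constructors keeps that mapping in one
// place and makes the unknown-type case explicit (as written, an unrecognized
// command_type leaves command nil). This is a sketch with a stand-in Command
// interface, since the real command types live in the goraft/influxdb packages.
package commanddispatch

import "fmt"

type Command interface{ CommandName() string }

type CreateDatabaseCommand struct{ Name string }

func (c *CreateDatabaseCommand) CommandName() string { return "create_db" }

type DropDatabaseCommand struct{ Name string }

func (c *DropDatabaseCommand) CommandName() string { return "drop_db" }

// commandFactories maps the command_type URL parameter to a constructor.
var commandFactories = map[string]func() Command{
	"create_db": func() Command { return &CreateDatabaseCommand{} },
	"drop_db":   func() Command { return &DropDatabaseCommand{} },
}

// newCommand returns a fresh command value for the given type, or an error
// for types the server does not recognize.
func newCommand(commandType string) (Command, error) {
	factory, ok := commandFactories[commandType]
	if !ok {
		return nil, fmt.Errorf("unknown command type %q", commandType)
	}
	return factory(), nil
}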
uint64\n\tURL string\n}\n\nfunc NewConn(url string) Session {\n\tconn, id, _ := UDPConnect(url)\n\treturn Session{conn, id, url}\n}\n\nfunc (sess Session) Scrape(btih string) (int, int, int, error) {\n\tif sess.Conn == nil {\n\t\treturn 0, 0, 0, errors.New(\"Session uninitialized.\")\n\t}\n\treturn UDPScrape(sess.Conn, sess.ConnID, btih)\n}\n<|endoftext|>"} {"text":"package gorethink\n\nimport (\n\t\"crypto\/tls\"\n\t\"sync\"\n\t\"time\"\n\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n)\n\n\/\/ A Session represents a connection to a RethinkDB cluster and should be used\n\/\/ when executing queries.\ntype Session struct {\n\thosts []Host\n\topts *ConnectOpts\n\n\tmu sync.RWMutex\n\tcluster *Cluster\n\tclosed bool\n}\n\n\/\/ ConnectOpts is used to specify optional arguments when connecting to a cluster.\ntype ConnectOpts struct {\n\tAddress string `gorethink:\"address,omitempty\"`\n\tAddresses []string `gorethink:\"addresses,omitempty\"`\n\tDatabase string `gorethink:\"database,omitempty\"`\n\tAuthKey string `gorethink:\"authkey,omitempty\"`\n\tTimeout time.Duration `gorethink:\"timeout,omitempty\"`\n\tWriteTimeout time.Duration `gorethink:\"write_timeout,omitempty\"`\n\tReadTimeout time.Duration `gorethink:\"read_timeout,omitempty\"`\n\tTLSConfig *tls.Config `gorethink:\"tlsconfig,omitempty\"`\n\n\tMaxIdle int `gorethink:\"max_idle,omitempty\"`\n\t\/\/ By default a maximum of 2 connections are opened per host.\n\tMaxOpen int `gorethink:\"max_open,omitempty\"`\n\n\t\/\/ Below options are for cluster discovery, please note there is a high\n\t\/\/ probability of these changing as the API is still being worked on.\n\n\t\/\/ DiscoverHosts is used to enable host discovery, when true the driver\n\t\/\/ will attempt to discover any new nodes added to the cluster and then\n\t\/\/ start sending queries to these new nodes.\n\tDiscoverHosts bool `gorethink:\"discover_hosts,omitempty\"`\n\t\/\/ NodeRefreshInterval is used to determine how often the driver should\n\t\/\/ refresh the status of a node.\n\t\/\/\n\t\/\/ Deprecated: This function is no longer used due to changes in the\n\t\/\/ way hosts are selected.\n\tNodeRefreshInterval time.Duration `gorethink:\"node_refresh_interval,omitempty\"`\n\t\/\/ HostDecayDuration is used by the go-hostpool package to calculate a weighted\n\t\/\/ score when selecting a host. By default a value of 5 minutes is used.\n\tHostDecayDuration time.Duration\n\n\t\/\/ Indicates whether the cursors running in this session should use json.Number instead of float64 while\n\t\/\/ unmarshaling documents with interface{}. The default is `false`.\n\tUseJSONNumber bool\n}\n\nfunc (o *ConnectOpts) toMap() map[string]interface{} {\n\treturn optArgsToMap(o)\n}\n\n\/\/ Connect creates a new database session. To view the available connection\n\/\/ options see ConnectOpts.\n\/\/\n\/\/ By default maxIdle and maxOpen are set to 1: passing values greater\n\/\/ than the default (e.g. 
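// NewConn above now discards the UDPConnect error, so a failed tracker
// connection only surfaces later as "Session uninitialized." when Scrape is
// called. A sketch of an error-returning constructor callers could use to
// fail fast; Session is mirrored from the struct above and udpConnect stands
// in for the package's UDPConnect helper (signature inferred from its call site).
package goscrapesketch

import "net"

// Session mirrors the struct defined above.
type Session struct {
	Conn   *net.UDPConn
	ConnID uint64
	URL    string
}

// udpConnect is a stand-in for UDPConnect; wire it to the real helper.
var udpConnect func(url string) (*net.UDPConn, uint64, error)

// NewConnChecked propagates the connect error instead of swallowing it.
func NewConnChecked(url string) (Session, error) {
	conn, id, err := udpConnect(url)
	if err != nil {
		return Session{}, err
	}
	return Session{conn, id, url}, nil
}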
MaxIdle: \"10\", MaxOpen: \"20\") will provide a\n\/\/ pool of re-usable connections.\n\/\/\n\/\/ Basic connection example:\n\/\/\n\/\/ \tsession, err := r.Connect(r.ConnectOpts{\n\/\/ \t\tHost: \"localhost:28015\",\n\/\/ \t\tDatabase: \"test\",\n\/\/ \t\tAuthKey: \"14daak1cad13dj\",\n\/\/ \t})\n\/\/\n\/\/ Cluster connection example:\n\/\/\n\/\/ \tsession, err := r.Connect(r.ConnectOpts{\n\/\/ \t\tHosts: []string{\"localhost:28015\", \"localhost:28016\"},\n\/\/ \t\tDatabase: \"test\",\n\/\/ \t\tAuthKey: \"14daak1cad13dj\",\n\/\/ \t})\nfunc Connect(opts ConnectOpts) (*Session, error) {\n\tvar addresses = opts.Addresses\n\tif len(addresses) == 0 {\n\t\taddresses = []string{opts.Address}\n\t}\n\n\thosts := make([]Host, len(addresses))\n\tfor i, address := range addresses {\n\t\thostname, port := splitAddress(address)\n\t\thosts[i] = NewHost(hostname, port)\n\t}\n\tif len(hosts) <= 0 {\n\t\treturn nil, ErrNoHosts\n\t}\n\n\t\/\/ Connect\n\ts := &Session{\n\t\thosts: hosts,\n\t\topts: &opts,\n\t}\n\n\terr := s.Reconnect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ CloseOpts allows calls to the Close function to be configured.\ntype CloseOpts struct {\n\tNoReplyWait bool `gorethink:\"noreplyWait,omitempty\"`\n}\n\nfunc (o *CloseOpts) toMap() map[string]interface{} {\n\treturn optArgsToMap(o)\n}\n\n\/\/ Convenience function that says whether client is still connected\nfunc (s *Session) IsConnected() bool {\n\tif s.closed == true {\n\t\treturn false\n\t}\n\tif s.cluster == nil {\n\t\treturn false\n\t}\n\treturn s.cluster.IsConnected()\n}\n\n\/\/ Reconnect closes and re-opens a session.\nfunc (s *Session) Reconnect(optArgs ...CloseOpts) error {\n\tvar err error\n\n\tif err = s.Close(optArgs...); err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\ts.cluster, err = NewCluster(s.hosts, s.opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.closed = false\n\ts.mu.Unlock()\n\n\treturn nil\n}\n\n\/\/ Close closes the session\nfunc (s *Session) Close(optArgs ...CloseOpts) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed {\n\t\treturn nil\n\t}\n\n\tif len(optArgs) >= 1 {\n\t\tif optArgs[0].NoReplyWait {\n\t\t\ts.mu.Unlock()\n\t\t\ts.NoReplyWait()\n\t\t\ts.mu.Lock()\n\t\t}\n\t}\n\n\tif s.cluster != nil {\n\t\ts.cluster.Close()\n\t}\n\ts.cluster = nil\n\ts.closed = true\n\n\treturn nil\n}\n\n\/\/ SetMaxIdleConns sets the maximum number of connections in the idle\n\/\/ connection pool.\nfunc (s *Session) SetMaxIdleConns(n int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.opts.MaxIdle = n\n\ts.cluster.SetMaxIdleConns(n)\n}\n\n\/\/ SetMaxOpenConns sets the maximum number of open connections to the database.\nfunc (s *Session) SetMaxOpenConns(n int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.opts.MaxOpen = n\n\ts.cluster.SetMaxOpenConns(n)\n}\n\n\/\/ NoReplyWait ensures that previous queries with the noreply flag have been\n\/\/ processed by the server. 
Note that this guarantee only applies to queries\n\/\/ run on the given connection\nfunc (s *Session) NoReplyWait() error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Exec(Query{\n\t\tType: p.Query_NOREPLY_WAIT,\n\t})\n}\n\n\/\/ Use changes the default database used\nfunc (s *Session) Use(database string) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\ts.opts.Database = database\n}\n\n\/\/ Query executes a ReQL query using the session to connect to the database\nfunc (s *Session) Query(q Query) (*Cursor, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn nil, ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Query(q)\n}\n\n\/\/ Exec executes a ReQL query using the session to connect to the database\nfunc (s *Session) Exec(q Query) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Exec(q)\n}\n\n\/\/ Server returns the server name and server UUID being used by a connection.\nfunc (s *Session) Server() (ServerResponse, error) {\n\treturn s.cluster.Server()\n}\n\n\/\/ SetHosts resets the hosts used when connecting to the RethinkDB cluster\nfunc (s *Session) SetHosts(hosts []Host) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.hosts = hosts\n}\n\nfunc (s *Session) newQuery(t Term, opts map[string]interface{}) (Query, error) {\n\treturn newQuery(t, opts, s.opts)\n}\nAdded changes to IsConnected()package gorethink\n\nimport (\n\t\"crypto\/tls\"\n\t\"sync\"\n\t\"time\"\n\n\tp \"github.com\/dancannon\/gorethink\/ql2\"\n)\n\n\/\/ A Session represents a connection to a RethinkDB cluster and should be used\n\/\/ when executing queries.\ntype Session struct {\n\thosts []Host\n\topts *ConnectOpts\n\n\tmu sync.RWMutex\n\tcluster *Cluster\n\tclosed bool\n}\n\n\/\/ ConnectOpts is used to specify optional arguments when connecting to a cluster.\ntype ConnectOpts struct {\n\tAddress string `gorethink:\"address,omitempty\"`\n\tAddresses []string `gorethink:\"addresses,omitempty\"`\n\tDatabase string `gorethink:\"database,omitempty\"`\n\tAuthKey string `gorethink:\"authkey,omitempty\"`\n\tTimeout time.Duration `gorethink:\"timeout,omitempty\"`\n\tWriteTimeout time.Duration `gorethink:\"write_timeout,omitempty\"`\n\tReadTimeout time.Duration `gorethink:\"read_timeout,omitempty\"`\n\tTLSConfig *tls.Config `gorethink:\"tlsconfig,omitempty\"`\n\n\tMaxIdle int `gorethink:\"max_idle,omitempty\"`\n\t\/\/ By default a maximum of 2 connections are opened per host.\n\tMaxOpen int `gorethink:\"max_open,omitempty\"`\n\n\t\/\/ Below options are for cluster discovery, please note there is a high\n\t\/\/ probability of these changing as the API is still being worked on.\n\n\t\/\/ DiscoverHosts is used to enable host discovery, when true the driver\n\t\/\/ will attempt to discover any new nodes added to the cluster and then\n\t\/\/ start sending queries to these new nodes.\n\tDiscoverHosts bool `gorethink:\"discover_hosts,omitempty\"`\n\t\/\/ NodeRefreshInterval is used to determine how often the driver should\n\t\/\/ refresh the status of a node.\n\t\/\/\n\t\/\/ Deprecated: This function is no longer used due to changes in the\n\t\/\/ way hosts are selected.\n\tNodeRefreshInterval time.Duration `gorethink:\"node_refresh_interval,omitempty\"`\n\t\/\/ HostDecayDuration is used by the go-hostpool package to calculate a weighted\n\t\/\/ score when selecting a host. 
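// Close above deliberately drops the write lock around NoReplyWait: Go's
// sync.RWMutex is not reentrant, so calling a method that takes RLock while
// the write lock is held would deadlock. A distilled sketch of that pattern;
// note it opens a brief window in which other goroutines can observe the
// session mid-close.
package locksketch

import "sync"

type session struct {
	mu     sync.RWMutex
	closed bool
}

func (s *session) wait() {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// ... block until outstanding noreply work drains ...
}

func (s *session) close(noReplyWait bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}
	if noReplyWait {
		s.mu.Unlock() // release so wait() can take the read lock
		s.wait()
		s.mu.Lock() // re-acquire before mutating state
	}
	s.closed = true
}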
By default a value of 5 minutes is used.\n\tHostDecayDuration time.Duration\n\n\t\/\/ Indicates whether the cursors running in this session should use json.Number instead of float64 while\n\t\/\/ unmarshaling documents with interface{}. The default is `false`.\n\tUseJSONNumber bool\n}\n\nfunc (o *ConnectOpts) toMap() map[string]interface{} {\n\treturn optArgsToMap(o)\n}\n\n\/\/ Connect creates a new database session. To view the available connection\n\/\/ options see ConnectOpts.\n\/\/\n\/\/ By default maxIdle and maxOpen are set to 1: passing values greater\n\/\/ than the default (e.g. MaxIdle: \"10\", MaxOpen: \"20\") will provide a\n\/\/ pool of re-usable connections.\n\/\/\n\/\/ Basic connection example:\n\/\/\n\/\/ \tsession, err := r.Connect(r.ConnectOpts{\n\/\/ \t\tHost: \"localhost:28015\",\n\/\/ \t\tDatabase: \"test\",\n\/\/ \t\tAuthKey: \"14daak1cad13dj\",\n\/\/ \t})\n\/\/\n\/\/ Cluster connection example:\n\/\/\n\/\/ \tsession, err := r.Connect(r.ConnectOpts{\n\/\/ \t\tHosts: []string{\"localhost:28015\", \"localhost:28016\"},\n\/\/ \t\tDatabase: \"test\",\n\/\/ \t\tAuthKey: \"14daak1cad13dj\",\n\/\/ \t})\nfunc Connect(opts ConnectOpts) (*Session, error) {\n\tvar addresses = opts.Addresses\n\tif len(addresses) == 0 {\n\t\taddresses = []string{opts.Address}\n\t}\n\n\thosts := make([]Host, len(addresses))\n\tfor i, address := range addresses {\n\t\thostname, port := splitAddress(address)\n\t\thosts[i] = NewHost(hostname, port)\n\t}\n\tif len(hosts) <= 0 {\n\t\treturn nil, ErrNoHosts\n\t}\n\n\t\/\/ Connect\n\ts := &Session{\n\t\thosts: hosts,\n\t\topts: &opts,\n\t}\n\n\terr := s.Reconnect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\n\/\/ CloseOpts allows calls to the Close function to be configured.\ntype CloseOpts struct {\n\tNoReplyWait bool `gorethink:\"noreplyWait,omitempty\"`\n}\n\nfunc (o *CloseOpts) toMap() map[string]interface{} {\n\treturn optArgsToMap(o)\n}\n\n\/\/ IsConnected returns true if session has a valid connection.\nfunc (s *Session) IsConnected() bool {\n\tif s.cluster == nil || s.closed {\n return false\n\t}\n\treturn s.cluster.IsConnected()\n}\n\n\/\/ Reconnect closes and re-opens a session.\nfunc (s *Session) Reconnect(optArgs ...CloseOpts) error {\n\tvar err error\n\n\tif err = s.Close(optArgs...); err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\ts.cluster, err = NewCluster(s.hosts, s.opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.closed = false\n\ts.mu.Unlock()\n\n\treturn nil\n}\n\n\/\/ Close closes the session\nfunc (s *Session) Close(optArgs ...CloseOpts) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed {\n\t\treturn nil\n\t}\n\n\tif len(optArgs) >= 1 {\n\t\tif optArgs[0].NoReplyWait {\n\t\t\ts.mu.Unlock()\n\t\t\ts.NoReplyWait()\n\t\t\ts.mu.Lock()\n\t\t}\n\t}\n\n\tif s.cluster != nil {\n\t\ts.cluster.Close()\n\t}\n\ts.cluster = nil\n\ts.closed = true\n\n\treturn nil\n}\n\n\/\/ SetMaxIdleConns sets the maximum number of connections in the idle\n\/\/ connection pool.\nfunc (s *Session) SetMaxIdleConns(n int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.opts.MaxIdle = n\n\ts.cluster.SetMaxIdleConns(n)\n}\n\n\/\/ SetMaxOpenConns sets the maximum number of open connections to the database.\nfunc (s *Session) SetMaxOpenConns(n int) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.opts.MaxOpen = n\n\ts.cluster.SetMaxOpenConns(n)\n}\n\n\/\/ NoReplyWait ensures that previous queries with the noreply flag have been\n\/\/ processed by the server. 
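// One sharp edge in Reconnect above: if NewCluster fails, the method returns
// while s.mu is still locked, so every later call that takes the mutex blocks
// forever. A sketch of the defer-based shape that avoids the leaked lock;
// the types here are stand-ins for the real Session and Cluster.
package reconnectsketch

import "sync"

type cluster struct{}

type session struct {
	mu      sync.Mutex
	cluster *cluster
	closed  bool
}

// newCluster is a stand-in for the real NewCluster constructor.
var newCluster func() (*cluster, error)

func (s *session) reconnect() error {
	s.mu.Lock()
	defer s.mu.Unlock() // runs on the error path too

	c, err := newCluster()
	if err != nil {
		return err
	}
	s.cluster = c
	s.closed = false
	return nil
}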
Note that this guarantee only applies to queries\n\/\/ run on the given connection\nfunc (s *Session) NoReplyWait() error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Exec(Query{\n\t\tType: p.Query_NOREPLY_WAIT,\n\t})\n}\n\n\/\/ Use changes the default database used\nfunc (s *Session) Use(database string) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\ts.opts.Database = database\n}\n\n\/\/ Query executes a ReQL query using the session to connect to the database\nfunc (s *Session) Query(q Query) (*Cursor, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn nil, ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Query(q)\n}\n\n\/\/ Exec executes a ReQL query using the session to connect to the database\nfunc (s *Session) Exec(q Query) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif s.closed {\n\t\treturn ErrConnectionClosed\n\t}\n\n\treturn s.cluster.Exec(q)\n}\n\n\/\/ Server returns the server name and server UUID being used by a connection.\nfunc (s *Session) Server() (ServerResponse, error) {\n\treturn s.cluster.Server()\n}\n\n\/\/ SetHosts resets the hosts used when connecting to the RethinkDB cluster\nfunc (s *Session) SetHosts(hosts []Host) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.hosts = hosts\n}\n\nfunc (s *Session) newQuery(t Term, opts map[string]interface{}) (Query, error) {\n\treturn newQuery(t, opts, s.opts)\n}\n<|endoftext|>"} {"text":"package libkbfs\n\nimport (\n\t\"io\"\n\t\"sort\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nconst (\n\tdefaultIndirectPointerPrefetchCount int = 20\n\tfileIndirectBlockPrefetchPriority int = -100\n\tdirEntryPrefetchPriority int = -200\n)\n\ntype prefetcher interface {\n\tHandleBlock(b Block, kmd KeyMetadata, priority int)\n\tShutdown() <-chan struct{}\n}\n\nvar _ prefetcher = (*blockPrefetcher)(nil)\n\ntype blockPrefetcher struct {\n\tretriever blockRetriever\n\tprogressCh chan (<-chan error)\n\tdoneCh chan struct{}\n\teg errgroup.Group\n}\n\nfunc newPrefetcher(retriever blockRetriever) *blockPrefetcher {\n\tp := &blockPrefetcher{\n\t\tretriever: retriever,\n\t\tprogressCh: make(chan (<-chan error)),\n\t\tdoneCh: make(chan struct{}),\n\t}\n\tgo p.run()\n\treturn p\n}\n\nfunc (p *blockPrefetcher) run() {\n\tfor ch := range p.progressCh {\n\t\tch := ch\n\t\tp.eg.Go(func() error {\n\t\t\treturn <-ch\n\t\t})\n\t}\n}\n\nfunc (p *blockPrefetcher) request(priority int, kmd KeyMetadata, ptr BlockPointer, block Block) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tch := p.retriever.Request(ctx, priority, kmd, ptr, block, TransientEntry)\n\tselect {\n\tcase p.progressCh <- ch:\n\t\treturn nil\n\tcase <-p.doneCh:\n\t\tcancel()\n\t\treturn io.EOF\n\t}\n}\n\nfunc (p *blockPrefetcher) prefetchIndirectFileBlock(b *FileBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch the first indirect block pointers.\n\t\/\/ TODO: do something smart with subsequent blocks.\n\tnumIPtrs := len(b.IPtrs)\n\tif numIPtrs > defaultIndirectPointerPrefetchCount {\n\t\tnumIPtrs = defaultIndirectPointerPrefetchCount\n\t}\n\tfor _, ptr := range b.IPtrs[:numIPtrs] {\n\t\tp.request(fileIndirectBlockPrefetchPriority, kmd,\n\t\t\tptr.BlockPointer, b.NewEmpty())\n\t}\n}\n\nfunc (p *blockPrefetcher) prefetchIndirectDirBlock(b *DirBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch the first indirect block pointers.\n\tnumIPtrs := len(b.IPtrs)\n\tif numIPtrs > defaultIndirectPointerPrefetchCount {\n\t\tnumIPtrs = 
defaultIndirectPointerPrefetchCount\n\t}\n\tfor _, ptr := range b.IPtrs[:numIPtrs] {\n\t\t_ = p.request(fileIndirectBlockPrefetchPriority, kmd,\n\t\t\tptr.BlockPointer, b.NewEmpty())\n\t}\n}\n\nfunc (p *blockPrefetcher) prefetchDirectDirBlock(b *DirBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch all DirEntry root blocks\n\tdirEntries := dirEntriesBySizeAsc{dirEntryMapToDirEntries(b.Children)}\n\tsort.Sort(dirEntries)\n\tfor i, entry := range dirEntries.dirEntries {\n\t\t\/\/ Prioritize small files\n\t\tpriority := dirEntryPrefetchPriority - i\n\t\tvar block Block\n\t\tswitch entry.Type {\n\t\tcase Dir:\n\t\t\tblock = &DirBlock{}\n\t\tcase File:\n\t\t\tblock = &FileBlock{}\n\t\tcase Exec:\n\t\t\tblock = &FileBlock{}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tp.request(priority, kmd, entry.BlockPointer, block)\n\t}\n}\n\nfunc (p *blockPrefetcher) HandleBlock(b Block, kmd KeyMetadata, priority int) {\n\tswitch b := b.(type) {\n\tcase *FileBlock:\n\t\tif b.IsInd && priority >= defaultOnDemandRequestPriority {\n\t\t\tp.prefetchIndirectFileBlock(b, kmd, priority)\n\t\t}\n\tcase *DirBlock:\n\t\t\/\/ If this is an on-demand request:\n\t\tif priority >= defaultOnDemandRequestPriority {\n\t\t\tif b.IsInd {\n\t\t\t\tp.prefetchIndirectDirBlock(b, kmd, priority)\n\t\t\t} else {\n\t\t\t\tp.prefetchDirectDirBlock(b, kmd, priority)\n\t\t\t}\n\t\t}\n\tdefault:\n\t}\n}\n\nfunc (p *blockPrefetcher) Shutdown() <-chan struct{} {\n\tclose(p.progressCh)\n\tclose(p.doneCh)\n\tch := make(chan struct{})\n\tgo func() {\n\t\tp.eg.Wait()\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\nprefetcher: Use sync.WaitGroup instead of errgroup because we weren't using the cancel nor error featurespackage libkbfs\n\nimport (\n\t\"io\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tdefaultIndirectPointerPrefetchCount int = 20\n\tfileIndirectBlockPrefetchPriority int = -100\n\tdirEntryPrefetchPriority int = -200\n)\n\ntype prefetcher interface {\n\tHandleBlock(b Block, kmd KeyMetadata, priority int)\n\tShutdown() <-chan struct{}\n}\n\nvar _ prefetcher = (*blockPrefetcher)(nil)\n\ntype blockPrefetcher struct {\n\tretriever blockRetriever\n\tprogressCh chan (<-chan error)\n\tdoneCh chan struct{}\n\tsg sync.WaitGroup\n}\n\nfunc newPrefetcher(retriever blockRetriever) *blockPrefetcher {\n\tp := &blockPrefetcher{\n\t\tretriever: retriever,\n\t\tprogressCh: make(chan (<-chan error)),\n\t\tdoneCh: make(chan struct{}),\n\t}\n\tgo p.run()\n\treturn p\n}\n\nfunc (p *blockPrefetcher) run() {\n\tfor ch := range p.progressCh {\n\t\tch := ch\n\t\tp.sg.Add(1)\n\t\tgo func() error {\n\t\t\tdefer p.sg.Done()\n\t\t\treturn <-ch\n\t\t}()\n\t}\n}\n\nfunc (p *blockPrefetcher) request(priority int, kmd KeyMetadata, ptr BlockPointer, block Block) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tch := p.retriever.Request(ctx, priority, kmd, ptr, block, TransientEntry)\n\tselect {\n\tcase p.progressCh <- ch:\n\t\treturn nil\n\tcase <-p.doneCh:\n\t\tcancel()\n\t\treturn io.EOF\n\t}\n}\n\nfunc (p *blockPrefetcher) prefetchIndirectFileBlock(b *FileBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch the first indirect block pointers.\n\t\/\/ TODO: do something smart with subsequent blocks.\n\tnumIPtrs := len(b.IPtrs)\n\tif numIPtrs > defaultIndirectPointerPrefetchCount {\n\t\tnumIPtrs = defaultIndirectPointerPrefetchCount\n\t}\n\tfor _, ptr := range b.IPtrs[:numIPtrs] {\n\t\tp.request(fileIndirectBlockPrefetchPriority, kmd,\n\t\t\tptr.BlockPointer, b.NewEmpty())\n\t}\n}\n\nfunc (p *blockPrefetcher) 
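// The commit above swaps errgroup for sync.WaitGroup because neither
// cancellation nor the collected error were used. A distilled sketch of the
// resulting pattern: one goroutine per incoming channel, each registered with
// the WaitGroup so shutdown can wait for stragglers. (The version above keeps
// an unused `error` return on the goroutine; the idiomatic form below drops it.)
package prefetchsketch

import "sync"

type waiter struct {
	progressCh chan (<-chan error)
	wg         sync.WaitGroup
}

func (w *waiter) run() {
	for ch := range w.progressCh {
		ch := ch // capture the loop variable for the goroutine
		w.wg.Add(1)
		go func() {
			defer w.wg.Done()
			<-ch // drain the result; errors are intentionally ignored
		}()
	}
}

// shutdown closes the intake and returns a channel that closes once every
// in-flight wait has finished.
func (w *waiter) shutdown() <-chan struct{} {
	close(w.progressCh)
	done := make(chan struct{})
	go func() {
		w.wg.Wait()
		close(done)
	}()
	return done
}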
prefetchIndirectDirBlock(b *DirBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch the first indirect block pointers.\n\tnumIPtrs := len(b.IPtrs)\n\tif numIPtrs > defaultIndirectPointerPrefetchCount {\n\t\tnumIPtrs = defaultIndirectPointerPrefetchCount\n\t}\n\tfor _, ptr := range b.IPtrs[:numIPtrs] {\n\t\t_ = p.request(fileIndirectBlockPrefetchPriority, kmd,\n\t\t\tptr.BlockPointer, b.NewEmpty())\n\t}\n}\n\nfunc (p *blockPrefetcher) prefetchDirectDirBlock(b *DirBlock, kmd KeyMetadata, priority int) {\n\t\/\/ Prefetch all DirEntry root blocks\n\tdirEntries := dirEntriesBySizeAsc{dirEntryMapToDirEntries(b.Children)}\n\tsort.Sort(dirEntries)\n\tfor i, entry := range dirEntries.dirEntries {\n\t\t\/\/ Prioritize small files\n\t\tpriority := dirEntryPrefetchPriority - i\n\t\tvar block Block\n\t\tswitch entry.Type {\n\t\tcase Dir:\n\t\t\tblock = &DirBlock{}\n\t\tcase File:\n\t\t\tblock = &FileBlock{}\n\t\tcase Exec:\n\t\t\tblock = &FileBlock{}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tp.request(priority, kmd, entry.BlockPointer, block)\n\t}\n}\n\nfunc (p *blockPrefetcher) HandleBlock(b Block, kmd KeyMetadata, priority int) {\n\tswitch b := b.(type) {\n\tcase *FileBlock:\n\t\tif b.IsInd && priority >= defaultOnDemandRequestPriority {\n\t\t\tp.prefetchIndirectFileBlock(b, kmd, priority)\n\t\t}\n\tcase *DirBlock:\n\t\t\/\/ If this is an on-demand request:\n\t\tif priority >= defaultOnDemandRequestPriority {\n\t\t\tif b.IsInd {\n\t\t\t\tp.prefetchIndirectDirBlock(b, kmd, priority)\n\t\t\t} else {\n\t\t\t\tp.prefetchDirectDirBlock(b, kmd, priority)\n\t\t\t}\n\t\t}\n\tdefault:\n\t}\n}\n\nfunc (p *blockPrefetcher) Shutdown() <-chan struct{} {\n\tclose(p.progressCh)\n\tclose(p.doneCh)\n\tch := make(chan struct{})\n\tgo func() {\n\t\tp.sg.Wait()\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n<|endoftext|>"} {"text":"package game\n\nimport (\n\t\"github.com\/tanema\/amore\/keyboard\"\n)\n\ntype Player struct {\n\t*Entity\n\thealth float32\n\tdeadCounter float32\n\tisJumpingOrFlying bool\n\tisDead bool\n\tonGround bool\n\tachievedFullHealth bool\n}\n\nconst (\n\tdeadDuration float32 = 3 \/\/ seconds until res-pawn\n\trunAccel float32 = 500 \/\/ the player acceleration while going left\/right\n\tbrakeAccel float32 = 2000\n\tjumpVelocity float32 = 400 \/\/ the initial upwards velocity when jumping\n\tbeltWidth float32 = 2\n\tbeltHeight float32 = 8\n)\n\nfunc newPlayer(gameMap *Map, l, t float32) *Player {\n\tplayer := &Player{\n\t\thealth: 1,\n\t}\n\tplayer.Entity = newEntity(gameMap, player, \"player\", l, t, 32, 64)\n\tplayer.body.SetResponses(map[string]string{\n\t\t\"guardian\": \"slide\",\n\t\t\"block\": \"slide\",\n\t})\n\treturn player\n}\n\nfunc (player *Player) changeVelocityByKeys(dt float32) {\n\tplayer.isJumpingOrFlying = false\n\n\tif player.isDead {\n\t\treturn\n\t}\n\n\tif keyboard.IsDown(keyboard.KeyLeft) {\n\t\tif player.vx > 0 {\n\t\t\tplayer.vx -= dt * brakeAccel\n\t\t} else {\n\t\t\tplayer.vx -= dt * runAccel\n\t\t}\n\t} else if keyboard.IsDown(keyboard.KeyRight) {\n\t\tif player.vx < 0 {\n\t\t\tplayer.vx += dt * brakeAccel\n\t\t} else {\n\t\t\tplayer.vx += dt * runAccel\n\t\t}\n\t} else {\n\t\tbrake := dt * -brakeAccel\n\t\tif player.vx < 0 {\n\t\t\tbrake = dt * brakeAccel\n\t\t}\n\t\tif abs(brake) > abs(player.vx) {\n\t\t\tplayer.vx = 0\n\t\t} else {\n\t\t\tplayer.vx += brake\n\t\t}\n\t}\n\n\tif keyboard.IsDown(keyboard.KeyUp) && (player.canFly() || player.onGround) { \/\/ jump\/fly\n\t\tplayer.vy = -jumpVelocity\n\t\tplayer.isJumpingOrFlying = true\n\t}\n}\n\nfunc (player *Player) 
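// changeVelocityByKeys above brakes toward zero and clamps so one frame's
// braking never overshoots through zero and reverses the player. The same
// logic as a small pure function, with illustrative names.
package physsketch

func abs32(v float32) float32 {
	if v < 0 {
		return -v
	}
	return v
}

// applyBrake moves vx toward zero by at most brakeAccel*dt, stopping exactly
// at zero instead of oscillating around it.
func applyBrake(vx, brakeAccel, dt float32) float32 {
	brake := dt * brakeAccel
	if brake >= abs32(vx) {
		return 0
	}
	if vx > 0 {
		return vx - brake
	}
	return vx + brake
}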
moveColliding(dt float32) {\n\tplayer.onGround = false\n\tl, t, cols := player.Entity.body.Move(player.l+player.vx*dt, player.t+player.vy*dt)\n\tfor _, col := range cols {\n\t\tif col.Body.Tag() != \"puff\" {\n\t\t\tplayer.changeVelocityByCollisionNormal(col.Normal.X, col.Normal.Y, 0)\n\t\t\tplayer.onGround = col.Normal.Y < 1\n\t\t}\n\t}\n\tplayer.l, player.t = l, t\n}\n\nfunc (player *Player) updateHealth(dt float32) {\n\tplayer.achievedFullHealth = false\n\tif player.health < 1 {\n\t\tplayer.health = min(1, player.health+dt\/6)\n\t\tplayer.achievedFullHealth = player.health == 1\n\t}\n}\n\nfunc (player *Player) playEffects() {\n\tif player.isJumpingOrFlying {\n\t\tif !player.onGround {\n\t\t\tl, t, w, h := player.Extents()\n\t\t\tnewPuff(player.gameMap, l, t+h\/2, 20*(1-randMax(1)), 50, 2, 3)\n\t\t\tnewPuff(player.gameMap, l+w, t+h\/2, 20*(1-randMax(1)), 50, 2, 3)\n\t\t}\n\t}\n}\n\nfunc (player *Player) updateOrder() int {\n\treturn 1\n}\n\nfunc (player *Player) update(dt float32) {\n\tplayer.updateHealth(dt)\n\tplayer.changeVelocityByKeys(dt)\n\tplayer.changeVelocityByGravity(dt)\n\tplayer.playEffects()\n\tplayer.moveColliding(dt)\n}\n\nfunc (player *Player) getColor() (r, g, b float32) {\n\tg = floor(255 * player.health)\n\treturn 255 - g, g, 0\n}\n\nfunc (player *Player) canFly() bool {\n\treturn player.health == 1\n}\n\nfunc (player *Player) draw(debug bool) {\n\tr, g, b := player.getColor()\n\tl, t, w, h := player.Extents()\n\tdrawFilledRectangle(l, t, w, h, r, g, b)\n\n\tif player.canFly() {\n\t\tdrawFilledRectangle(l-beltWidth, t+h\/2, w+2*beltWidth, beltHeight, 255, 255, 255)\n\t}\n\n\tif debug && player.onGround {\n\t\tdrawFilledRectangle(l, t+h-4, w, 4, 255, 255, 255)\n\t}\n}\n\nfunc (player *Player) damage(intensity float32) {\n\tif player.isDead {\n\t\treturn\n\t}\n\n\tif player.health == 1 {\n\t\tfor i := 1; i <= 3; i++ {\n\t\t\tnewDebris(player.gameMap,\n\t\t\t\trandRange(player.l, player.l+player.w),\n\t\t\t\tplayer.t+player.h\/2,\n\t\t\t\t255, 0, 0,\n\t\t\t)\n\t\t}\n\t}\n\n\tplayer.health = player.health - intensity\n\tif player.health <= 0 {\n\t\tplayer.destroy()\n\t\tplayer.isDead = true\n\t}\n}\n\nfunc (player *Player) destroy() {\n\tplayer.body.Remove()\n\tfor i := 1; i <= 20; i++ {\n\t\tnewDebris(player.gameMap,\n\t\t\trandRange(player.l, player.l+player.w),\n\t\t\trandRange(player.t, player.t+player.h),\n\t\t\t255, 0, 0)\n\t}\n}\nfixed wall jumpingpackage game\n\nimport (\n\t\"github.com\/tanema\/amore\/keyboard\"\n)\n\ntype Player struct {\n\t*Entity\n\thealth float32\n\tdeadCounter float32\n\tisJumpingOrFlying bool\n\tisDead bool\n\tonGround bool\n\tachievedFullHealth bool\n}\n\nconst (\n\tdeadDuration float32 = 3 \/\/ seconds until res-pawn\n\trunAccel float32 = 500 \/\/ the player acceleration while going left\/right\n\tbrakeAccel float32 = 2000\n\tjumpVelocity float32 = 400 \/\/ the initial upwards velocity when jumping\n\tbeltWidth float32 = 2\n\tbeltHeight float32 = 8\n)\n\nfunc newPlayer(gameMap *Map, l, t float32) *Player {\n\tplayer := &Player{\n\t\thealth: 1,\n\t}\n\tplayer.Entity = newEntity(gameMap, player, \"player\", l, t, 32, 64)\n\tplayer.body.SetResponses(map[string]string{\n\t\t\"guardian\": \"slide\",\n\t\t\"block\": \"slide\",\n\t})\n\treturn player\n}\n\nfunc (player *Player) changeVelocityByKeys(dt float32) {\n\tplayer.isJumpingOrFlying = false\n\n\tif player.isDead {\n\t\treturn\n\t}\n\n\tif keyboard.IsDown(keyboard.KeyLeft) {\n\t\tif player.vx > 0 {\n\t\t\tplayer.vx -= dt * brakeAccel\n\t\t} else {\n\t\t\tplayer.vx -= dt * 
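// The "fixed wall jumping" commit replaces `col.Normal.Y < 1` with
// `col.Normal.Y == -1` in moveColliding. In screen coordinates (y grows
// downward) a floor pushes the player up with normal (0, -1), while a wall
// yields (±1, 0); the wall's Y component of 0 satisfied the old `< 1` test,
// which is what let players jump off walls. The check, distilled:
package groundsketch

type normal struct{ X, Y float32 }

// isGround reports whether a collision normal represents standing on a floor.
// Only an upward-pointing normal counts; wall normals (Y == 0) do not.
func isGround(n normal) bool {
	return n.Y == -1
}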
runAccel\n\t\t}\n\t} else if keyboard.IsDown(keyboard.KeyRight) {\n\t\tif player.vx < 0 {\n\t\t\tplayer.vx += dt * brakeAccel\n\t\t} else {\n\t\t\tplayer.vx += dt * runAccel\n\t\t}\n\t} else {\n\t\tbrake := dt * -brakeAccel\n\t\tif player.vx < 0 {\n\t\t\tbrake = dt * brakeAccel\n\t\t}\n\t\tif abs(brake) > abs(player.vx) {\n\t\t\tplayer.vx = 0\n\t\t} else {\n\t\t\tplayer.vx += brake\n\t\t}\n\t}\n\n\tif keyboard.IsDown(keyboard.KeyUp) && (player.canFly() || player.onGround) { \/\/ jump\/fly\n\t\tplayer.vy = -jumpVelocity\n\t\tplayer.isJumpingOrFlying = true\n\t}\n}\n\nfunc (player *Player) moveColliding(dt float32) {\n\tplayer.onGround = false\n\tl, t, cols := player.Entity.body.Move(player.l+player.vx*dt, player.t+player.vy*dt)\n\tfor _, col := range cols {\n\t\tif col.Body.Tag() != \"puff\" {\n\t\t\tplayer.changeVelocityByCollisionNormal(col.Normal.X, col.Normal.Y, 0)\n\t\t\tplayer.onGround = col.Normal.Y == -1\n\t\t}\n\t}\n\tplayer.l, player.t = l, t\n}\n\nfunc (player *Player) updateHealth(dt float32) {\n\tplayer.achievedFullHealth = false\n\tif player.health < 1 {\n\t\tplayer.health = min(1, player.health+dt\/6)\n\t\tplayer.achievedFullHealth = player.health == 1\n\t}\n}\n\nfunc (player *Player) playEffects() {\n\tif player.isJumpingOrFlying {\n\t\tif !player.onGround {\n\t\t\tl, t, w, h := player.Extents()\n\t\t\tnewPuff(player.gameMap, l, t+h\/2, 20*(1-randMax(1)), 50, 2, 3)\n\t\t\tnewPuff(player.gameMap, l+w, t+h\/2, 20*(1-randMax(1)), 50, 2, 3)\n\t\t}\n\t}\n}\n\nfunc (player *Player) updateOrder() int {\n\treturn 1\n}\n\nfunc (player *Player) update(dt float32) {\n\tplayer.updateHealth(dt)\n\tplayer.changeVelocityByKeys(dt)\n\tplayer.changeVelocityByGravity(dt)\n\tplayer.playEffects()\n\tplayer.moveColliding(dt)\n}\n\nfunc (player *Player) getColor() (r, g, b float32) {\n\tg = floor(255 * player.health)\n\treturn 255 - g, g, 0\n}\n\nfunc (player *Player) canFly() bool {\n\treturn player.health == 1\n}\n\nfunc (player *Player) draw(debug bool) {\n\tr, g, b := player.getColor()\n\tl, t, w, h := player.Extents()\n\tdrawFilledRectangle(l, t, w, h, r, g, b)\n\n\tif player.canFly() {\n\t\tdrawFilledRectangle(l-beltWidth, t+h\/2, w+2*beltWidth, beltHeight, 255, 255, 255)\n\t}\n\n\tif debug && player.onGround {\n\t\tdrawFilledRectangle(l, t+h-4, w, 4, 255, 255, 255)\n\t}\n}\n\nfunc (player *Player) damage(intensity float32) {\n\tif player.isDead {\n\t\treturn\n\t}\n\n\tif player.health == 1 {\n\t\tfor i := 1; i <= 3; i++ {\n\t\t\tnewDebris(player.gameMap,\n\t\t\t\trandRange(player.l, player.l+player.w),\n\t\t\t\tplayer.t+player.h\/2,\n\t\t\t\t255, 0, 0,\n\t\t\t)\n\t\t}\n\t}\n\n\tplayer.health = player.health - intensity\n\tif player.health <= 0 {\n\t\tplayer.destroy()\n\t\tplayer.isDead = true\n\t}\n}\n\nfunc (player *Player) destroy() {\n\tplayer.body.Remove()\n\tfor i := 1; i <= 20; i++ {\n\t\tnewDebris(player.gameMap,\n\t\t\trandRange(player.l, player.l+player.w),\n\t\t\trandRange(player.t, player.t+player.h),\n\t\t\t255, 0, 0)\n\t}\n}\n<|endoftext|>"} {"text":"package widget\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/admin\"\n\t\"github.com\/qor\/media\/oss\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor\/resource\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/serializable_meta\"\n)\n\n\/\/ QorWidgetSettingInterface qor widget setting interface\ntype QorWidgetSettingInterface interface {\n\tGetPreviewIcon() string\n\tGetWidgetName() string\n\tSetWidgetName(string)\n\tGetGroupName() 
string\n\tSetGroupName(string)\n\tGetScope() string\n\tSetScope(string)\n\tGetTemplate() string\n\tSetTemplate(string)\n\tserializable_meta.SerializableMetaInterface\n}\n\n\/\/ QorWidgetSetting default qor widget setting struct\ntype QorWidgetSetting struct {\n\tName string `gorm:\"primary_key\"`\n\tScope string `gorm:\"primary_key;size:128;default:'default'\"`\n\tDescription string\n\tShared bool\n\tWidgetType string\n\tGroupName string\n\tTemplate string\n\tPreviewIcon oss.OSS\n\tserializable_meta.SerializableMeta\n\tCreatedAt time.Time\n\tUpdatedAt time.Time\n}\n\n\/\/ ResourceName get widget setting's resource name\nfunc (widgetSetting *QorWidgetSetting) ResourceName() string {\n\treturn \"Widget Content\"\n}\n\n\/\/ GetSerializableArgumentKind get serializable kind\nfunc (widgetSetting *QorWidgetSetting) GetSerializableArgumentKind() string {\n\tif widgetSetting.WidgetType != \"\" {\n\t\treturn widgetSetting.WidgetType\n\t}\n\treturn widgetSetting.Kind\n}\n\n\/\/ SetSerializableArgumentKind set serializable kind\nfunc (widgetSetting *QorWidgetSetting) SetSerializableArgumentKind(name string) {\n\twidgetSetting.WidgetType = name\n\twidgetSetting.Kind = name\n}\n\n\/\/ GetPreviewIcon get preview icon\nfunc (widgetSetting QorWidgetSetting) GetPreviewIcon() string {\n\treturn widgetSetting.PreviewIcon.URL()\n}\n\n\/\/ GetWidgetName get widget setting's group name\nfunc (widgetSetting QorWidgetSetting) GetWidgetName() string {\n\treturn widgetSetting.Name\n}\n\n\/\/ SetWidgetName set widget setting's group name\nfunc (widgetSetting *QorWidgetSetting) SetWidgetName(name string) {\n\twidgetSetting.Name = name\n}\n\n\/\/ GetGroupName get widget setting's group name\nfunc (widgetSetting QorWidgetSetting) GetGroupName() string {\n\treturn widgetSetting.GroupName\n}\n\n\/\/ SetGroupName set widget setting's group name\nfunc (widgetSetting *QorWidgetSetting) SetGroupName(groupName string) {\n\twidgetSetting.GroupName = groupName\n}\n\n\/\/ GetScope get widget's scope\nfunc (widgetSetting QorWidgetSetting) GetScope() string {\n\treturn widgetSetting.Scope\n}\n\n\/\/ SetScope set widget setting's scope\nfunc (widgetSetting *QorWidgetSetting) SetScope(scope string) {\n\twidgetSetting.Scope = scope\n}\n\n\/\/ GetTemplate get used widget template\nfunc (widgetSetting QorWidgetSetting) GetTemplate() string {\n\tif widget := GetWidget(widgetSetting.GetSerializableArgumentKind()); widget != nil {\n\t\tfor _, value := range widget.Templates {\n\t\t\tif value == widgetSetting.Template {\n\t\t\t\treturn value\n\t\t\t}\n\t\t}\n\n\t\t\/\/ return first value of defined widget templates\n\t\tfor _, value := range widget.Templates {\n\t\t\treturn value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ SetTemplate set used widget's template\nfunc (widgetSetting *QorWidgetSetting) SetTemplate(template string) {\n\twidgetSetting.Template = template\n}\n\n\/\/ GetSerializableArgumentResource get setting's argument's resource\nfunc (widgetSetting *QorWidgetSetting) GetSerializableArgumentResource() *admin.Resource {\n\twidget := GetWidget(widgetSetting.GetSerializableArgumentKind())\n\tif widget != nil {\n\t\treturn widget.Setting\n\t}\n\treturn nil\n}\n\n\/\/ ConfigureQorResource a method used to config Widget for qor admin\nfunc (widgetSetting *QorWidgetSetting) ConfigureQorResource(res resource.Resourcer) {\n\tif res, ok := res.(*admin.Resource); ok {\n\t\tif res.GetMeta(\"Name\") == nil {\n\t\t\tres.Meta(&admin.Meta{Name: \"Name\"})\n\t\t}\n\n\t\tif res.GetMeta(\"DisplayName\") == nil {\n\t\t\tres.Meta(&admin.Meta{Name: 
\"DisplayName\", Label: \"Name\", Type: \"readonly\", FieldName: \"Name\"})\n\t\t}\n\n\t\tres.Meta(&admin.Meta{\n\t\t\tName: \"Scope\",\n\t\t\tType: \"hidden\",\n\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\tif scope := context.Request.URL.Query().Get(\"widget_scope\"); scope != \"\" {\n\t\t\t\t\treturn scope\n\t\t\t\t}\n\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\tif scope := setting.GetScope(); scope != \"\" {\n\t\t\t\t\t\treturn scope\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn \"default\"\n\t\t\t},\n\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\tsetting.SetScope(utils.ToString(metaValue.Value))\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\n\t\tres.Meta(&admin.Meta{\n\t\t\tName: \"Widgets\",\n\t\t\tType: \"select_one\",\n\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\tif typ := context.Request.URL.Query().Get(\"widget_type\"); typ != \"\" {\n\t\t\t\t\treturn typ\n\t\t\t\t}\n\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\twidget := GetWidget(setting.GetSerializableArgumentKind())\n\t\t\t\t\tif widget == nil {\n\t\t\t\t\t\treturn \"\"\n\t\t\t\t\t}\n\t\t\t\t\treturn widget.Name\n\t\t\t\t}\n\n\t\t\t\treturn \"\"\n\t\t\t},\n\t\t\tCollection: func(result interface{}, context *qor.Context) (results [][]string) {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\tif setting.GetWidgetName() == \"\" {\n\t\t\t\t\t\tfor _, widget := range registeredWidgets {\n\t\t\t\t\t\t\tresults = append(results, []string{widget.Name, widget.Name})\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgroupName := setting.GetGroupName()\n\t\t\t\t\t\tfor _, group := range registeredWidgetsGroup {\n\t\t\t\t\t\t\tif group.Name == groupName {\n\t\t\t\t\t\t\t\tfor _, widget := range group.Widgets {\n\t\t\t\t\t\t\t\t\tresults = append(results, []string{widget, widget})\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(results) == 0 {\n\t\t\t\t\t\tresults = append(results, []string{setting.GetSerializableArgumentKind(), setting.GetSerializableArgumentKind()})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t},\n\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\tsetting.SetSerializableArgumentKind(utils.ToString(metaValue.Value))\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\n\t\tres.Meta(&admin.Meta{\n\t\t\tName: \"Template\",\n\t\t\tType: \"select_one\",\n\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\treturn setting.GetTemplate()\n\t\t\t\t}\n\t\t\t\treturn \"\"\n\t\t\t},\n\t\t\tCollection: func(result interface{}, context *qor.Context) (results [][]string) {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\tif widget := GetWidget(setting.GetSerializableArgumentKind()); widget != nil {\n\t\t\t\t\t\tfor _, value := range widget.Templates {\n\t\t\t\t\t\t\tresults = append(results, []string{value, value})\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t},\n\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok 
{\n\t\t\t\t\tsetting.SetTemplate(utils.ToString(metaValue.Value))\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\n\t\tres.Meta(&admin.Meta{\n\t\t\tName: \"Shared\",\n\t\t\tLabel: \"This widget is shared\",\n\t\t})\n\n\t\tres.Scope(&admin.Scope{\n\t\t\tName: \"Shared\",\n\t\t\tLabel: \"Shared Widgets\",\n\t\t\tHandle: func(db *gorm.DB, _ *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(\"shared = ?\", true)\n\t\t\t},\n\t\t})\n\n\t\tres.Action(&admin.Action{\n\t\t\tName: \"Preview\",\n\t\t\tURL: func(record interface{}, context *admin.Context) string {\n\t\t\t\treturn fmt.Sprintf(\"%v\/%v\/%v\/!preview\", context.Admin.GetRouter().Prefix, res.ToParam(), record.(QorWidgetSettingInterface).GetWidgetName())\n\t\t\t},\n\t\t\tModes: []string{\"edit\", \"menu_item\"},\n\t\t})\n\n\t\tres.UseTheme(\"widget\")\n\n\t\tres.IndexAttrs(\"Name\", \"Description\", \"CreatedAt\", \"UpdatedAt\")\n\t\tres.ShowAttrs(\"Name\", \"Scope\", \"WidgetType\", \"Template\", \"Description\", \"Value\", \"CreatedAt\", \"UpdatedAt\")\n\t\tres.EditAttrs(\n\t\t\t\"DisplayName\", \"Description\", \"Scope\", \"Widgets\", \"Template\",\n\t\t\t&admin.Section{\n\t\t\t\tTitle: \"Settings\",\n\t\t\t\tRows: [][]string{{\"Kind\"}, {\"SerializableMeta\"}},\n\t\t\t},\n\t\t\t\"Shared\",\n\t\t)\n\t\tres.NewAttrs(\"Name\", \"Description\", \"Scope\", \"Widgets\", \"Template\")\n\t}\n}\nAdd Shared to new attrspackage widget\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/admin\"\n\t\"github.com\/qor\/media\/oss\"\n\t\"github.com\/qor\/qor\"\n\t\"github.com\/qor\/qor\/resource\"\n\t\"github.com\/qor\/qor\/utils\"\n\t\"github.com\/qor\/serializable_meta\"\n)\n\n\/\/ QorWidgetSettingInterface qor widget setting interface\ntype QorWidgetSettingInterface interface {\n\tGetPreviewIcon() string\n\tGetWidgetName() string\n\tSetWidgetName(string)\n\tGetGroupName() string\n\tSetGroupName(string)\n\tGetScope() string\n\tSetScope(string)\n\tGetTemplate() string\n\tSetTemplate(string)\n\tserializable_meta.SerializableMetaInterface\n}\n\n\/\/ QorWidgetSetting default qor widget setting struct\ntype QorWidgetSetting struct {\n\tName string `gorm:\"primary_key\"`\n\tScope string `gorm:\"primary_key;size:128;default:'default'\"`\n\tDescription string\n\tShared bool\n\tWidgetType string\n\tGroupName string\n\tTemplate string\n\tPreviewIcon oss.OSS\n\tserializable_meta.SerializableMeta\n\tCreatedAt time.Time\n\tUpdatedAt time.Time\n}\n\n\/\/ ResourceName get widget setting's resource name\nfunc (widgetSetting *QorWidgetSetting) ResourceName() string {\n\treturn \"Widget Content\"\n}\n\n\/\/ GetSerializableArgumentKind get serializable kind\nfunc (widgetSetting *QorWidgetSetting) GetSerializableArgumentKind() string {\n\tif widgetSetting.WidgetType != \"\" {\n\t\treturn widgetSetting.WidgetType\n\t}\n\treturn widgetSetting.Kind\n}\n\n\/\/ SetSerializableArgumentKind set serializable kind\nfunc (widgetSetting *QorWidgetSetting) SetSerializableArgumentKind(name string) {\n\twidgetSetting.WidgetType = name\n\twidgetSetting.Kind = name\n}\n\n\/\/ GetPreviewIcon get preview icon\nfunc (widgetSetting QorWidgetSetting) GetPreviewIcon() string {\n\treturn widgetSetting.PreviewIcon.URL()\n}\n\n\/\/ GetWidgetName get widget setting's group name\nfunc (widgetSetting QorWidgetSetting) GetWidgetName() string {\n\treturn widgetSetting.Name\n}\n\n\/\/ SetWidgetName set widget setting's group name\nfunc (widgetSetting *QorWidgetSetting) SetWidgetName(name string) {\n\twidgetSetting.Name = name\n}\n\n\/\/ GetGroupName get widget setting's group 
name\nfunc (widgetSetting QorWidgetSetting) GetGroupName() string {\n\treturn widgetSetting.GroupName\n}\n\n\/\/ SetGroupName set widget setting's group name\nfunc (widgetSetting *QorWidgetSetting) SetGroupName(groupName string) {\n\twidgetSetting.GroupName = groupName\n}\n\n\/\/ GetScope get widget's scope\nfunc (widgetSetting QorWidgetSetting) GetScope() string {\n\treturn widgetSetting.Scope\n}\n\n\/\/ SetScope set widget setting's scope\nfunc (widgetSetting *QorWidgetSetting) SetScope(scope string) {\n\twidgetSetting.Scope = scope\n}\n\n\/\/ GetTemplate get used widget template\nfunc (widgetSetting QorWidgetSetting) GetTemplate() string {\n\tif widget := GetWidget(widgetSetting.GetSerializableArgumentKind()); widget != nil {\n\t\tfor _, value := range widget.Templates {\n\t\t\tif value == widgetSetting.Template {\n\t\t\t\treturn value\n\t\t\t}\n\t\t}\n\n\t\t\/\/ return first value of defined widget templates\n\t\tfor _, value := range widget.Templates {\n\t\t\treturn value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ SetTemplate set used widget's template\nfunc (widgetSetting *QorWidgetSetting) SetTemplate(template string) {\n\twidgetSetting.Template = template\n}\n\n\/\/ GetSerializableArgumentResource get setting's argument's resource\nfunc (widgetSetting *QorWidgetSetting) GetSerializableArgumentResource() *admin.Resource {\n\twidget := GetWidget(widgetSetting.GetSerializableArgumentKind())\n\tif widget != nil {\n\t\treturn widget.Setting\n\t}\n\treturn nil\n}\n\n\/\/ ConfigureQorResource a method used to config Widget for qor admin\nfunc (widgetSetting *QorWidgetSetting) ConfigureQorResource(res resource.Resourcer) {\n\tif res, ok := res.(*admin.Resource); ok {\n\t\tif res.GetMeta(\"Name\") == nil {\n\t\t\tres.Meta(&admin.Meta{Name: \"Name\"})\n\t\t}\n\n\t\tif res.GetMeta(\"DisplayName\") == nil {\n\t\t\tres.Meta(&admin.Meta{Name: \"DisplayName\", Label: \"Name\", Type: \"readonly\", FieldName: \"Name\"})\n\t\t}\n\n\t\tif res.GetMeta(\"Scope\") == nil {\n\t\t\tres.Meta(&admin.Meta{\n\t\t\t\tName: \"Scope\",\n\t\t\t\tType: \"hidden\",\n\t\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\t\tif scope := context.Request.URL.Query().Get(\"widget_scope\"); scope != \"\" {\n\t\t\t\t\t\treturn scope\n\t\t\t\t\t}\n\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tif scope := setting.GetScope(); scope != \"\" {\n\t\t\t\t\t\t\treturn scope\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn \"default\"\n\t\t\t\t},\n\t\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tsetting.SetScope(utils.ToString(metaValue.Value))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tif res.GetMeta(\"Widgets\") == nil {\n\t\t\tres.Meta(&admin.Meta{\n\t\t\t\tName: \"Widgets\",\n\t\t\t\tType: \"select_one\",\n\t\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\t\tif typ := context.Request.URL.Query().Get(\"widget_type\"); typ != \"\" {\n\t\t\t\t\t\treturn typ\n\t\t\t\t\t}\n\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\twidget := GetWidget(setting.GetSerializableArgumentKind())\n\t\t\t\t\t\tif widget == nil {\n\t\t\t\t\t\t\treturn \"\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn widget.Name\n\t\t\t\t\t}\n\n\t\t\t\t\treturn \"\"\n\t\t\t\t},\n\t\t\t\tCollection: func(result interface{}, context *qor.Context) (results [][]string) {\n\t\t\t\t\tif setting, ok := 
result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tif setting.GetWidgetName() == \"\" {\n\t\t\t\t\t\t\tfor _, widget := range registeredWidgets {\n\t\t\t\t\t\t\t\tresults = append(results, []string{widget.Name, widget.Name})\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tgroupName := setting.GetGroupName()\n\t\t\t\t\t\t\tfor _, group := range registeredWidgetsGroup {\n\t\t\t\t\t\t\t\tif group.Name == groupName {\n\t\t\t\t\t\t\t\t\tfor _, widget := range group.Widgets {\n\t\t\t\t\t\t\t\t\t\tresults = append(results, []string{widget, widget})\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif len(results) == 0 {\n\t\t\t\t\t\t\tresults = append(results, []string{setting.GetSerializableArgumentKind(), setting.GetSerializableArgumentKind()})\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tsetting.SetSerializableArgumentKind(utils.ToString(metaValue.Value))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tif res.GetMeta(\"Template\") == nil {\n\t\t\tres.Meta(&admin.Meta{\n\t\t\t\tName: \"Template\",\n\t\t\t\tType: \"select_one\",\n\t\t\t\tValuer: func(result interface{}, context *qor.Context) interface{} {\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\treturn setting.GetTemplate()\n\t\t\t\t\t}\n\t\t\t\t\treturn \"\"\n\t\t\t\t},\n\t\t\t\tCollection: func(result interface{}, context *qor.Context) (results [][]string) {\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tif widget := GetWidget(setting.GetSerializableArgumentKind()); widget != nil {\n\t\t\t\t\t\t\tfor _, value := range widget.Templates {\n\t\t\t\t\t\t\t\tresults = append(results, []string{value, value})\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tSetter: func(result interface{}, metaValue *resource.MetaValue, context *qor.Context) {\n\t\t\t\t\tif setting, ok := result.(QorWidgetSettingInterface); ok {\n\t\t\t\t\t\tsetting.SetTemplate(utils.ToString(metaValue.Value))\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tif res.GetMeta(\"Shared\") == nil {\n\t\t\tres.Meta(&admin.Meta{\n\t\t\t\tName: \"Shared\",\n\t\t\t\tLabel: \"This widget is shared\",\n\t\t\t})\n\t\t}\n\n\t\tres.Scope(&admin.Scope{\n\t\t\tName: \"Shared\",\n\t\t\tLabel: \"Shared Widgets\",\n\t\t\tHandle: func(db *gorm.DB, _ *qor.Context) *gorm.DB {\n\t\t\t\treturn db.Where(\"shared = ?\", true)\n\t\t\t},\n\t\t})\n\n\t\tres.Action(&admin.Action{\n\t\t\tName: \"Preview\",\n\t\t\tURL: func(record interface{}, context *admin.Context) string {\n\t\t\t\treturn fmt.Sprintf(\"%v\/%v\/%v\/!preview\", context.Admin.GetRouter().Prefix, res.ToParam(), record.(QorWidgetSettingInterface).GetWidgetName())\n\t\t\t},\n\t\t\tModes: []string{\"edit\", \"menu_item\"},\n\t\t})\n\n\t\tres.UseTheme(\"widget\")\n\n\t\tres.IndexAttrs(\"Name\", \"Description\", \"CreatedAt\", \"UpdatedAt\")\n\t\tres.ShowAttrs(\"Name\", \"Scope\", \"WidgetType\", \"Template\", \"Description\", \"Value\", \"CreatedAt\", \"UpdatedAt\")\n\t\tres.EditAttrs(\n\t\t\t\"DisplayName\", \"Description\", \"Scope\", \"Widgets\", \"Template\",\n\t\t\t&admin.Section{\n\t\t\t\tTitle: \"Settings\",\n\t\t\t\tRows: [][]string{{\"Kind\"}, {\"SerializableMeta\"}},\n\t\t\t},\n\t\t\t\"Shared\",\n\t\t)\n\t\tres.NewAttrs(\"Name\", \"Description\", \"Scope\", \"Widgets\", 
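// The updated ConfigureQorResource wraps each registration in a
// res.GetMeta(...) == nil check, so application code that already registered
// its own "Scope", "Widgets" or "Template" meta is not clobbered by the
// defaults. The same register-unless-overridden idea, reduced to a
// map-backed registry sketch with stand-in types.
package metasketch

type Meta struct {
	Name string
	Type string
}

type Resource struct {
	metas map[string]*Meta
}

func (r *Resource) GetMeta(name string) *Meta {
	return r.metas[name]
}

func (r *Resource) Meta(m *Meta) {
	if r.metas == nil {
		r.metas = map[string]*Meta{}
	}
	r.metas[m.Name] = m
}

// registerDefault adds m only when the application has not supplied its own.
func (r *Resource) registerDefault(m *Meta) {
	if r.GetMeta(m.Name) == nil {
		r.Meta(m)
	}
}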
\"Template\",\n\t\t\t&admin.Section{\n\t\t\t\tTitle: \"Settings\",\n\t\t\t\tRows: [][]string{{\"Kind\"}, {\"SerializableMeta\"}},\n\t\t\t},\n\t\t\t\"Shared\",\n\t\t)\n\t}\n}\n<|endoftext|>"} {"text":"package model\n\nimport (\n\t\"encoding\/json\"\n\te \"github.com\/lastbackend\/lastbackend\/libs\/errors\"\n\t\"github.com\/lastbackend\/lastbackend\/pkg\/service\"\n\t\"github.com\/lastbackend\/lastbackend\/pkg\/util\/table\"\n\t\"time\"\n\t\"fmt\"\n)\n\ntype ServiceList []Service\n\ntype Service struct {\n\t\/\/ Service uuid, incremented automatically\n\tID string `json:\"id\" gorethink:\"id,omitempty\"`\n\t\/\/ Service user\n\tUser string `json:\"user\" gorethink:\"user,omitempty\"`\n\t\/\/ Service project\n\tProject string `json:\"project\" gorethink:\"project,omitempty\"`\n\t\/\/ Service image\n\tImage string `json:\"image\" gorethink:\"image,omitempty\"`\n\t\/\/ Service name\n\tName string `json:\"name\" gorethink:\"name,omitempty\"`\n\t\/\/ Service spec\n\tSpec *service.Service `json:\"spec,omitempty\" gorethink:\"-\"`\n\t\/\/ Service created time\n\tCreated time.Time `json:\"created\" gorethink:\"created,omitempty\"`\n\t\/\/ Service updated time\n\tUpdated time.Time `json:\"updated\" gorethink:\"updated,omitempty\"`\n}\n\nfunc (s *Service) ToJson() ([]byte, *e.Err) {\n\tbuf, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn nil, e.New(\"service\").Unknown(err)\n\t}\n\n\treturn buf, nil\n}\n\nfunc (s *Service) DrawTable(projectName string) {\n\ttable.PrintHorizontal(map[string]interface{}{\n\t\t\"ID\": s.ID,\n\t\t\"NAME\": s.Name,\n\t\t\"PROJECT\": projectName,\n\t\t\"PODS\": s.Spec.PodList.ListMeta.Total,\n\t})\n\n\tt := table.New([]string{\" \", \"NAME\", \"STATUS\", \"RESTARTS\", \"CONTAINERS\"})\n\tt.VisibleHeader = true\n\n\tfor _, pod := range s.Spec.PodList.Pods {\n\t\tt.AddRow(map[string]interface{}{\n\t\t\t\" \": \"\",\n\t\t\t\"NAME\": pod.ObjectMeta.Name,\n\t\t\t\"STATUS\": pod.PodStatus.PodPhase,\n\t\t\t\"RESTARTS\": pod.RestartCount,\n\t\t\t\"CONTAINERS\": pod.Containers.ListMeta.Total,\n\t\t})\n\t}\n\tt.AddRow(map[string]interface{}{})\n\n\tt.Print()\n}\n\nfunc (s *ServiceList) ToJson() ([]byte, *e.Err) {\n\n\tif s == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tbuf, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn nil, e.New(\"service\").Unknown(err)\n\t}\n\n\treturn buf, nil\n}\n\nfunc (s *ServiceList) DrawTable(projectName string) {\n\tfmt.Print(\" Project \", projectName + \"\\n\\n\")\n\n\tfor _, s := range *s {\n\t\t\/\/tservice := table.New([]string{\"ID\", \"NAME\", \"PODS\"})\n\t\t\/\/tservice.VisibleHeader = true\n\t\t\/\/\n\t\t\/\/tservice.AddRow(map[string]interface{}{\n\t\t\/\/\t\"ID\": s.ID,\n\t\t\/\/\t\"NAME\": s.Name,\n\t\t\/\/\t\"PODS\": s.Spec.PodList.ListMeta.Total,\n\t\t\/\/})\n\t\t\/\/tservice.Print()\n\n\t\ttable.PrintHorizontal(map[string]interface{}{\n\t\t\t\"ID\": s.ID,\n\t\t\t\"NAME\": s.Name,\n\t\t\t\"PODS\": s.Spec.PodList.ListMeta.Total,\n\t\t})\n\n\t\tfor _, pod := range s.Spec.PodList.Pods {\n\t\t\ttpods := table.New([]string{\" \", \"NAME\", \"STATUS\", \"RESTARTS\", \"CONTAINERS\"})\n\t\t\ttpods.VisibleHeader = true\n\n\t\t\ttpods.AddRow(map[string]interface{}{\n\t\t\t\t\" \": \"\",\n\t\t\t\t\"NAME\": pod.ObjectMeta.Name,\n\t\t\t\t\"STATUS\": pod.PodStatus.PodPhase,\n\t\t\t\t\"RESTARTS\": pod.RestartCount,\n\t\t\t\t\"CONTAINERS\": pod.Containers.ListMeta.Total,\n\t\t\t})\n\t\t\ttpods.Print()\n\t\t}\n\n\t\tfmt.Print(\"\\n\\n\")\n\t}\n}\nDelete comments service cmd viewpackage model\n\nimport (\n\t\"encoding\/json\"\n\te 
\"github.com\/lastbackend\/lastbackend\/libs\/errors\"\n\t\"github.com\/lastbackend\/lastbackend\/pkg\/service\"\n\t\"github.com\/lastbackend\/lastbackend\/pkg\/util\/table\"\n\t\"time\"\n\t\"fmt\"\n)\n\ntype ServiceList []Service\n\ntype Service struct {\n\t\/\/ Service uuid, incremented automatically\n\tID string `json:\"id\" gorethink:\"id,omitempty\"`\n\t\/\/ Service user\n\tUser string `json:\"user\" gorethink:\"user,omitempty\"`\n\t\/\/ Service project\n\tProject string `json:\"project\" gorethink:\"project,omitempty\"`\n\t\/\/ Service image\n\tImage string `json:\"image\" gorethink:\"image,omitempty\"`\n\t\/\/ Service name\n\tName string `json:\"name\" gorethink:\"name,omitempty\"`\n\t\/\/ Service spec\n\tSpec *service.Service `json:\"spec,omitempty\" gorethink:\"-\"`\n\t\/\/ Service created time\n\tCreated time.Time `json:\"created\" gorethink:\"created,omitempty\"`\n\t\/\/ Service updated time\n\tUpdated time.Time `json:\"updated\" gorethink:\"updated,omitempty\"`\n}\n\nfunc (s *Service) ToJson() ([]byte, *e.Err) {\n\tbuf, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn nil, e.New(\"service\").Unknown(err)\n\t}\n\n\treturn buf, nil\n}\n\nfunc (s *Service) DrawTable(projectName string) {\n\ttable.PrintHorizontal(map[string]interface{}{\n\t\t\"ID\": s.ID,\n\t\t\"NAME\": s.Name,\n\t\t\"PROJECT\": projectName,\n\t\t\"PODS\": s.Spec.PodList.ListMeta.Total,\n\t})\n\n\tt := table.New([]string{\" \", \"NAME\", \"STATUS\", \"RESTARTS\", \"CONTAINERS\"})\n\tt.VisibleHeader = true\n\n\tfor _, pod := range s.Spec.PodList.Pods {\n\t\tt.AddRow(map[string]interface{}{\n\t\t\t\" \": \"\",\n\t\t\t\"NAME\": pod.ObjectMeta.Name,\n\t\t\t\"STATUS\": pod.PodStatus.PodPhase,\n\t\t\t\"RESTARTS\": pod.RestartCount,\n\t\t\t\"CONTAINERS\": pod.Containers.ListMeta.Total,\n\t\t})\n\t}\n\tt.AddRow(map[string]interface{}{})\n\n\tt.Print()\n}\n\nfunc (s *ServiceList) ToJson() ([]byte, *e.Err) {\n\n\tif s == nil {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tbuf, err := json.Marshal(s)\n\tif err != nil {\n\t\treturn nil, e.New(\"service\").Unknown(err)\n\t}\n\n\treturn buf, nil\n}\n\nfunc (s *ServiceList) DrawTable(projectName string) {\n\tfmt.Print(\" Project \", projectName + \"\\n\\n\")\n\n\tfor _, s := range *s {\n\t\ttable.PrintHorizontal(map[string]interface{}{\n\t\t\t\"ID\": s.ID,\n\t\t\t\"NAME\": s.Name,\n\t\t\t\"PODS\": s.Spec.PodList.ListMeta.Total,\n\t\t})\n\n\t\tfor _, pod := range s.Spec.PodList.Pods {\n\t\t\ttpods := table.New([]string{\" \", \"NAME\", \"STATUS\", \"RESTARTS\", \"CONTAINERS\"})\n\t\t\ttpods.VisibleHeader = true\n\n\t\t\ttpods.AddRow(map[string]interface{}{\n\t\t\t\t\" \": \"\",\n\t\t\t\t\"NAME\": pod.ObjectMeta.Name,\n\t\t\t\t\"STATUS\": pod.PodStatus.PodPhase,\n\t\t\t\t\"RESTARTS\": pod.RestartCount,\n\t\t\t\t\"CONTAINERS\": pod.Containers.ListMeta.Total,\n\t\t\t})\n\t\t\ttpods.Print()\n\t\t}\n\n\t\tfmt.Print(\"\\n\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"package router\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst GossipInterval = 30 * time.Second\n\ntype GossipData interface {\n\tEncode() []byte\n\tMerge(GossipData)\n}\n\ntype Gossip interface {\n\t\/\/ specific message from one peer to another\n\t\/\/ intermediate peers relay it using unicast topology.\n\tGossipUnicast(dstPeerName PeerName, msg []byte) error\n\t\/\/ send gossip to every peer, relayed using broadcast topology.\n\tGossipBroadcast(update GossipData) error\n}\n\ntype Gossiper interface {\n\tOnGossipUnicast(sender PeerName, msg []byte) error\n\t\/\/ merge 
received data into state and return a representation of\n\t\/\/ the received data, for further propagation\n\tOnGossipBroadcast(update []byte) (GossipData, error)\n\t\/\/ return state of everything we know; gets called periodically\n\tGossip() GossipData\n\t\/\/ merge received data into state and return \"everything new I've\n\t\/\/ just learnt\", or nil if nothing in the received data was new\n\tOnGossip(update []byte) (GossipData, error)\n}\n\n\/\/ Accumulates GossipData that needs to be sent to one destination,\n\/\/ and sends it when possible.\ntype GossipSender struct {\n\tsend func(GossipData)\n\tcell chan GossipData\n\t\/\/ for testing\n\tsent bool\n\tflushch chan chan bool\n}\n\nfunc NewGossipSender(send func(GossipData)) *GossipSender {\n\treturn &GossipSender{send: send}\n}\n\nfunc (sender *GossipSender) Start() {\n\tsender.cell = make(chan GossipData, 1)\n\tsender.flushch = make(chan chan bool)\n\tgo sender.run()\n}\n\nfunc (sender *GossipSender) run() {\n\tfor {\n\t\tselect {\n\t\tcase pending := <-sender.cell:\n\t\t\tif pending == nil { \/\/ receive zero value when chan is closed\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsender.send(pending)\n\t\t\tsender.sent = true\n\t\tcase ch := <-sender.flushch:\n\t\t\t\/\/ send anything pending, then reply back whether we sent\n\t\t\t\/\/ anything since previous flush\n\t\t\tselect {\n\t\t\tcase pending := <-sender.cell:\n\t\t\t\tsender.send(pending)\n\t\t\t\tsender.sent = true\n\t\t\tdefault:\n\t\t\t}\n\t\t\tch <- sender.sent\n\t\t\tsender.sent = false\n\t\t}\n\t}\n}\n\nfunc (sender *GossipSender) Send(data GossipData) {\n\t\/\/ NB: this must not be invoked concurrently\n\tselect {\n\tcase pending := <-sender.cell:\n\t\tpending.Merge(data)\n\t\tsender.cell <- pending\n\tdefault:\n\t\tsender.cell <- data\n\t}\n}\n\nfunc (sender *GossipSender) Stop() {\n\tclose(sender.cell)\n}\n\ntype connectionSenders map[Connection]*GossipSender\ntype peerSenders map[PeerName]*GossipSender\n\ntype GossipChannel struct {\n\tsync.Mutex\n\tourself *LocalPeer\n\troutes *Routes\n\tname string\n\tgossiper Gossiper\n\tsenders connectionSenders\n\tbroadcasters peerSenders\n}\n\ntype GossipChannels map[string]*GossipChannel\n\nfunc (router *Router) NewGossip(channelName string, g Gossiper) Gossip {\n\tchannel := &GossipChannel{\n\t\tourself: router.Ourself,\n\t\troutes: router.Routes,\n\t\tname: channelName,\n\t\tgossiper: g,\n\t\tsenders: make(connectionSenders),\n\t\tbroadcasters: make(peerSenders)}\n\trouter.GossipChannels[channelName] = channel\n\treturn channel\n}\n\nfunc (router *Router) SendAllGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.Send(router.Ourself.Name, gossip)\n\t\t}\n\t}\n}\n\nfunc (router *Router) SendAllGossipDown(conn Connection) {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.SendDown(conn, channel.gossiper.Gossip())\n\t\t}\n\t}\n}\n\nfunc (router *Router) handleGossip(tag ProtocolTag, payload []byte) error {\n\tdecoder := gob.NewDecoder(bytes.NewReader(payload))\n\tvar channelName string\n\tif err := decoder.Decode(&channelName); err != nil {\n\t\treturn err\n\t}\n\tchannel, found := router.GossipChannels[channelName]\n\tif !found {\n\t\treturn fmt.Errorf(\"[gossip] received unknown channel with name %s\", channelName)\n\t}\n\tvar srcName PeerName\n\tif err := decoder.Decode(&srcName); err != nil {\n\t\treturn err\n\t}\n\tswitch tag {\n\tcase ProtocolGossipUnicast:\n\t\treturn 
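handleGossip above peels a channel name and then a source peer off a single gob stream before dispatching on the tag. A self-contained sketch of that sequential framing, assuming plain strings for the names (weave's GobEncode helper and PeerName type are not reproduced here):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Encode a sequence of values into one gob stream, the same shape that
// handleGossip decodes field by field.
func encodeFrames(items ...interface{}) []byte {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	for _, item := range items {
		if err := enc.Encode(item); err != nil {
			panic(err)
		}
	}
	return buf.Bytes()
}

func main() {
	wire := encodeFrames("my-channel", "peer-1", []byte("update"))

	dec := gob.NewDecoder(bytes.NewReader(wire))
	var channel, src string
	var body []byte
	for _, target := range []interface{}{&channel, &src, &body} {
		if err := dec.Decode(target); err != nil {
			panic(err)
		}
	}
	fmt.Println(channel, src, string(body)) // my-channel peer-1 update
}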
channel.deliverUnicast(srcName, payload, decoder)\n\tcase ProtocolGossipBroadcast:\n\t\treturn channel.deliverBroadcast(srcName, payload, decoder)\n\tcase ProtocolGossip:\n\t\treturn channel.deliver(srcName, payload, decoder)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) deliverUnicast(srcName PeerName, origPayload []byte, dec *gob.Decoder) error {\n\tvar destName PeerName\n\tif err := dec.Decode(&destName); err != nil {\n\t\treturn err\n\t}\n\tif c.ourself.Name != destName {\n\t\treturn c.relayUnicast(destName, origPayload)\n\t}\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\treturn c.gossiper.OnGossipUnicast(srcName, payload)\n}\n\nfunc (c *GossipChannel) deliverBroadcast(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tdata, err := c.gossiper.OnGossipBroadcast(payload)\n\tif err != nil || data == nil {\n\t\treturn err\n\t}\n\treturn c.relayBroadcast(srcName, data)\n}\n\nfunc (c *GossipChannel) deliver(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tif data, err := c.gossiper.OnGossip(payload); err != nil {\n\t\treturn err\n\t} else if data != nil {\n\t\tc.Send(srcName, data)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) Send(srcName PeerName, data GossipData) {\n\t\/\/ do this outside the lock below so we avoid lock nesting\n\tc.routes.EnsureRecalculated()\n\tselectedConnections := make(ConnectionSet)\n\tfor name := range c.routes.RandomNeighbours(srcName) {\n\t\tif conn, found := c.ourself.ConnectionTo(name); found {\n\t\t\tselectedConnections[conn] = void\n\t\t}\n\t}\n\tif len(selectedConnections) == 0 {\n\t\treturn\n\t}\n\tconnections := c.ourself.Connections()\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing entries and stop&remove them if the associated\n\t\/\/ connection is no longer active. We stop as soon as we\n\t\/\/ encounter a valid entry; the idea being that when there is\n\t\/\/ little or no garbage then this executes close to O(1)[1],\n\t\/\/ whereas when there is lots of garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peer.ConnectionTo(name)\n\t\/\/ below, we have that Peer.Connections() invocation above. 
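GossipSender.Send above coalesces a queued update into the pending one via Merge. A toy GossipData implementation, assuming gob encoding and a string-set state (not weave's real state type), to make that contract concrete:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// The two-method interface from the router, restated so the sketch
// compiles on its own.
type GossipData interface {
	Encode() []byte
	Merge(GossipData)
}

// A set of strings as gossip state: Merge folds a newer update into the
// pending one, which is what GossipSender.Send relies on when a send is
// already queued.
type stringSet map[string]struct{}

func (s stringSet) Encode() []byte {
	keys := make([]string, 0, len(s))
	for k := range s {
		keys = append(keys, k)
	}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(keys); err != nil {
		panic(err)
	}
	return buf.Bytes()
}

func (s stringSet) Merge(other GossipData) {
	for k := range other.(stringSet) {
		s[k] = struct{}{}
	}
}

func main() {
	var pending GossipData = stringSet{"a": {}}
	pending.Merge(stringSet{"b": {}})
	fmt.Println(len(pending.(stringSet)), len(pending.Encode()) > 0) // 2 true
}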
That\n\t\/\/ is O(n_our_connections) at best.\n\tfor conn, sender := range c.senders {\n\t\tif _, found := connections[conn]; !found {\n\t\t\tdelete(c.senders, conn)\n\t\t\tsender.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor conn := range selectedConnections {\n\t\tc.sendDown(conn, data)\n\t}\n}\n\nfunc (c *GossipChannel) SendDown(conn Connection, data GossipData) {\n\tc.Lock()\n\tc.sendDown(conn, data)\n\tc.Unlock()\n}\n\nfunc (c *GossipChannel) sendDown(conn Connection, data GossipData) {\n\tsender, found := c.senders[conn]\n\tif !found {\n\t\tsender = NewGossipSender(func(pending GossipData) {\n\t\t\tprotocolMsg := ProtocolMsg{ProtocolGossip, GobEncode(c.name, c.ourself.Name, pending.Encode())}\n\t\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t\t})\n\t\tc.senders[conn] = sender\n\t\tsender.Start()\n\t}\n\tsender.Send(data)\n}\n\nfunc (c *GossipChannel) GossipUnicast(dstPeerName PeerName, msg []byte) error {\n\treturn c.relayUnicast(dstPeerName, GobEncode(c.name, c.ourself.Name, dstPeerName, msg))\n}\n\nfunc (c *GossipChannel) GossipBroadcast(update GossipData) error {\n\treturn c.relayBroadcast(c.ourself.Name, update)\n}\n\nfunc (c *GossipChannel) relayUnicast(dstPeerName PeerName, buf []byte) error {\n\tif relayPeerName, found := c.routes.UnicastAll(dstPeerName); !found {\n\t\tc.log(\"unknown relay destination:\", dstPeerName)\n\t} else if conn, found := c.ourself.ConnectionTo(relayPeerName); !found {\n\t\tc.log(\"unable to find connection to relay peer\", relayPeerName)\n\t} else {\n\t\tconn.(ProtocolSender).SendProtocolMsg(ProtocolMsg{ProtocolGossipUnicast, buf})\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) relayBroadcast(srcName PeerName, update GossipData) error {\n\tnames := c.routes.PeerNames() \/\/ do this outside the lock so they don't nest\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing broadcasters and stop&remove them if their source peer\n\t\/\/ is unknown. We stop as soon as we encounter a valid entry; the\n\t\/\/ idea being that when there is little or no garbage then this\n\t\/\/ executes close to O(1)[1], whereas when there is lots of\n\t\/\/ garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peers.Fetch(name) below, we\n\t\/\/ have that Peers.Names() invocation above. 
That is O(n_peers) at\n\t\/\/ best.\n\tfor name, broadcaster := range c.broadcasters {\n\t\tif _, found := names[name]; !found {\n\t\t\tdelete(c.broadcasters, name)\n\t\t\tbroadcaster.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tbroadcaster, found := c.broadcasters[srcName]\n\tif !found {\n\t\tbroadcaster = NewGossipSender(func(pending GossipData) { c.sendBroadcast(srcName, pending) })\n\t\tc.broadcasters[srcName] = broadcaster\n\t\tbroadcaster.Start()\n\t}\n\tbroadcaster.Send(update)\n\treturn nil\n}\n\nfunc (c *GossipChannel) sendBroadcast(srcName PeerName, update GossipData) {\n\tc.routes.EnsureRecalculated()\n\tnextHops := c.routes.BroadcastAll(srcName)\n\tif len(nextHops) == 0 {\n\t\treturn\n\t}\n\tprotocolMsg := ProtocolMsg{ProtocolGossipBroadcast, GobEncode(c.name, srcName, update.Encode())}\n\t\/\/ FIXME a single blocked connection can stall us\n\tfor _, conn := range c.ourself.ConnectionsTo(nextHops) {\n\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t}\n}\n\nfunc (c *GossipChannel) log(args ...interface{}) {\n\tlog.Println(append(append([]interface{}{}, \"[gossip \"+c.name+\"]:\"), args...)...)\n}\n\n\/\/ for testing\n\nfunc (router *Router) sendPendingGossip() bool {\n\tsentSomething := false\n\tfor _, channel := range router.GossipChannels {\n\t\tchannel.Lock()\n\t\tfor _, sender := range channel.senders {\n\t\t\tsentSomething = sender.flush() || sentSomething\n\t\t}\n\t\tfor _, sender := range channel.broadcasters {\n\t\t\tsentSomething = sender.flush() || sentSomething\n\t\t}\n\t\tchannel.Unlock()\n\t}\n\treturn sentSomething\n}\n\nfunc (sender *GossipSender) flush() bool {\n\tch := make(chan bool)\n\tsender.flushch <- ch\n\treturn <-ch\n}\nrefactor: introduce NewGossipChannelpackage router\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst GossipInterval = 30 * time.Second\n\ntype GossipData interface {\n\tEncode() []byte\n\tMerge(GossipData)\n}\n\ntype Gossip interface {\n\t\/\/ specific message from one peer to another\n\t\/\/ intermediate peers relay it using unicast topology.\n\tGossipUnicast(dstPeerName PeerName, msg []byte) error\n\t\/\/ send gossip to every peer, relayed using broadcast topology.\n\tGossipBroadcast(update GossipData) error\n}\n\ntype Gossiper interface {\n\tOnGossipUnicast(sender PeerName, msg []byte) error\n\t\/\/ merge received data into state and return a representation of\n\t\/\/ the received data, for further propagation\n\tOnGossipBroadcast(update []byte) (GossipData, error)\n\t\/\/ return state of everything we know; gets called periodically\n\tGossip() GossipData\n\t\/\/ merge received data into state and return \"everything new I've\n\t\/\/ just learnt\", or nil if nothing in the received data was new\n\tOnGossip(update []byte) (GossipData, error)\n}\n\n\/\/ Accumulates GossipData that needs to be sent to one destination,\n\/\/ and sends it when possible.\ntype GossipSender struct {\n\tsend func(GossipData)\n\tcell chan GossipData\n\t\/\/ for testing\n\tsent bool\n\tflushch chan chan bool\n}\n\nfunc NewGossipSender(send func(GossipData)) *GossipSender {\n\treturn &GossipSender{send: send}\n}\n\nfunc (sender *GossipSender) Start() {\n\tsender.cell = make(chan GossipData, 1)\n\tsender.flushch = make(chan chan bool)\n\tgo sender.run()\n}\n\nfunc (sender *GossipSender) run() {\n\tfor {\n\t\tselect {\n\t\tcase pending := <-sender.cell:\n\t\t\tif pending == nil { \/\/ receive zero value when chan is closed\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsender.send(pending)\n\t\t\tsender.sent = 
true\n\t\tcase ch := <-sender.flushch:\n\t\t\t\/\/ send anything pending, then reply back whether we sent\n\t\t\t\/\/ anything since previous flush\n\t\t\tselect {\n\t\t\tcase pending := <-sender.cell:\n\t\t\t\tsender.send(pending)\n\t\t\t\tsender.sent = true\n\t\t\tdefault:\n\t\t\t}\n\t\t\tch <- sender.sent\n\t\t\tsender.sent = false\n\t\t}\n\t}\n}\n\nfunc (sender *GossipSender) Send(data GossipData) {\n\t\/\/ NB: this must not be invoked concurrently\n\tselect {\n\tcase pending := <-sender.cell:\n\t\tpending.Merge(data)\n\t\tsender.cell <- pending\n\tdefault:\n\t\tsender.cell <- data\n\t}\n}\n\nfunc (sender *GossipSender) Stop() {\n\tclose(sender.cell)\n}\n\ntype connectionSenders map[Connection]*GossipSender\ntype peerSenders map[PeerName]*GossipSender\n\ntype GossipChannel struct {\n\tsync.Mutex\n\tname string\n\tourself *LocalPeer\n\troutes *Routes\n\tgossiper Gossiper\n\tsenders connectionSenders\n\tbroadcasters peerSenders\n}\n\ntype GossipChannels map[string]*GossipChannel\n\nfunc NewGossipChannel(channelName string, ourself *LocalPeer, routes *Routes, g Gossiper) *GossipChannel {\n\treturn &GossipChannel{\n\t\tname: channelName,\n\t\tourself: ourself,\n\t\troutes: routes,\n\t\tgossiper: g,\n\t\tsenders: make(connectionSenders),\n\t\tbroadcasters: make(peerSenders)}\n}\n\nfunc (router *Router) NewGossip(channelName string, g Gossiper) Gossip {\n\tchannel := NewGossipChannel(channelName, router.Ourself, router.Routes, g)\n\trouter.GossipChannels[channelName] = channel\n\treturn channel\n}\n\nfunc (router *Router) SendAllGossip() {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.Send(router.Ourself.Name, gossip)\n\t\t}\n\t}\n}\n\nfunc (router *Router) SendAllGossipDown(conn Connection) {\n\tfor _, channel := range router.GossipChannels {\n\t\tif gossip := channel.gossiper.Gossip(); gossip != nil {\n\t\t\tchannel.SendDown(conn, channel.gossiper.Gossip())\n\t\t}\n\t}\n}\n\nfunc (router *Router) handleGossip(tag ProtocolTag, payload []byte) error {\n\tdecoder := gob.NewDecoder(bytes.NewReader(payload))\n\tvar channelName string\n\tif err := decoder.Decode(&channelName); err != nil {\n\t\treturn err\n\t}\n\tchannel, found := router.GossipChannels[channelName]\n\tif !found {\n\t\treturn fmt.Errorf(\"[gossip] received unknown channel with name %s\", channelName)\n\t}\n\tvar srcName PeerName\n\tif err := decoder.Decode(&srcName); err != nil {\n\t\treturn err\n\t}\n\tswitch tag {\n\tcase ProtocolGossipUnicast:\n\t\treturn channel.deliverUnicast(srcName, payload, decoder)\n\tcase ProtocolGossipBroadcast:\n\t\treturn channel.deliverBroadcast(srcName, payload, decoder)\n\tcase ProtocolGossip:\n\t\treturn channel.deliver(srcName, payload, decoder)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) deliverUnicast(srcName PeerName, origPayload []byte, dec *gob.Decoder) error {\n\tvar destName PeerName\n\tif err := dec.Decode(&destName); err != nil {\n\t\treturn err\n\t}\n\tif c.ourself.Name != destName {\n\t\treturn c.relayUnicast(destName, origPayload)\n\t}\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\treturn c.gossiper.OnGossipUnicast(srcName, payload)\n}\n\nfunc (c *GossipChannel) deliverBroadcast(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tdata, err := c.gossiper.OnGossipBroadcast(payload)\n\tif err != nil || data == nil {\n\t\treturn err\n\t}\n\treturn 
c.relayBroadcast(srcName, data)\n}\n\nfunc (c *GossipChannel) deliver(srcName PeerName, _ []byte, dec *gob.Decoder) error {\n\tvar payload []byte\n\tif err := dec.Decode(&payload); err != nil {\n\t\treturn err\n\t}\n\tif data, err := c.gossiper.OnGossip(payload); err != nil {\n\t\treturn err\n\t} else if data != nil {\n\t\tc.Send(srcName, data)\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) Send(srcName PeerName, data GossipData) {\n\t\/\/ do this outside the lock below so we avoid lock nesting\n\tc.routes.EnsureRecalculated()\n\tselectedConnections := make(ConnectionSet)\n\tfor name := range c.routes.RandomNeighbours(srcName) {\n\t\tif conn, found := c.ourself.ConnectionTo(name); found {\n\t\t\tselectedConnections[conn] = void\n\t\t}\n\t}\n\tif len(selectedConnections) == 0 {\n\t\treturn\n\t}\n\tconnections := c.ourself.Connections()\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing entries and stop&remove them if the associated\n\t\/\/ connection is no longer active. We stop as soon as we\n\t\/\/ encounter a valid entry; the idea being that when there is\n\t\/\/ little or no garbage then this executes close to O(1)[1],\n\t\/\/ whereas when there is lots of garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peer.ConnectionTo(name)\n\t\/\/ below, we have that Peer.Connections() invocation above. That\n\t\/\/ is O(n_our_connections) at best.\n\tfor conn, sender := range c.senders {\n\t\tif _, found := connections[conn]; !found {\n\t\t\tdelete(c.senders, conn)\n\t\t\tsender.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor conn := range selectedConnections {\n\t\tc.sendDown(conn, data)\n\t}\n}\n\nfunc (c *GossipChannel) SendDown(conn Connection, data GossipData) {\n\tc.Lock()\n\tc.sendDown(conn, data)\n\tc.Unlock()\n}\n\nfunc (c *GossipChannel) sendDown(conn Connection, data GossipData) {\n\tsender, found := c.senders[conn]\n\tif !found {\n\t\tsender = NewGossipSender(func(pending GossipData) {\n\t\t\tprotocolMsg := ProtocolMsg{ProtocolGossip, GobEncode(c.name, c.ourself.Name, pending.Encode())}\n\t\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t\t})\n\t\tc.senders[conn] = sender\n\t\tsender.Start()\n\t}\n\tsender.Send(data)\n}\n\nfunc (c *GossipChannel) GossipUnicast(dstPeerName PeerName, msg []byte) error {\n\treturn c.relayUnicast(dstPeerName, GobEncode(c.name, c.ourself.Name, dstPeerName, msg))\n}\n\nfunc (c *GossipChannel) GossipBroadcast(update GossipData) error {\n\treturn c.relayBroadcast(c.ourself.Name, update)\n}\n\nfunc (c *GossipChannel) relayUnicast(dstPeerName PeerName, buf []byte) error {\n\tif relayPeerName, found := c.routes.UnicastAll(dstPeerName); !found {\n\t\tc.log(\"unknown relay destination:\", dstPeerName)\n\t} else if conn, found := c.ourself.ConnectionTo(relayPeerName); !found {\n\t\tc.log(\"unable to find connection to relay peer\", relayPeerName)\n\t} else {\n\t\tconn.(ProtocolSender).SendProtocolMsg(ProtocolMsg{ProtocolGossipUnicast, buf})\n\t}\n\treturn nil\n}\n\nfunc (c *GossipChannel) relayBroadcast(srcName PeerName, update GossipData) error {\n\tnames := c.routes.PeerNames() \/\/ do this outside the lock so they don't nest\n\tc.Lock()\n\tdefer c.Unlock()\n\t\/\/ GC - randomly (courtesy of go's map iterator) pick some\n\t\/\/ existing broadcasters and stop&remove them if their source peer\n\t\/\/ is unknown. 
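The flushch field above is a channel of reply channels, the usual Go idiom for a synchronous handshake with a running goroutine. A self-contained sketch of how flush drains pending work and reports whether anything was sent since the previous flush (names here are illustrative):

package main

import "fmt"

// A worker that accumulates "sends" and answers flush requests over a
// per-request reply channel, mirroring GossipSender.run and flush.
func worker(work <-chan string, flush <-chan chan bool) {
	sent := false
	for {
		select {
		case item, ok := <-work:
			if !ok {
				return
			}
			_ = item // pretend to send it
			sent = true
		case reply := <-flush:
			select { // drain at most one pending item first
			case item := <-work:
				_ = item
				sent = true
			default:
			}
			reply <- sent
			sent = false
		}
	}
}

func main() {
	work := make(chan string, 1)
	flush := make(chan chan bool)
	go worker(work, flush)

	work <- "update"
	reply := make(chan bool)
	flush <- reply
	fmt.Println(<-reply) // true: something was sent since the last flush
	close(work)
}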
We stop as soon as we encounter a valid entry; the\n\t\/\/ idea being that when there is little or no garbage then this\n\t\/\/ executes close to O(1)[1], whereas when there is lots of\n\t\/\/ garbage we remove it quickly.\n\t\/\/\n\t\/\/ [1] TODO Unfortunately, due to the desire to avoid nested\n\t\/\/ locks, instead of simply invoking Peers.Fetch(name) below, we\n\t\/\/ have that Peers.Names() invocation above. That is O(n_peers) at\n\t\/\/ best.\n\tfor name, broadcaster := range c.broadcasters {\n\t\tif _, found := names[name]; !found {\n\t\t\tdelete(c.broadcasters, name)\n\t\t\tbroadcaster.Stop()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tbroadcaster, found := c.broadcasters[srcName]\n\tif !found {\n\t\tbroadcaster = NewGossipSender(func(pending GossipData) { c.sendBroadcast(srcName, pending) })\n\t\tc.broadcasters[srcName] = broadcaster\n\t\tbroadcaster.Start()\n\t}\n\tbroadcaster.Send(update)\n\treturn nil\n}\n\nfunc (c *GossipChannel) sendBroadcast(srcName PeerName, update GossipData) {\n\tc.routes.EnsureRecalculated()\n\tnextHops := c.routes.BroadcastAll(srcName)\n\tif len(nextHops) == 0 {\n\t\treturn\n\t}\n\tprotocolMsg := ProtocolMsg{ProtocolGossipBroadcast, GobEncode(c.name, srcName, update.Encode())}\n\t\/\/ FIXME a single blocked connection can stall us\n\tfor _, conn := range c.ourself.ConnectionsTo(nextHops) {\n\t\tconn.(ProtocolSender).SendProtocolMsg(protocolMsg)\n\t}\n}\n\nfunc (c *GossipChannel) log(args ...interface{}) {\n\tlog.Println(append(append([]interface{}{}, \"[gossip \"+c.name+\"]:\"), args...)...)\n}\n\n\/\/ for testing\n\nfunc (router *Router) sendPendingGossip() bool {\n\tsentSomething := false\n\tfor _, channel := range router.GossipChannels {\n\t\tchannel.Lock()\n\t\tfor _, sender := range channel.senders {\n\t\t\tsentSomething = sender.flush() || sentSomething\n\t\t}\n\t\tfor _, sender := range channel.broadcasters {\n\t\t\tsentSomething = sender.flush() || sentSomething\n\t\t}\n\t\tchannel.Unlock()\n\t}\n\treturn sentSomething\n}\n\nfunc (sender *GossipSender) flush() bool {\n\tch := make(chan bool)\n\tsender.flushch <- ch\n\treturn <-ch\n}\n<|endoftext|>"} {"text":"package main\n\nimport \"fmt\"\nimport \"io\"\nimport \"os\"\nimport \"time\"\n\n\/*\n[B10]: ARIB-STD B10\n[ISO]: ISO\/IEC 13818-1\n*\/\n\nconst TS_PACKET_SIZE = 188\n\ntype AnalyzerState struct {\n\tpmtPids map[int]bool\n\tpcrPid int\n\tcaptionPid int\n\tcurrentTimestamp SystemClock\n\tclockOffset int64\n}\n\ntype SystemClock int64\n\nfunc main() {\n\tif len(os.Args) == 1 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s MPEG2-TS-FILE\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tfin, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := fin.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tbuf := make([]byte, TS_PACKET_SIZE)\n\tstate := new(AnalyzerState)\n\tstate.pcrPid = -1\n\tstate.captionPid = -1\n\n\tfor {\n\t\tn, err := fin.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tpanic(err)\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tanalyzePacket(buf, state)\n\t}\n}\n\nfunc assertSyncByte(packet []byte) {\n\tif packet[0] != 0x47 {\n\t\tpanic(\"sync_byte failed\")\n\t}\n}\n\nfunc analyzePacket(packet []byte, state *AnalyzerState) {\n\tassertSyncByte(packet)\n\n\tpayload_unit_start_indicator := (packet[1] & 0x40) != 0\n\tpid := int(packet[1]&0x1f)<<8 | int(packet[2])\n\thasAdaptation := (packet[3] & 0x20) != 0\n\thasPayload := (packet[3] & 0x10) != 0\n\tp := packet[4:]\n\n\tif hasAdaptation {\n\t\t\/\/ [ISO] 2.4.3.4\n\t\t\/\/ Table 
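A worked example of the header bit twiddling in analyzePacket: the 13-bit PID spans the low five bits of byte 1 plus all of byte 2, and the start indicator and adaptation/payload flags are single bits. The header bytes here are made up:

package main

import "fmt"

// Fabricated 4-byte TS header: sync byte, then PID 0x100, then both the
// adaptation and payload flags set in byte 3.
func main() {
	header := []byte{0x47, 0x01, 0x00, 0x30}
	fmt.Println(header[0] == 0x47) // true: sync_byte check
	pusi := (header[1] & 0x40) != 0
	pid := int(header[1]&0x1f)<<8 | int(header[2])
	hasAdaptation := (header[3] & 0x20) != 0
	hasPayload := (header[3] & 0x10) != 0
	fmt.Println(pusi, pid, hasAdaptation, hasPayload) // false 256 true true
}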
2-6\n\t\tadaptation_field_length := p[0]\n\t\tp = p[1:]\n\t\tpcr_flag := (p[0] & 0x10) != 0\n\t\tif pcr_flag && pid == state.pcrPid {\n\t\t\tstate.currentTimestamp = extractPcr(p)\n\t\t}\n\t\tp = p[adaptation_field_length:]\n\t}\n\n\tif hasPayload {\n\t\tif pid == 0 {\n\t\t\tif len(state.pmtPids) == 0 {\n\t\t\t\tstate.pmtPids = extractPmtPids(p[1:])\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Found %d pids: %v\\n\", len(state.pmtPids), state.pmtPids)\n\t\t\t}\n\t\t} else if state.pmtPids != nil && state.pmtPids[pid] {\n\t\t\tif state.captionPid == -1 && payload_unit_start_indicator {\n\t\t\t\t\/\/ PMT section\n\t\t\t\tpcrPid := extractPcrPid(p[1:])\n\t\t\t\tcaptionPid := extractCaptionPid(p[1:])\n\t\t\t\tif captionPid != -1 {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"caption pid = %d, PCR_PID = %d\\n\", captionPid, pcrPid)\n\t\t\t\t\tstate.pcrPid = pcrPid\n\t\t\t\t\tstate.captionPid = captionPid\n\t\t\t\t}\n\t\t\t}\n\t\t} else if pid == 0x0014 {\n\t\t\t\/\/ Time Offset Table\n\t\t\t\/\/ [B10] 5.2.9\n\t\t\tt := extractJstTime(p[1:])\n\t\t\tif t != 0 {\n\t\t\t\tstate.clockOffset = t*100 - state.currentTimestamp.centitime()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc extractPmtPids(payload []byte) map[int]bool {\n\t\/\/ [ISO] 2.4.4.3\n\t\/\/ Table 2-25\n\ttable_id := payload[0]\n\tpids := make(map[int]bool)\n\tif table_id != 0x00 {\n\t\treturn pids\n\t}\n\tsection_length := int(payload[1]&0x0F)<<8 | int(payload[2])\n\tindex := 8\n\tfor index < 3+section_length-4 {\n\t\tprogram_number := int(payload[index+0])<<8 | int(payload[index+1])\n\t\tif program_number != 0 {\n\t\t\tprogram_map_PID := int(payload[index+2]&0x1F)<<8 | int(payload[index+3])\n\t\t\tpids[program_map_PID] = true\n\t\t}\n\t\tindex += 4\n\t}\n\treturn pids\n}\n\nfunc extractPcrPid(payload []byte) int {\n\treturn (int(payload[8]&0x1f) << 8) | int(payload[9])\n}\n\nfunc extractCaptionPid(payload []byte) int {\n\t\/\/ [ISO] 2.4.4.8 Program Map Table\n\t\/\/ Table 2-28\n\ttable_id := payload[0]\n\tif table_id != 0x02 {\n\t\treturn -1\n\t}\n\tsection_length := int(payload[1]&0x0F)<<8 | int(payload[2])\n\tif section_length >= len(payload) {\n\t\treturn -1\n\t}\n\n\tprogram_info_length := int(payload[10]&0x0F)<<8 | int(payload[11])\n\tindex := 12 + program_info_length\n\n\tfor index < 3+section_length-4 {\n\t\tstream_type := payload[index+0]\n\t\tES_info_length := int(payload[index+3]&0xF)<<8 | int(payload[index+4])\n\t\tif stream_type == 0x06 {\n\t\t\telementary_PID := int(payload[index+1]&0x1F)<<8 | int(payload[index+2])\n\t\t\tsubIndex := index + 5\n\t\t\tfor subIndex < index+ES_info_length {\n\t\t\t\t\/\/ [ISO] 2.6 Program and program element descriptors\n\t\t\t\tdescriptor_tag := payload[subIndex+0]\n\t\t\t\tdescriptor_length := int(payload[subIndex+1])\n\t\t\t\tif descriptor_tag == 0x52 {\n\t\t\t\t\t\/\/ [B10] 6.2.16 Stream identifier descriptor\n\t\t\t\t\t\/\/ 表 6-28\n\t\t\t\t\tcomponent_tag := payload[subIndex+2]\n\t\t\t\t\tif component_tag == 0x87 {\n\t\t\t\t\t\treturn elementary_PID\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsubIndex += 2 + descriptor_length\n\t\t\t}\n\t\t}\n\t\tindex += 5 + ES_info_length\n\t}\n\treturn -1\n}\n\nfunc extractPcr(payload []byte) SystemClock {\n\tpcr_base := (int64(payload[1]) << 25) |\n\t\t(int64(payload[2]) << 17) |\n\t\t(int64(payload[3]) << 9) |\n\t\t(int64(payload[4]) << 1) |\n\t\t(int64(payload[5]&0x80) >> 7)\n\tpcr_ext := (int64(payload[5] & 0x01)) | int64(payload[6])\n\t\/\/ [ISO] 2.4.2.2\n\treturn SystemClock(pcr_base*300 + pcr_ext)\n}\n\nfunc extractJstTime(payload []byte) int64 {\n\tif payload[0] != 0x73 {\n\t\treturn 
0\n\t}\n\n\t\/\/ [B10] Appendix C\n\tMJD := (int(payload[3]) << 8) | int(payload[4])\n\ty := int((float64(MJD) - 15078.2) \/ 365.25)\n\tm := int((float64(MJD) - 14956.1 - float64(int(float64(y)*365.25))) \/ 30.6001)\n\tk := 0\n\tif m == 14 || m == 15 {\n\t\tk = 1\n\t}\n\tyear := y + k + 1900\n\tmonth := m - 2 - k*12\n\tday := MJD - 14956 - int(float64(y)*365.25) - int(float64(m)*30.6001)\n\thour := decodeBcd(payload[5])\n\tminute := decodeBcd(payload[6])\n\tsecond := decodeBcd(payload[7])\n\n\tstr := fmt.Sprintf(\"%d-%02d-%02dT%02d:%02d:%02d+09:00\", year, month, day, hour, minute, second)\n\tt, err := time.Parse(time.RFC3339, str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t.Unix()\n}\n\nfunc decodeBcd(n byte) int {\n\treturn (int(n)>>4)*10 + int(n&0x0f)\n}\n\nconst K int64 = 27000000\n\nfunc (clock SystemClock) centitime() int64 {\n\treturn int64(clock) \/ (K \/ 100)\n}\nPrint subtitlepackage main\n\nimport \"fmt\"\nimport \"io\"\nimport \"os\"\nimport \"time\"\n\n\/*\n[B10]: ARIB-STD B10\n[ISO]: ISO\/IEC 13818-1\n*\/\n\nconst TS_PACKET_SIZE = 188\n\ntype AnalyzerState struct {\n\tpmtPids map[int]bool\n\tpcrPid int\n\tcaptionPid int\n\tcurrentTimestamp SystemClock\n\tclockOffset int64\n\tpreviousSubtitle string\n\tpreviousIsBlank bool\n\tpreviousTimestamp SystemClock\n\tpreludePrinted bool\n}\n\ntype SystemClock int64\n\nfunc main() {\n\tif len(os.Args) == 1 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s MPEG2-TS-FILE\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tfin, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := fin.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tbuf := make([]byte, TS_PACKET_SIZE)\n\tstate := new(AnalyzerState)\n\tstate.pcrPid = -1\n\tstate.captionPid = -1\n\n\tfor {\n\t\tn, err := fin.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tpanic(err)\n\t\t}\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tanalyzePacket(buf, state)\n\t}\n}\n\nfunc assertSyncByte(packet []byte) {\n\tif packet[0] != 0x47 {\n\t\tpanic(\"sync_byte failed\")\n\t}\n}\n\nfunc analyzePacket(packet []byte, state *AnalyzerState) {\n\tassertSyncByte(packet)\n\n\tpayload_unit_start_indicator := (packet[1] & 0x40) != 0\n\tpid := int(packet[1]&0x1f)<<8 | int(packet[2])\n\thasAdaptation := (packet[3] & 0x20) != 0\n\thasPayload := (packet[3] & 0x10) != 0\n\tp := packet[4:]\n\n\tif hasAdaptation {\n\t\t\/\/ [ISO] 2.4.3.4\n\t\t\/\/ Table 2-6\n\t\tadaptation_field_length := p[0]\n\t\tp = p[1:]\n\t\tpcr_flag := (p[0] & 0x10) != 0\n\t\tif pcr_flag && pid == state.pcrPid {\n\t\t\tstate.currentTimestamp = extractPcr(p)\n\t\t}\n\t\tp = p[adaptation_field_length:]\n\t}\n\n\tif hasPayload {\n\t\tif pid == 0 {\n\t\t\tif len(state.pmtPids) == 0 {\n\t\t\t\tstate.pmtPids = extractPmtPids(p[1:])\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Found %d pids: %v\\n\", len(state.pmtPids), state.pmtPids)\n\t\t\t}\n\t\t} else if state.pmtPids != nil && state.pmtPids[pid] {\n\t\t\tif state.captionPid == -1 && payload_unit_start_indicator {\n\t\t\t\t\/\/ PMT section\n\t\t\t\tpcrPid := extractPcrPid(p[1:])\n\t\t\t\tcaptionPid := extractCaptionPid(p[1:])\n\t\t\t\tif captionPid != -1 {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"caption pid = %d, PCR_PID = %d\\n\", captionPid, pcrPid)\n\t\t\t\t\tstate.pcrPid = pcrPid\n\t\t\t\t\tstate.captionPid = captionPid\n\t\t\t\t}\n\t\t\t}\n\t\t} else if pid == 0x0014 {\n\t\t\t\/\/ Time Offset Table\n\t\t\t\/\/ [B10] 5.2.9\n\t\t\tt := extractJstTime(p[1:])\n\t\t\tif t != 0 {\n\t\t\t\tstate.clockOffset = t*100 - 
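A worked example of the 27 MHz arithmetic above: a PCR is pcr_base (90 kHz units) times 300 plus pcr_ext, and centitime divides by 270000 ticks per centisecond. One caveat, stated as my reading of [ISO] 2.4.2.2 rather than anything the author changed: pcr_ext is a nine-bit field, so the low bit of payload[5] is its high bit and would normally be shifted left by eight before OR-ing in payload[6]; extractPcr above omits that shift.

package main

import "fmt"

const K int64 = 27000000 // system clock ticks per second

func main() {
	var pcrBase int64 = 90000 // one second of 90 kHz base ticks
	var pcrExt int64 = 150
	clock := pcrBase*300 + pcrExt
	centi := clock / (K / 100)
	fmt.Println(clock, centi) // 27000150 100, i.e. 1.00 s

	// clockOffset = TOT-seconds*100 - current centitime, so later PCR
	// readings convert to wall clock as centitime + clockOffset.
	totUnix := int64(1500000000)
	offset := totUnix*100 - centi
	fmt.Println((centi + offset) / 100) // 1500000000
}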
state.currentTimestamp.centitime()\n\t\t\t}\n\t\t} else if pid == state.captionPid {\n\t\t\tif payload_unit_start_indicator {\n\t\t\t\tdumpCaption(p, state)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc extractPmtPids(payload []byte) map[int]bool {\n\t\/\/ [ISO] 2.4.4.3\n\t\/\/ Table 2-25\n\ttable_id := payload[0]\n\tpids := make(map[int]bool)\n\tif table_id != 0x00 {\n\t\treturn pids\n\t}\n\tsection_length := int(payload[1]&0x0F)<<8 | int(payload[2])\n\tindex := 8\n\tfor index < 3+section_length-4 {\n\t\tprogram_number := int(payload[index+0])<<8 | int(payload[index+1])\n\t\tif program_number != 0 {\n\t\t\tprogram_map_PID := int(payload[index+2]&0x1F)<<8 | int(payload[index+3])\n\t\t\tpids[program_map_PID] = true\n\t\t}\n\t\tindex += 4\n\t}\n\treturn pids\n}\n\nfunc extractPcrPid(payload []byte) int {\n\treturn (int(payload[8]&0x1f) << 8) | int(payload[9])\n}\n\nfunc extractCaptionPid(payload []byte) int {\n\t\/\/ [ISO] 2.4.4.8 Program Map Table\n\t\/\/ Table 2-28\n\ttable_id := payload[0]\n\tif table_id != 0x02 {\n\t\treturn -1\n\t}\n\tsection_length := int(payload[1]&0x0F)<<8 | int(payload[2])\n\tif section_length >= len(payload) {\n\t\treturn -1\n\t}\n\n\tprogram_info_length := int(payload[10]&0x0F)<<8 | int(payload[11])\n\tindex := 12 + program_info_length\n\n\tfor index < 3+section_length-4 {\n\t\tstream_type := payload[index+0]\n\t\tES_info_length := int(payload[index+3]&0xF)<<8 | int(payload[index+4])\n\t\tif stream_type == 0x06 {\n\t\t\telementary_PID := int(payload[index+1]&0x1F)<<8 | int(payload[index+2])\n\t\t\tsubIndex := index + 5\n\t\t\tfor subIndex < index+ES_info_length {\n\t\t\t\t\/\/ [ISO] 2.6 Program and program element descriptors\n\t\t\t\tdescriptor_tag := payload[subIndex+0]\n\t\t\t\tdescriptor_length := int(payload[subIndex+1])\n\t\t\t\tif descriptor_tag == 0x52 {\n\t\t\t\t\t\/\/ [B10] 6.2.16 Stream identifier descriptor\n\t\t\t\t\t\/\/ 表 6-28\n\t\t\t\t\tcomponent_tag := payload[subIndex+2]\n\t\t\t\t\tif component_tag == 0x87 {\n\t\t\t\t\t\treturn elementary_PID\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsubIndex += 2 + descriptor_length\n\t\t\t}\n\t\t}\n\t\tindex += 5 + ES_info_length\n\t}\n\treturn -1\n}\n\nfunc extractPcr(payload []byte) SystemClock {\n\tpcr_base := (int64(payload[1]) << 25) |\n\t\t(int64(payload[2]) << 17) |\n\t\t(int64(payload[3]) << 9) |\n\t\t(int64(payload[4]) << 1) |\n\t\t(int64(payload[5]&0x80) >> 7)\n\tpcr_ext := (int64(payload[5] & 0x01)) | int64(payload[6])\n\t\/\/ [ISO] 2.4.2.2\n\treturn SystemClock(pcr_base*300 + pcr_ext)\n}\n\nfunc extractJstTime(payload []byte) int64 {\n\tif payload[0] != 0x73 {\n\t\treturn 0\n\t}\n\n\t\/\/ [B10] Appendix C\n\tMJD := (int(payload[3]) << 8) | int(payload[4])\n\ty := int((float64(MJD) - 15078.2) \/ 365.25)\n\tm := int((float64(MJD) - 14956.1 - float64(int(float64(y)*365.25))) \/ 30.6001)\n\tk := 0\n\tif m == 14 || m == 15 {\n\t\tk = 1\n\t}\n\tyear := y + k + 1900\n\tmonth := m - 2 - k*12\n\tday := MJD - 14956 - int(float64(y)*365.25) - int(float64(m)*30.6001)\n\thour := decodeBcd(payload[5])\n\tminute := decodeBcd(payload[6])\n\tsecond := decodeBcd(payload[7])\n\n\tstr := fmt.Sprintf(\"%d-%02d-%02dT%02d:%02d:%02d+09:00\", year, month, day, hour, minute, second)\n\tt, err := time.Parse(time.RFC3339, str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t.Unix()\n}\n\nfunc decodeBcd(n byte) int {\n\treturn (int(n)>>4)*10 + int(n&0x0f)\n}\n\nfunc dumpCaption(payload []byte, state *AnalyzerState) {\n\tPES_header_data_length := payload[8]\n\tPES_data_packet_header_length := payload[11+PES_header_data_length] & 0x0F\n\tp := 
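dumpCaption below splits centisecond totals into a wall-clock part and a centi remainder to build ASS Dialogue timestamps. A minimal sketch of that formatting, using UTC for a deterministic result where the original uses local time:

package main

import (
	"fmt"
	"time"
)

// Convert a centisecond total into the "HH:MM:SS.CC" form used in the
// Dialogue lines; the input value is arbitrary.
func assTime(centiTotal int64) string {
	t := time.Unix(centiTotal/100, 0).UTC()
	return fmt.Sprintf("%02d:%02d:%02d.%02d", t.Hour(), t.Minute(), t.Second(), centiTotal%100)
}

func main() {
	fmt.Println(assTime(123456)) // 00:20:34.56
}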
payload[12+PES_header_data_length+PES_data_packet_header_length:]\n\n\t\/\/ [B24] Table 9-1 (p184)\n\tdata_group_id := (p[0] & 0xFC) >> 2\n\tif data_group_id == 0x00 || data_group_id == 0x20 {\n\t\t\/\/ [B24] Table 9-3 (p186)\n\t\t\/\/ caption_management_data\n\t\tnum_languages := p[6]\n\t\tp = p[7+num_languages*5:]\n\t} else {\n\t\t\/\/ caption_data\n\t\tp = p[6:]\n\t}\n\t\/\/ [B24] Table 9-3 (p186)\n\tdata_unit_loop_length := (int(p[0]) << 16) | (int(p[1]) << 8) | int(p[2])\n\tindex := 0\n\tfor index < data_unit_loop_length {\n\t\tq := p[index:]\n\t\tdata_unit_parameter := q[4]\n\t\tdata_unit_size := (int(q[5]) << 16) | (int(q[6]) << 8) | int(q[7])\n\t\tif data_unit_parameter == 0x20 {\n\t\t\tif len(state.previousSubtitle) != 0 && !(isBlank(state.previousSubtitle) && state.previousIsBlank) {\n\t\t\t\tprevTimeCenti := state.previousTimestamp.centitime() + state.clockOffset\n\t\t\t\tcurTimeCenti := state.currentTimestamp.centitime() + state.clockOffset\n\t\t\t\tprevTime := prevTimeCenti \/ 100\n\t\t\t\tcurTime := curTimeCenti \/ 100\n\t\t\t\tprevCenti := prevTimeCenti % 100\n\t\t\t\tcurCenti := curTimeCenti % 100\n\t\t\t\tprev := time.Unix(prevTime, 0)\n\t\t\t\tcur := time.Unix(curTime, 0)\n\t\t\t\tif !state.preludePrinted {\n\t\t\t\t\tprintPrelude()\n\t\t\t\t\tstate.preludePrinted = true\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"Dialogue: 0,%02d:%02d:%02d.%02d,%02d:%02d:%02d.%02d,Default,,,,,,%s\\n\",\n\t\t\t\t\tprev.Hour(), prev.Minute(), prev.Second(), prevCenti,\n\t\t\t\t\tcur.Hour(), cur.Minute(), cur.Second(), curCenti,\n\t\t\t\t\tstate.previousSubtitle)\n\t\t\t}\n\t\t\tstate.previousIsBlank = isBlank(state.previousSubtitle)\n\t\t\tstate.previousSubtitle = decodeCprofile(q[8:], data_unit_size)\n\t\t\tstate.previousTimestamp = state.currentTimestamp\n\t\t}\n\t\tindex += 5 + data_unit_size\n\t}\n}\n\nfunc isBlank(str string) bool {\n\tfor _, c := range str {\n\t\tif c != ' ' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc printPrelude() {\n\tfmt.Println(\"[Script Info]\")\n\tfmt.Println(\"ScriptType: v4.00+\")\n\tfmt.Println(\"Collisions: Normal\")\n\tfmt.Println(\"ScaledBorderAndShadow: yes\")\n\tfmt.Println(\"Timer: 100.0000\")\n\tfmt.Println(\"\\n[Events]\")\n}\n\nfunc decodeCprofile(str []byte, length int) string {\n\treturn \"dummy\"\n}\n\nconst K int64 = 27000000\n\nfunc (clock SystemClock) centitime() int64 {\n\treturn int64(clock) \/ (K \/ 100)\n}\n<|endoftext|>"} {"text":"package routes\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype router struct {\n\troot *url.URL\n\n\tgetHandlers map[string]GetHandlerFunc\n\tpostHandlers map[string]PostHandlerFunc\n}\n\nfunc NewRouter(rootPath string) (*router, error) {\n\tr := &router{}\n\n\terr := r.SetRootPath(rootPath)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\treturn r, nil\n}\n\nfunc (r *router) SetRootPath(path string) error {\n\tnewRoot, err := url.Parse(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid path format %s: %v\", path, err)\n\t}\n\n\tr.root = newRoot\n\n\treturn nil\n}\n\nfunc (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tpath := req.URL.Path\n\tmethod := req.Method\n\n\tswitch method {\n\n\tcase http.MethodGet:\n\t\tif route, ok := r.getHandlers[path]; ok {\n\t\t\troute(&w, valuesToGetParams(req.URL.Query()), nil)\n\t\t}\n\t\thttp.NotFound(w, req)\n\tcase http.MethodPost:\n\t\tif route, ok := r.postHandlers[path]; ok {\n\t\t\tvar body []byte\n\t\t\t_, err := req.Body.Read(body)\n\t\t\tif err != nil 
{\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Panicf(\"can not read request body: %v\", err)\n\t\t\t}\n\n\t\t\troute(&w, PostBody(body), nil)\n\t\t}\n\t\thttp.NotFound(w, req)\n\n\tdefault:\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tfmt.Fprintf(w, \"method not allowed: %s\", method)\n\t}\n}\n\ntype GetHandlerFunc func(*http.ResponseWriter, GetParams, PathParams)\ntype PostHandlerFunc func(*http.ResponseWriter, PostBody, PathParams)\n\ntype PathParams map[string][]byte\ntype GetParams map[string]string\ntype PostBody []byte \/\/ Byte array with request body\n\nfunc valuesToGetParams(values url.Values) GetParams {\n\tvar params map[string]string\n\tfor key := range values {\n\t\tparams[key] = values.Get(key)\n\t}\n\treturn params\n}\nAdd scratch for router structure.package routes\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nfunc NewRouter(rootPath string) (*router, error) {\n\tr := &router{}\n\n\terr := r.SetRootPath(rootPath)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\treturn r, nil\n}\n\ntype Pattern string\n\ntype router struct {\n\troot *url.URL\n\n\tgetHandlers map[Pattern]GetHandlerFunc\n\tpostHandlers map[Pattern]PostHandlerFunc\n}\n\n\/\/ Set router root path, other paths will be relative to it\nfunc (r *router) SetRootPath(path string) error {\n\tnewRoot, err := url.Parse(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid path format %s: %v\", path, err)\n\t}\n\n\tr.root = newRoot\n\n\treturn nil\n}\n\n\/\/Helper types for different http method handlers\ntype GetHandlerFunc func(*http.ResponseWriter, GetParams, PathParams)\ntype PostHandlerFunc func(*http.ResponseWriter, PostBody, PathParams)\n\n\/\/ Example: url \"\/api\/v1\/users\/1\" and pattern \"\/api\/v1\/users\/:id\"\n\/\/ path params = {\"id\": \"1\"}\ntype PathParams map[string][]byte\n\n\/\/ Get params stands for \"query params\"\ntype GetParams map[string]string\n\n\/\/ Converts url.Url.Query() from \"Values\" (map[string][]string)\n\/\/ to \"GetParams\" (map[string]string)\nfunc valuesToGetParams(values url.Values) GetParams {\n\tvar params map[string]string\n\tfor key := range values {\n\t\tparams[key] = values.Get(key)\n\t}\n\treturn params\n}\n\n\/\/ Type for http post body\ntype PostBody []byte \/\/ Byte array with request body\n\n\/\/ Add new get handler\nfunc (r *router) Get(pattern string, handler GetHandlerFunc) error {\n\tfullPattern, err := r.root.Parse(pattern)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.getHandlers[Pattern(fullPattern.Path)] = handler\n\n\treturn nil\n}\n\nfunc (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tmethod := req.Method\n\n\tswitch method {\n\n\tcase http.MethodGet:\n\t\tr.handleGet(w, req)\n\tcase http.MethodPost:\n\t\tr.handlePost(w, req)\n\tdefault:\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tfmt.Fprintf(w, \"method not allowed: %s\", method)\n\t}\n}\n\nfunc (r *router) handlePost(w http.ResponseWriter, req *http.Request) {\n\n}\n\nfunc (r *router) handleGet(w http.ResponseWriter, req *http.Request) {\n\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"koding\/artifact\"\n\t\"koding\/common\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"koding\/tools\/config\"\n\t\"net\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/koding\/logging\"\n\t\"github.com\/koding\/metrics\"\n\t\"github.com\/koding\/redis\"\n)\n\nvar (\n\tWorkerName = 
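One caveat with the router scratch above: neither version allocates getHandlers or postHandlers, and valuesToGetParams declares params without make, so the first map write would panic on a nil map. A minimal sketch of a constructor that allocates up front, with simplified stand-in types:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

type Pattern string
type GetHandlerFunc func(http.ResponseWriter, *http.Request)

// Simplified router: the point is that maps must be made before the
// first registration assigns into them.
type router struct {
	root        *url.URL
	getHandlers map[Pattern]GetHandlerFunc
}

func newRouter(rootPath string) (*router, error) {
	root, err := url.Parse(rootPath)
	if err != nil {
		return nil, err
	}
	return &router{
		root:        root,
		getHandlers: make(map[Pattern]GetHandlerFunc), // allocate before use
	}, nil
}

func main() {
	r, _ := newRouter("/api/v1/")
	full, _ := r.root.Parse("users") // resolves relative to the root path
	r.getHandlers[Pattern(full.Path)] = func(w http.ResponseWriter, req *http.Request) {}
	fmt.Println(full.Path, len(r.getHandlers)) // /api/v1/users 1, no nil-map panic
}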
\"ingestor\"\n\tflagConfig = flag.String(\"c\", \"dev\", \"Configuration profile from file\")\n)\n\nfunc initializeConf() *config.Config {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tflag.Parse()\n\tif *flagConfig == \"\" {\n\t\tpanic(\"Please define config file with -c\")\n\t}\n\n\treturn config.MustConfig(*flagConfig)\n}\n\nfunc main() {\n\tlog := common.CreateLogger(WorkerName, false)\n\n\tconf := initializeConf()\n\tmodelhelper.Initialize(conf.Mongo)\n\n\tdefer modelhelper.Close()\n\n\tredisConn, err := redis.NewRedisSession(&redis.RedisConf{Server: conf.Redis})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tdefer redisConn.Close()\n\n\tdogclient, err := metrics.NewDogStatsD(WorkerName)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tstathandler := &GatherStat{log: log, dog: dogclient}\n\terrhandler := &GatherError{log: log, dog: dogclient}\n\n\tmux := http.NewServeMux()\n\n\tth := throttled.RateLimit(\n\t\tthrottled.Q{Requests: 10, Window: time.Hour},\n\t\t&throttled.VaryBy{Path: true},\n\t\tstore.NewRedisStore(redisConn.Pool(), WorkerName, 0),\n\t)\n\n\ttStathandler := th.Throttle(stathandler)\n\tmux.Handle(\"\/ingest\", tStathandler)\n\n\ttErrHandler := th.Throttle(errhandler)\n\tmux.Handle(\"\/errors\", tErrHandler)\n\n\tmux.HandleFunc(\"\/version\", artifact.VersionHandler())\n\tmux.HandleFunc(\"\/healthCheck\", artifact.HealthCheckHandler(WorkerName))\n\n\tport := fmt.Sprintf(\"%v\", conf.GatherIngestor.Port)\n\n\tlog.Info(\"Listening on server: %s\", port)\n\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tdefer listener.Close()\n\n\tif err = http.Serve(listener, mux); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\nfunc write500Err(log logging.Logger, err error, w http.ResponseWriter) {\n\twriteErr(http.StatusInternalServerError, log, err, w)\n}\n\nfunc write404Err(log logging.Logger, err error, w http.ResponseWriter) {\n\twriteErr(http.StatusBadRequest, log, err, w)\n}\n\nfunc writeErr(code int, log logging.Logger, err error, w http.ResponseWriter) {\n\tlog.Error(err.Error())\n\n\tw.WriteHeader(code)\n\tw.Write([]byte(err.Error()))\n}\ngatheringestor: throttle by remove address, but not pathpackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"koding\/artifact\"\n\t\"koding\/common\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"koding\/tools\/config\"\n\t\"net\"\n\t\"net\/http\"\n\t\"runtime\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/koding\/logging\"\n\t\"github.com\/koding\/metrics\"\n\t\"github.com\/koding\/redis\"\n)\n\nvar (\n\tWorkerName = \"ingestor\"\n\tflagConfig = flag.String(\"c\", \"dev\", \"Configuration profile from file\")\n)\n\nfunc initializeConf() *config.Config {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tflag.Parse()\n\tif *flagConfig == \"\" {\n\t\tpanic(\"Please define config file with -c\")\n\t}\n\n\treturn config.MustConfig(*flagConfig)\n}\n\nfunc main() {\n\tlog := common.CreateLogger(WorkerName, false)\n\n\tconf := initializeConf()\n\tmodelhelper.Initialize(conf.Mongo)\n\n\tdefer modelhelper.Close()\n\n\tredisConn, err := redis.NewRedisSession(&redis.RedisConf{Server: conf.Redis})\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tdefer redisConn.Close()\n\n\tdogclient, err := metrics.NewDogStatsD(WorkerName)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tstathandler := &GatherStat{log: log, dog: dogclient}\n\terrhandler := &GatherError{log: log, dog: dogclient}\n\n\tmux := 
http.NewServeMux()\n\n\tth := throttled.RateLimit(\n\t\tthrottled.PerHour(10),\n\t\t&throttled.VaryBy{RemoteAddr: true, Path: false},\n\t\tstore.NewRedisStore(redisConn.Pool(), WorkerName, 0),\n\t)\n\n\ttStathandler := th.Throttle(stathandler)\n\tmux.Handle(\"\/ingest\", tStathandler)\n\n\ttErrHandler := th.Throttle(errhandler)\n\tmux.Handle(\"\/errors\", tErrHandler)\n\n\tmux.HandleFunc(\"\/version\", artifact.VersionHandler())\n\tmux.HandleFunc(\"\/healthCheck\", artifact.HealthCheckHandler(WorkerName))\n\n\tport := fmt.Sprintf(\"%v\", conf.GatherIngestor.Port)\n\n\tlog.Info(\"Listening on server: %s\", port)\n\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tdefer listener.Close()\n\n\tif err = http.Serve(listener, mux); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}\n\nfunc write500Err(log logging.Logger, err error, w http.ResponseWriter) {\n\twriteErr(http.StatusInternalServerError, log, err, w)\n}\n\nfunc write404Err(log logging.Logger, err error, w http.ResponseWriter) {\n\twriteErr(http.StatusBadRequest, log, err, w)\n}\n\nfunc writeErr(code int, log logging.Logger, err error, w http.ResponseWriter) {\n\tlog.Error(err.Error())\n\n\tw.WriteHeader(code)\n\tw.Write([]byte(err.Error()))\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2020 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package azblobbackupstorage implements the BackupStorage interface\n\/\/ for Azure Blob Storage\npackage azblobbackupstorage\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Azure\/azure-pipeline-go\/pipeline\"\n\t\"github.com\/Azure\/azure-storage-blob-go\/azblob\"\n\t\"vitess.io\/vitess\/go\/vt\/concurrency\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/mysqlctl\/backupstorage\"\n)\n\nvar (\n\t\/\/ This is the account name\n\taccountName = flag.String(\"azblob_backup_account_name\", \"\", \"Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used\")\n\n\t\/\/ This is the private access key\n\taccountKeyFile = flag.String(\"azblob_backup_account_key_file\", \"\", \"Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)\")\n\n\t\/\/ This is the name of the container that will store the backups\n\tcontainerName = flag.String(\"azblob_backup_container_name\", \"\", \"Azure Blob Container Name\")\n\n\t\/\/ This is an optional prefix to prepend to all files\n\tstorageRoot = flag.String(\"azblob_backup_storage_root\", \"\", \"Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '\/' (e.g. 
just 'a\/b' not '\/a\/b\/')\")\n\n\tazBlobParallelism = flag.Int(\"azblob_backup_parallelism\", 1, \"Azure Blob operation parallelism (requires extra memory when increased)\")\n)\n\nconst (\n\tdefaultRetryCount = 5\n\tdelimiter = \"\/\"\n)\n\n\/\/ Return a Shared credential from the available credential sources.\n\/\/ We will use credentials in the following order\n\/\/ 1. Direct Command Line Flag (azblob_backup_account_name, azblob_backup_account_key)\n\/\/ 2. Environment variables\nfunc azInternalCredentials() (string, string, error) {\n\tactName := *accountName\n\tif actName == \"\" {\n\t\t\/\/ Check the Environmental Value\n\t\tactName = os.Getenv(\"VT_AZBLOB_ACCOUNT_NAME\")\n\t}\n\n\tvar actKey string\n\tif *accountKeyFile != \"\" {\n\t\tlog.Infof(\"Getting Azure Storage Account key from file: %s\", *accountKeyFile)\n\t\tdat, err := ioutil.ReadFile(*accountKeyFile)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tactKey = string(dat)\n\t} else {\n\t\tactKey = os.Getenv(\"VT_AZBLOB_ACCOUNT_KEY\")\n\t}\n\n\tif actName == \"\" || actKey == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"Azure Storage Account credentials not found in command-line flags or environment variables\")\n\t}\n\treturn actName, actKey, nil\n}\n\nfunc azCredentials() (*azblob.SharedKeyCredential, error) {\n\tactName, actKey, err := azInternalCredentials()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn azblob.NewSharedKeyCredential(actName, actKey)\n}\n\nfunc azServiceURL(credentials *azblob.SharedKeyCredential) azblob.ServiceURL {\n\tpipeline := azblob.NewPipeline(credentials, azblob.PipelineOptions{\n\t\tRetry: azblob.RetryOptions{\n\t\t\tPolicy: azblob.RetryPolicyFixed,\n\t\t\tMaxTries: defaultRetryCount,\n\t\t\t\/\/ Per https:\/\/godoc.org\/github.com\/Azure\/azure-storage-blob-go\/azblob#RetryOptions\n\t\t\t\/\/ this should be set to a very nigh number (they claim 60s per MB).\n\t\t\t\/\/ That could end up being days so we are limiting this to four hours.\n\t\t\tTryTimeout: 4 * time.Hour,\n\t\t},\n\t\tLog: pipeline.LogOptions{\n\t\t\tLog: func(level pipeline.LogLevel, message string) {\n\t\t\t\tswitch level {\n\t\t\t\tcase pipeline.LogFatal:\n\t\t\t\tcase pipeline.LogPanic:\n\t\t\t\t\tlog.Fatal(message)\n\t\t\t\t\tbreak\n\t\t\t\tcase pipeline.LogError:\n\t\t\t\t\tlog.Error(message)\n\t\t\t\t\tbreak\n\t\t\t\tcase pipeline.LogWarning:\n\t\t\t\t\tlog.Warning(message)\n\t\t\t\t\tbreak\n\t\t\t\tcase pipeline.LogInfo:\n\t\t\t\tcase pipeline.LogDebug:\n\t\t\t\t\tlog.Info(message)\n\t\t\t\t}\n\t\t\t},\n\t\t\tShouldLog: func(level pipeline.LogLevel) bool {\n\t\t\t\tswitch level {\n\t\t\t\tcase pipeline.LogFatal:\n\t\t\t\tcase pipeline.LogPanic:\n\t\t\t\t\treturn bool(log.V(3))\n\t\t\t\tcase pipeline.LogError:\n\t\t\t\t\treturn bool(log.V(3))\n\t\t\t\tcase pipeline.LogWarning:\n\t\t\t\t\treturn bool(log.V(2))\n\t\t\t\tcase pipeline.LogInfo:\n\t\t\t\tcase pipeline.LogDebug:\n\t\t\t\t\treturn bool(log.V(1))\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t})\n\tu := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: credentials.AccountName() + \".blob.core.windows.net\",\n\t\tPath: \"\/\",\n\t}\n\treturn azblob.NewServiceURL(u, pipeline)\n}\n\n\/\/ AZBlobBackupHandle implements BackupHandle for Azure Blob service.\ntype AZBlobBackupHandle struct {\n\tbs *AZBlobBackupStorage\n\tdir string\n\tname string\n\treadOnly bool\n\twaitGroup sync.WaitGroup\n\terrors concurrency.AllErrorRecorder\n\tctx context.Context\n\tcancel context.CancelFunc\n}\n\n\/\/ Directory implements BackupHandle.\nfunc (bh 
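A small sketch of the flag-then-environment fallback that azInternalCredentials implements above; the helper name is mine and the account value is fake:

package main

import (
	"fmt"
	"os"
)

// Resolve the account name: an explicit flag value wins, otherwise fall
// back to the environment, otherwise error.
func resolveAccount(flagValue string) (string, error) {
	name := flagValue
	if name == "" {
		name = os.Getenv("VT_AZBLOB_ACCOUNT_NAME")
	}
	if name == "" {
		return "", fmt.Errorf("Azure Storage Account name not found in flags or environment")
	}
	return name, nil
}

func main() {
	os.Setenv("VT_AZBLOB_ACCOUNT_NAME", "devaccount")
	name, err := resolveAccount("") // no flag: falls back to env
	fmt.Println(name, err)          // devaccount <nil>
}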
*AZBlobBackupHandle) Directory() string {\n\treturn bh.dir\n}\n\n\/\/ Name implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) Name() string {\n\treturn bh.name\n}\n\n\/\/ RecordError is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) RecordError(err error) {\n\tbh.errors.RecordError(err)\n}\n\n\/\/ HasErrors is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) HasErrors() bool {\n\treturn bh.errors.HasErrors()\n}\n\n\/\/ Error is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) Error() error {\n\treturn bh.errors.Error()\n}\n\n\/\/ AddFile implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) {\n\tif bh.readOnly {\n\t\treturn nil, fmt.Errorf(\"AddFile cannot be called on read-only backup\")\n\t}\n\t\/\/ Error out if the file size it too large ( ~4.75 TB)\n\tif filesize > azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks {\n\t\treturn nil, fmt.Errorf(\"filesize (%v) is too large to upload to az blob (max size %v)\", filesize, azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks)\n\t}\n\n\tobj := objName(bh.dir, bh.name, filename)\n\tcontainerURL, err := bh.bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblockBlobURL := containerURL.NewBlockBlobURL(obj)\n\n\treader, writer := io.Pipe()\n\tbh.waitGroup.Add(1)\n\n\tgo func() {\n\t\tdefer bh.waitGroup.Done()\n\t\t_, err := azblob.UploadStreamToBlockBlob(bh.ctx, reader, blockBlobURL, azblob.UploadStreamToBlockBlobOptions{\n\t\t\tBufferSize: azblob.BlockBlobMaxStageBlockBytes,\n\t\t\tMaxBuffers: *azBlobParallelism,\n\t\t})\n\t\tif err != nil {\n\t\t\treader.CloseWithError(err)\n\t\t\tbh.RecordError(err)\n\t\t}\n\t}()\n\n\treturn writer, nil\n}\n\n\/\/ EndBackup implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) EndBackup(ctx context.Context) error {\n\tif bh.readOnly {\n\t\treturn fmt.Errorf(\"EndBackup cannot be called on read-only backup\")\n\t}\n\tbh.waitGroup.Wait()\n\treturn bh.Error()\n}\n\n\/\/ AbortBackup implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) AbortBackup(ctx context.Context) error {\n\tif bh.readOnly {\n\t\treturn fmt.Errorf(\"AbortBackup cannot be called on read-only backup\")\n\t}\n\t\/\/ Cancel the context of any uploads.\n\tbh.cancel()\n\n\t\/\/ Remove the backup\n\treturn bh.bs.RemoveBackup(ctx, bh.dir, bh.name)\n}\n\n\/\/ ReadFile implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) {\n\tif !bh.readOnly {\n\t\treturn nil, fmt.Errorf(\"ReadFile cannot be called on read-write backup\")\n\t}\n\n\tobj := objName(bh.dir, filename)\n\tcontainerURL, err := bh.bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblobURL := containerURL.NewBlobURL(obj)\n\n\tresp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Body(azblob.RetryReaderOptions{\n\t\tMaxRetryRequests: defaultRetryCount,\n\t\tNotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) {\n\t\t\tlog.Warningf(\"ReadFile: [azblob] container: %s, directory: %s, filename: %s, error: %v\", *containerName, objName(bh.dir, \"\"), filename, lastError)\n\t\t},\n\t\tTreatEarlyCloseAsError: true,\n\t}), nil\n}\n\n\/\/ AZBlobBackupStorage structs implements the BackupStorage interface for AZBlob\ntype AZBlobBackupStorage 
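AddFile above hands the caller a PipeWriter while a goroutine streams the read side to blob storage, recording any error for EndBackup to surface after waitGroup.Wait(). A self-contained sketch of that pattern, with a byte sink standing in for UploadStreamToBlockBlob:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"sync"
)

// Hand back a WriteCloser while a goroutine drains the read side; errors
// land on a channel the way bh.RecordError collects them.
func addFile(wg *sync.WaitGroup, errs chan<- error) io.WriteCloser {
	reader, writer := io.Pipe()
	wg.Add(1)
	go func() {
		defer wg.Done()
		data, err := ioutil.ReadAll(reader) // stands in for the upload call
		if err != nil {
			reader.CloseWithError(err)
			errs <- err
			return
		}
		fmt.Println("uploaded", len(data), "bytes")
	}()
	return writer
}

func main() {
	var wg sync.WaitGroup
	errs := make(chan error, 1)
	w := addFile(&wg, errs)
	w.Write([]byte("backup payload"))
	w.Close()                   // EOF for the reading goroutine
	wg.Wait()                   // EndBackup waits the same way
	fmt.Println(len(errs) == 0) // true: no upload error recorded
}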
struct {\n}\n\nfunc (bs *AZBlobBackupStorage) containerURL() (*azblob.ContainerURL, error) {\n\tcredentials, err := azCredentials()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := azServiceURL(credentials).NewContainerURL(*containerName)\n\treturn &u, nil\n}\n\n\/\/ ListBackups implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) ListBackups(ctx context.Context, dir string) ([]backupstorage.BackupHandle, error) {\n\tlog.Infof(\"ListBackups: [azblob] container: %s, directory: %v\", *containerName, objName(dir, \"\"))\n\n\tcontainerURL, err := bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearchPrefix := objName(dir, \"\")\n\n\tresult := make([]backupstorage.BackupHandle, 0)\n\tvar subdirs []string\n\n\tfor marker := (azblob.Marker{}); marker.NotDone(); {\n\t\t\/\/ This returns Blobs in sorted order so we don't need to sort them a second time.\n\t\tresp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{\n\t\t\tPrefix: searchPrefix,\n\t\t\tMaxResults: 0,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, item := range resp.Segment.BlobPrefixes {\n\t\t\tsubdir := strings.TrimPrefix(item.Name, searchPrefix)\n\t\t\tsubdir = strings.TrimSuffix(subdir, delimiter)\n\t\t\tsubdirs = append(subdirs, subdir)\n\t\t}\n\n\t\tmarker = resp.NextMarker\n\t}\n\n\tfor _, subdir := range subdirs {\n\t\tcancelableCtx, cancel := context.WithCancel(ctx)\n\t\tresult = append(result, &AZBlobBackupHandle{\n\t\t\tbs: bs,\n\t\t\tdir: strings.Join([]string{dir, subdir}, \"\/\"),\n\t\t\tname: subdir,\n\t\t\treadOnly: true,\n\t\t\tctx: cancelableCtx,\n\t\t\tcancel: cancel,\n\t\t})\n\t}\n\n\treturn result, nil\n}\n\n\/\/ StartBackup implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) StartBackup(ctx context.Context, dir, name string) (backupstorage.BackupHandle, error) {\n\tcancelableCtx, cancel := context.WithCancel(ctx)\n\treturn &AZBlobBackupHandle{\n\t\tbs: bs,\n\t\tdir: dir,\n\t\tname: name,\n\t\treadOnly: false,\n\t\tctx: cancelableCtx,\n\t\tcancel: cancel,\n\t}, nil\n}\n\n\/\/ RemoveBackup implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) RemoveBackup(ctx context.Context, dir, name string) error {\n\tlog.Infof(\"ListBackups: [azblob] container: %s, directory: %s\", *containerName, objName(dir, \"\"))\n\n\tcontainerURL, err := bs.containerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsearchPrefix := objName(dir, name, \"\")\n\n\tfor marker := (azblob.Marker{}); marker.NotDone(); {\n\t\tresp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{\n\t\t\tPrefix: searchPrefix,\n\t\t\tMaxResults: 0,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Right now there is no batch delete so we must iterate over all the blobs to delete them one by one\n\t\t\/\/ One day we will be able to use this https:\/\/docs.microsoft.com\/en-us\/rest\/api\/storageservices\/blob-batch\n\t\t\/\/ but currently it is listed as a preview and its not in the go API\n\t\tfor _, item := range resp.Segment.BlobItems {\n\t\t\t_, err := containerURL.NewBlobURL(item.Name).Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tmarker = resp.NextMarker\n\t}\n\n\t\/\/ Delete the blob representing the folder of the backup, remove any trailing slash to signify we want to remove the folder\n\t\/\/ NOTE: you must set DeleteSnapshotsOptionNone or this will error out with a server side 
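ListBackups and RemoveBackup above both loop on azblob.Marker until NotDone() goes false. A standalone sketch of that continuation-token idiom, with a fake two-page listing standing in for ListBlobsHierarchySegment:

package main

import "fmt"

type page struct {
	items      []string
	nextMarker string // empty means no more segments
}

// Fake paged listing: each call returns one segment plus the marker for
// the next one.
func listSegment(marker string) page {
	pages := map[string]page{
		"":   {items: []string{"a", "b"}, nextMarker: "m1"},
		"m1": {items: []string{"c"}},
	}
	return pages[marker]
}

func main() {
	var all []string
	marker := ""
	for { // same shape as: for marker := (azblob.Marker{}); marker.NotDone();
		p := listSegment(marker)
		all = append(all, p.items...)
		if p.nextMarker == "" {
			break
		}
		marker = p.nextMarker
	}
	fmt.Println(all) // [a b c]
}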
error\n\tfor retry := 0; retry < defaultRetryCount; retry = retry + 1 {\n\t\t\/\/ Since the deletion of blobs is asynchronous, we may need to wait a bit before we delete the folder\n\t\t\/\/ Also refresh the client just for good measure\n\t\ttime.Sleep(10 * time.Second)\n\t\tcontainerURL, err = bs.containerURL()\n\n\t\tlog.Infof(\"Removing backup directory: %v\", strings.TrimSuffix(searchPrefix, \"\/\"))\n\t\t_, err = containerURL.NewBlobURL(strings.TrimSuffix(searchPrefix, \"\/\")).Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ Close implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) Close() error {\n\t\/\/ This function is a no-op\n\treturn nil\n}\n\n\/\/ objName joins path parts into an object name.\n\/\/ Unlike path.Join, it doesn't collapse \"..\" or strip trailing slashes.\n\/\/ It also adds the value of the -azblob_backup_storage_root flag if set.\nfunc objName(parts ...string) string {\n\tif *storageRoot != \"\" {\n\t\treturn *storageRoot + \"\/\" + strings.Join(parts, \"\/\")\n\t}\n\treturn strings.Join(parts, \"\/\")\n}\n\nfunc init() {\n\tbackupstorage.BackupStorageMap[\"azblob\"] = &AZBlobBackupStorage{}\n}\nFixing the Linter\/*\nCopyright 2020 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package azblobbackupstorage implements the BackupStorage interface\n\/\/ for Azure Blob Storage\npackage azblobbackupstorage\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/Azure\/azure-pipeline-go\/pipeline\"\n\t\"github.com\/Azure\/azure-storage-blob-go\/azblob\"\n\t\"vitess.io\/vitess\/go\/vt\/concurrency\"\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n\t\"vitess.io\/vitess\/go\/vt\/mysqlctl\/backupstorage\"\n)\n\nvar (\n\t\/\/ This is the account name\n\taccountName = flag.String(\"azblob_backup_account_name\", \"\", \"Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used\")\n\n\t\/\/ This is the private access key\n\taccountKeyFile = flag.String(\"azblob_backup_account_key_file\", \"\", \"Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)\")\n\n\t\/\/ This is the name of the container that will store the backups\n\tcontainerName = flag.String(\"azblob_backup_container_name\", \"\", \"Azure Blob Container Name\")\n\n\t\/\/ This is an optional prefix to prepend to all files\n\tstorageRoot = flag.String(\"azblob_backup_storage_root\", \"\", \"Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '\/' (e.g. 
just 'a\/b' not '\/a\/b\/')\")\n\n\tazBlobParallelism = flag.Int(\"azblob_backup_parallelism\", 1, \"Azure Blob operation parallelism (requires extra memory when increased)\")\n)\n\nconst (\n\tdefaultRetryCount = 5\n\tdelimiter = \"\/\"\n)\n\n\/\/ Return a shared key credential from the available credential sources.\n\/\/ We will use credentials in the following order:\n\/\/ 1. Direct Command Line Flag (azblob_backup_account_name, azblob_backup_account_key_file)\n\/\/ 2. Environment variables\nfunc azInternalCredentials() (string, string, error) {\n\tactName := *accountName\n\tif actName == \"\" {\n\t\t\/\/ Check the environment variable\n\t\tactName = os.Getenv(\"VT_AZBLOB_ACCOUNT_NAME\")\n\t}\n\n\tvar actKey string\n\tif *accountKeyFile != \"\" {\n\t\tlog.Infof(\"Getting Azure Storage Account key from file: %s\", *accountKeyFile)\n\t\tdat, err := ioutil.ReadFile(*accountKeyFile)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tactKey = string(dat)\n\t} else {\n\t\tactKey = os.Getenv(\"VT_AZBLOB_ACCOUNT_KEY\")\n\t}\n\n\tif actName == \"\" || actKey == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"Azure Storage Account credentials not found in command-line flags or environment variables\")\n\t}\n\treturn actName, actKey, nil\n}\n\nfunc azCredentials() (*azblob.SharedKeyCredential, error) {\n\tactName, actKey, err := azInternalCredentials()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn azblob.NewSharedKeyCredential(actName, actKey)\n}\n\nfunc azServiceURL(credentials *azblob.SharedKeyCredential) azblob.ServiceURL {\n\tpipeline := azblob.NewPipeline(credentials, azblob.PipelineOptions{\n\t\tRetry: azblob.RetryOptions{\n\t\t\tPolicy: azblob.RetryPolicyFixed,\n\t\t\tMaxTries: defaultRetryCount,\n\t\t\t\/\/ Per https:\/\/godoc.org\/github.com\/Azure\/azure-storage-blob-go\/azblob#RetryOptions\n\t\t\t\/\/ this should be set to a very high number (they claim 60s per MB).\n\t\t\t\/\/ That could end up being days, so we are limiting this to four hours.\n\t\t\tTryTimeout: 4 * time.Hour,\n\t\t},\n\t\tLog: pipeline.LogOptions{\n\t\t\tLog: func(level pipeline.LogLevel, message string) {\n\t\t\t\tswitch level {\n\t\t\t\tcase pipeline.LogFatal, pipeline.LogPanic:\n\t\t\t\t\tlog.Fatal(message)\n\t\t\t\tcase pipeline.LogError:\n\t\t\t\t\tlog.Error(message)\n\t\t\t\tcase pipeline.LogWarning:\n\t\t\t\t\tlog.Warning(message)\n\t\t\t\tcase pipeline.LogInfo, pipeline.LogDebug:\n\t\t\t\t\tlog.Info(message)\n\t\t\t\t}\n\t\t\t},\n\t\t\tShouldLog: func(level pipeline.LogLevel) bool {\n\t\t\t\tswitch level {\n\t\t\t\tcase pipeline.LogFatal, pipeline.LogPanic:\n\t\t\t\t\treturn bool(log.V(3))\n\t\t\t\tcase pipeline.LogError:\n\t\t\t\t\treturn bool(log.V(3))\n\t\t\t\tcase pipeline.LogWarning:\n\t\t\t\t\treturn bool(log.V(2))\n\t\t\t\tcase pipeline.LogInfo, pipeline.LogDebug:\n\t\t\t\t\treturn bool(log.V(1))\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t})\n\tu := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: credentials.AccountName() + \".blob.core.windows.net\",\n\t\tPath: \"\/\",\n\t}\n\treturn azblob.NewServiceURL(u, pipeline)\n}\n\n\/\/ AZBlobBackupHandle implements BackupHandle for Azure Blob service.\ntype AZBlobBackupHandle struct {\n\tbs *AZBlobBackupStorage\n\tdir string\n\tname string\n\treadOnly bool\n\twaitGroup sync.WaitGroup\n\terrors concurrency.AllErrorRecorder\n\tctx context.Context\n\tcancel context.CancelFunc\n}\n\n\/\/ Directory implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) Directory() string {\n\treturn bh.dir\n}\n\n\/\/ Name implements BackupHandle.\nfunc (bh 
*AZBlobBackupHandle) Name() string {\n\treturn bh.name\n}\n\n\/\/ RecordError is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) RecordError(err error) {\n\tbh.errors.RecordError(err)\n}\n\n\/\/ HasErrors is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) HasErrors() bool {\n\treturn bh.errors.HasErrors()\n}\n\n\/\/ Error is part of the concurrency.ErrorRecorder interface.\nfunc (bh *AZBlobBackupHandle) Error() error {\n\treturn bh.errors.Error()\n}\n\n\/\/ AddFile implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) {\n\tif bh.readOnly {\n\t\treturn nil, fmt.Errorf(\"AddFile cannot be called on read-only backup\")\n\t}\n\t\/\/ Error out if the file size is too large (~4.75 TB)\n\tif filesize > azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks {\n\t\treturn nil, fmt.Errorf(\"filesize (%v) is too large to upload to az blob (max size %v)\", filesize, azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks)\n\t}\n\n\tobj := objName(bh.dir, bh.name, filename)\n\tcontainerURL, err := bh.bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblockBlobURL := containerURL.NewBlockBlobURL(obj)\n\n\treader, writer := io.Pipe()\n\tbh.waitGroup.Add(1)\n\n\tgo func() {\n\t\tdefer bh.waitGroup.Done()\n\t\t_, err := azblob.UploadStreamToBlockBlob(bh.ctx, reader, blockBlobURL, azblob.UploadStreamToBlockBlobOptions{\n\t\t\tBufferSize: azblob.BlockBlobMaxStageBlockBytes,\n\t\t\tMaxBuffers: *azBlobParallelism,\n\t\t})\n\t\tif err != nil {\n\t\t\treader.CloseWithError(err)\n\t\t\tbh.RecordError(err)\n\t\t}\n\t}()\n\n\treturn writer, nil\n}\n\n\/\/ EndBackup implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) EndBackup(ctx context.Context) error {\n\tif bh.readOnly {\n\t\treturn fmt.Errorf(\"EndBackup cannot be called on read-only backup\")\n\t}\n\tbh.waitGroup.Wait()\n\treturn bh.Error()\n}\n\n\/\/ AbortBackup implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) AbortBackup(ctx context.Context) error {\n\tif bh.readOnly {\n\t\treturn fmt.Errorf(\"AbortBackup cannot be called on read-only backup\")\n\t}\n\t\/\/ Cancel the context of any uploads.\n\tbh.cancel()\n\n\t\/\/ Remove the backup\n\treturn bh.bs.RemoveBackup(ctx, bh.dir, bh.name)\n}\n\n\/\/ ReadFile implements BackupHandle.\nfunc (bh *AZBlobBackupHandle) ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) {\n\tif !bh.readOnly {\n\t\treturn nil, fmt.Errorf(\"ReadFile cannot be called on read-write backup\")\n\t}\n\n\tobj := objName(bh.dir, filename)\n\tcontainerURL, err := bh.bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblobURL := containerURL.NewBlobURL(obj)\n\n\tresp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Body(azblob.RetryReaderOptions{\n\t\tMaxRetryRequests: defaultRetryCount,\n\t\tNotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) {\n\t\t\tlog.Warningf(\"ReadFile: [azblob] container: %s, directory: %s, filename: %s, error: %v\", *containerName, objName(bh.dir, \"\"), filename, lastError)\n\t\t},\n\t\tTreatEarlyCloseAsError: true,\n\t}), nil\n}\n\n\/\/ AZBlobBackupStorage struct implements the BackupStorage interface for AZBlob\ntype AZBlobBackupStorage struct {\n}\n\nfunc (bs *AZBlobBackupStorage) containerURL() (*azblob.ContainerURL, error) {\n\tcredentials, err := 
azCredentials()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := azServiceURL(credentials).NewContainerURL(*containerName)\n\treturn &u, nil\n}\n\n\/\/ ListBackups implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) ListBackups(ctx context.Context, dir string) ([]backupstorage.BackupHandle, error) {\n\tlog.Infof(\"ListBackups: [azblob] container: %s, directory: %v\", *containerName, objName(dir, \"\"))\n\n\tcontainerURL, err := bs.containerURL()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearchPrefix := objName(dir, \"\")\n\n\tresult := make([]backupstorage.BackupHandle, 0)\n\tvar subdirs []string\n\n\tfor marker := (azblob.Marker{}); marker.NotDone(); {\n\t\t\/\/ This returns blobs in sorted order, so we don't need to sort them a second time.\n\t\tresp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{\n\t\t\tPrefix: searchPrefix,\n\t\t\tMaxResults: 0,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, item := range resp.Segment.BlobPrefixes {\n\t\t\tsubdir := strings.TrimPrefix(item.Name, searchPrefix)\n\t\t\tsubdir = strings.TrimSuffix(subdir, delimiter)\n\t\t\tsubdirs = append(subdirs, subdir)\n\t\t}\n\n\t\tmarker = resp.NextMarker\n\t}\n\n\tfor _, subdir := range subdirs {\n\t\tcancelableCtx, cancel := context.WithCancel(ctx)\n\t\tresult = append(result, &AZBlobBackupHandle{\n\t\t\tbs: bs,\n\t\t\tdir: strings.Join([]string{dir, subdir}, \"\/\"),\n\t\t\tname: subdir,\n\t\t\treadOnly: true,\n\t\t\tctx: cancelableCtx,\n\t\t\tcancel: cancel,\n\t\t})\n\t}\n\n\treturn result, nil\n}\n\n\/\/ StartBackup implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) StartBackup(ctx context.Context, dir, name string) (backupstorage.BackupHandle, error) {\n\tcancelableCtx, cancel := context.WithCancel(ctx)\n\treturn &AZBlobBackupHandle{\n\t\tbs: bs,\n\t\tdir: dir,\n\t\tname: name,\n\t\treadOnly: false,\n\t\tctx: cancelableCtx,\n\t\tcancel: cancel,\n\t}, nil\n}\n\n\/\/ RemoveBackup implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) RemoveBackup(ctx context.Context, dir, name string) error {\n\tlog.Infof(\"RemoveBackup: [azblob] container: %s, directory: %s\", *containerName, objName(dir, \"\"))\n\n\tcontainerURL, err := bs.containerURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsearchPrefix := objName(dir, name, \"\")\n\n\tfor marker := (azblob.Marker{}); marker.NotDone(); {\n\t\tresp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{\n\t\t\tPrefix: searchPrefix,\n\t\t\tMaxResults: 0,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Right now there is no batch delete, so we must iterate over all the blobs and delete them one by one.\n\t\t\/\/ One day we will be able to use this https:\/\/docs.microsoft.com\/en-us\/rest\/api\/storageservices\/blob-batch\n\t\t\/\/ but currently it is listed as a preview and it's not in the Go API.\n\t\tfor _, item := range resp.Segment.BlobItems {\n\t\t\t_, err := containerURL.NewBlobURL(item.Name).Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tmarker = resp.NextMarker\n\t}\n\n\t\/\/ Delete the blob representing the folder of the backup; remove any trailing slash to signify we want to remove the folder.\n\t\/\/ NOTE: you must set DeleteSnapshotsOptionNone or this will error out with a server side error\n\tfor retry := 0; retry < defaultRetryCount; retry = retry + 1 {\n\t\t\/\/ Since the deletion of blobs is 
asynchronous, we may need to wait a bit before we delete the folder\n\t\t\/\/ Also refresh the client just for good measure\n\t\ttime.Sleep(10 * time.Second)\n\t\tcontainerURL, err = bs.containerURL()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Infof(\"Removing backup directory: %v\", strings.TrimSuffix(searchPrefix, \"\/\"))\n\t\t_, err = containerURL.NewBlobURL(strings.TrimSuffix(searchPrefix, \"\/\")).Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ Close implements BackupStorage.\nfunc (bs *AZBlobBackupStorage) Close() error {\n\t\/\/ This function is a no-op\n\treturn nil\n}\n\n\/\/ objName joins path parts into an object name.\n\/\/ Unlike path.Join, it doesn't collapse \"..\" or strip trailing slashes.\n\/\/ It also adds the value of the -azblob_backup_storage_root flag if set.\nfunc objName(parts ...string) string {\n\tif *storageRoot != \"\" {\n\t\treturn *storageRoot + \"\/\" + strings.Join(parts, \"\/\")\n\t}\n\treturn strings.Join(parts, \"\/\")\n}\n\nfunc init() {\n\tbackupstorage.BackupStorageMap[\"azblob\"] = &AZBlobBackupStorage{}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/justinas\/alice\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar middleware = alice.New(logger, auth)\n\nfunc logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Println(\"### logger begin\")\n\t\tt1 := time.Now()\n\t\tnext.ServeHTTP(w, r)\n\t\tt2 := time.Since(t1)\n\t\tfmt.Println(\"### logger request duration\", t2)\n\t\tfmt.Println(\"### logger end\")\n\t})\n\n}\nUpdate middleware.gopackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/justinas\/alice\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nvar middleware = alice.New(logger, auth)\n\nfunc logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Printf(\"### logger begin %v\\n\", r.URL)\n\t\tt1 := time.Now()\n\t\tnext.ServeHTTP(w, r)\n\t\tt2 := time.Since(t1)\n\t\tfmt.Println(\"### logger request duration\", t2)\n\t\tfmt.Println(\"### logger end\")\n\t})\n\n}\n<|endoftext|>"} {"text":"package lib\n\nimport \"encoding\/asn1\"\n\n\/\/ OidDescription returns a human-readable name, a short acronym from RFC1485, a snake_case slug suitable as a json key,\n\/\/ and a boolean describing whether multiple copies can appear on an X509 cert.\ntype OidDescription struct {\n\tName string\n\tShort string\n\tSlug string\n\tMultiple bool\n}\n\nfunc describeOid(oid asn1.ObjectIdentifier) OidDescription {\n\traw := oid.String()\n\t\/\/ Multiple should be true for any types that are []string in x509.pkix.Name. 
When in doubt, set it to true.\n\tnames := map[string]OidDescription{\n\t\t\"2.5.4.3\": {\"CommonName\", \"CN\", \"common_name\", false},\n\t\t\"2.5.4.5\": {\"EV Incorporation Registration Number\", \"\", \"ev_registration_number\", false},\n\t\t\"2.5.4.6\": {\"Country\", \"C\", \"country\", true},\n\t\t\"2.5.4.7\": {\"Locality\", \"L\", \"locality\", true},\n\t\t\"2.5.4.8\": {\"Province\", \"ST\", \"province\", true},\n\t\t\"2.5.4.9\": {\"Street\", \"\", \"street\", true},\n\t\t\"2.5.4.10\": {\"Organization\", \"O\", \"organization\", true},\n\t\t\"2.5.4.11\": {\"Organizational Unit\", \"OU\", \"organizational_unit\", true},\n\t\t\"2.5.4.15\": {\"Business Category\", \"\", \"business_category\", true},\n\t\t\"2.5.4.17\": {\"Postal Code\", \"\", \"postalcode\", true},\n\t\t\"1.2.840.113549.1.9.1\": {\"Email Address\", \"\", \"email_address\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.1\": {\"EV Incorporation Locality\", \"\", \"ev_locality\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.2\": {\"EV Incorporation Province\", \"\", \"ev_province\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.3\": {\"EV Incorporation Country\", \"\", \"ev_country\", true},\n\t\t\"0.9.2342.19200300.100.1.1\": {\"User ID\", \"UID\", \"user_id\", true},\n\t}\n\tif description, ok := names[raw]; ok {\n\t\treturn description\n\t}\n\treturn OidDescription{raw, \"\", raw, true}\n}\n\nfunc oidShort(oid asn1.ObjectIdentifier) string {\n\treturn describeOid(oid).Short\n}\n\nfunc oidName(oid asn1.ObjectIdentifier) string {\n\treturn describeOid(oid).Name\n}\nAdd DomainComponent from RFC 2247package lib\n\nimport \"encoding\/asn1\"\n\n\/\/ OidDescription returns a human-readable name, a short acronym from RFC1485, a snake_case slug suitable as a json key,\n\/\/ and a boolean describing whether multiple copies can appear on an X509 cert.\ntype OidDescription struct {\n\tName string\n\tShort string\n\tSlug string\n\tMultiple bool\n}\n\nfunc describeOid(oid asn1.ObjectIdentifier) OidDescription {\n\traw := oid.String()\n\t\/\/ Multiple should be true for any types that are []string in x509.pkix.Name. 
When in doubt, set it to true.\n\tnames := map[string]OidDescription{\n\t\t\"2.5.4.3\": {\"CommonName\", \"CN\", \"common_name\", false},\n\t\t\"2.5.4.5\": {\"EV Incorporation Registration Number\", \"\", \"ev_registration_number\", false},\n\t\t\"2.5.4.6\": {\"Country\", \"C\", \"country\", true},\n\t\t\"2.5.4.7\": {\"Locality\", \"L\", \"locality\", true},\n\t\t\"2.5.4.8\": {\"Province\", \"ST\", \"province\", true},\n\t\t\"2.5.4.9\": {\"Street\", \"\", \"street\", true},\n\t\t\"2.5.4.10\": {\"Organization\", \"O\", \"organization\", true},\n\t\t\"2.5.4.11\": {\"Organizational Unit\", \"OU\", \"organizational_unit\", true},\n\t\t\"2.5.4.15\": {\"Business Category\", \"\", \"business_category\", true},\n\t\t\"2.5.4.17\": {\"Postal Code\", \"\", \"postalcode\", true},\n\t\t\"1.2.840.113549.1.9.1\": {\"Email Address\", \"\", \"email_address\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.1\": {\"EV Incorporation Locality\", \"\", \"ev_locality\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.2\": {\"EV Incorporation Province\", \"\", \"ev_province\", true},\n\t\t\"1.3.6.1.4.1.311.60.2.1.3\": {\"EV Incorporation Country\", \"\", \"ev_country\", true},\n\t\t\"0.9.2342.19200300.100.1.1\": {\"User ID\", \"UID\", \"user_id\", true},\n\t\t\"0.9.2342.19200300.100.1.25\": {\"Domain Component\", \"DC\", \"domain_component\", true},\n\t}\n\tif description, ok := names[raw]; ok {\n\t\treturn description\n\t}\n\treturn OidDescription{raw, \"\", raw, true}\n}\n\nfunc oidShort(oid asn1.ObjectIdentifier) string {\n\treturn describeOid(oid).Short\n}\n\nfunc oidName(oid asn1.ObjectIdentifier) string {\n\treturn describeOid(oid).Name\n}\n<|endoftext|>"} {"text":"package actor_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/AsynkronIT\/protoactor-go\/actor\"\n)\n\ntype setBehaviorActor struct{}\n\n\/\/ Receive is the default message handler when an actor is started\nfunc (f *setBehaviorActor) Receive(context actor.Context) {\n\tif msg, ok := context.Message().(string); ok && msg == \"other\" {\n\t\t\/\/ Change actor's receive message handler to Other\n\t\tcontext.SetBehavior(f.Other)\n\t}\n}\n\nfunc (f *setBehaviorActor) Other(context actor.Context) {\n\tfmt.Println(context.Message())\n}\n\n\/\/ SetBehavior allows an actor to change its Receive handler, providing basic support for state machines\nfunc ExampleContext_setBehavior() {\n\tpid := actor.Spawn(actor.FromInstance(&setBehaviorActor{}))\n\tdefer pid.Stop()\n\n\tpid.Tell(\"other\")\n\tpid.RequestFuture(\"hello from other\", 10*time.Millisecond).Wait()\n\n\t\/\/ Output: hello from other\n}\nexplicit waitpackage actor_test\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/AsynkronIT\/protoactor-go\/actor\"\n)\n\ntype setBehaviorActor struct {\n\tsync.WaitGroup\n}\n\n\/\/ Receive is the default message handler when an actor is started\nfunc (f *setBehaviorActor) Receive(context actor.Context) {\n\tif msg, ok := context.Message().(string); ok && msg == \"other\" {\n\t\t\/\/ Change actor's receive message handler to Other\n\t\tcontext.SetBehavior(f.Other)\n\t}\n}\n\nfunc (f *setBehaviorActor) Other(context actor.Context) {\n\tfmt.Println(context.Message())\n\tf.Done()\n}\n\n\/\/ SetBehavior allows an actor to change its Receive handler, providing basic support for state machines\nfunc ExampleContext_setBehavior() {\n\ta := &setBehaviorActor{}\n\ta.Add(1)\n\tpid := actor.Spawn(actor.FromInstance(a))\n\tdefer pid.Stop()\n\n\tpid.Tell(\"other\")\n\tpid.Tell(\"hello from other\")\n\ta.Wait()\n\n\t\/\/ Output: hello from other\n}\n<|endoftext|>"} {"text":"package 
mongoofficial_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/tidepool-org\/platform\/pointer\"\n\t\"github.com\/tidepool-org\/platform\/store\/structured\/mongoofficial\"\n)\n\nvar _ = Describe(\"Config\", func() {\n\tscheme := \"mongodb+srv\"\n\taddresses := []string{\"https:\/\/1.2.3.4:5678\", \"http:\/\/a.b.c.d:9999\"}\n\ttls := false\n\tdatabase := \"tp_database\"\n\tcollectionPrefix := \"tp_collection_prefix\"\n\tusername := \"tp_username\"\n\tpassword := \"tp_password\"\n\ttimeout := time.Duration(120) * time.Second\n\toptParams := \"safe=1\"\n\n\tDescribe(\"Load\", func() {\n\t\tvar config *mongoofficial.Config\n\n\t\tBeforeEach(func() {\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_SCHEME\", scheme)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_TLS\", fmt.Sprintf(\"%v\", tls))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_DATABASE\", database)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_ADDRESSES\", strings.Join(addresses, \",\"))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_COLLECTION_PREFIX\", collectionPrefix)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_USERNAME\", username)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_PASSWORD\", password)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_TIMEOUT\", fmt.Sprintf(\"%vs\", int(timeout.Seconds())))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_OPT_PARAMS\", optParams)).To(Succeed())\n\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_SCHEME\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_ADDRESSES\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_TLS\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_DATABASE\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_COLLECTION_PREFIX\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_USERNAME\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_PASSWORD\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_TIMEOUT\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_OPT_PARAMS\")\n\t\t})\n\n\t\tIt(\"loads scheme from environment\", func() {\n\t\t\tExpect(config.Scheme).To(Equal(scheme))\n\t\t})\n\n\t\tIt(\"loads addresses from environment\", func() {\n\t\t\tExpect(config.Addresses).To(ConsistOf(addresses))\n\t\t})\n\n\t\tIt(\"loads tls from environment\", func() {\n\t\t\tExpect(config.TLS).To(Equal(false))\n\t\t})\n\n\t\tIt(\"sets tls to 'true' if not found in env\", func() {\n\t\t\tExpect(os.Unsetenv(\"TIDEPOOL_STORE_TLS\")).To(Succeed())\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t\tExpect(config.TLS).To(Equal(true))\n\t\t})\n\n\t\tIt(\"loads database from environment\", func() {\n\t\t\tExpect(config.Database).To(Equal(database))\n\t\t})\n\n\t\tIt(\"loads collection prefix from environment\", func() {\n\t\t\tExpect(config.CollectionPrefix).To(Equal(collectionPrefix))\n\t\t})\n\n\t\tIt(\"loads username from environment\", func() {\n\t\t\tExpect(config.Username).ToNot(BeNil())\n\t\t\tExpect(*config.Username).To(Equal(username))\n\t\t})\n\n\t\tIt(\"loads password from environment\", func() {\n\t\t\tExpect(config.Password).ToNot(BeNil())\n\t\t\tExpect(*config.Password).To(Equal(password))\n\t\t})\n\n\t\tIt(\"loads timeout from environment\", func() {\n\t\t\tExpect(config.Timeout).To(Equal(timeout))\n\t\t})\n\n\t\tIt(\"uses default timeout of 60 seconds if timeout not found in env\", func() 
{\n\t\t\tExpect(os.Unsetenv(\"TIDEPOOL_STORE_TIMEOUT\")).To(Succeed())\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t\tExpect(config.Timeout).To(Equal(time.Second * time.Duration(60)))\n\t\t})\n\n\t\tIt(\"loads optional params from environment\", func() {\n\t\t\tExpect(config.OptParams).ToNot(BeNil())\n\t\t\tExpect(*config.OptParams).To(Equal(optParams))\n\t\t})\n\t})\n\n\tContext(\"Validate\", func() {\n\t\tvar config *mongoofficial.Config\n\n\t\tBeforeEach(func() {\n\t\t\tconfig = &mongoofficial.Config{\n\t\t\t\tAddresses: []string{\"www.mongo.com:4321\"},\n\t\t\t\tTLS: tls,\n\t\t\t\tDatabase: database,\n\t\t\t\tCollectionPrefix: collectionPrefix,\n\t\t\t\tUsername: pointer.FromString(username),\n\t\t\t\tPassword: pointer.FromString(password),\n\t\t\t\tTimeout: timeout,\n\t\t\t\tOptParams: nil,\n\t\t\t}\n\t\t})\n\n\t\tIt(\"return success if all are valid\", func() {\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns an error if the addresses is nil\", func() {\n\t\t\tconfig.Addresses = nil\n\t\t\tExpect(config.Validate()).To(MatchError(\"addresses is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if the addresses is empty\", func() {\n\t\t\tconfig.Addresses = []string{}\n\t\t\tExpect(config.Validate()).To(MatchError(\"addresses is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if one of the addresses is missing\", func() {\n\t\t\tconfig.Addresses = []string{\"\"}\n\t\t\tExpect(config.Validate()).To(MatchError(\"address is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if one of the addresses is not a parseable URL\", func() {\n\t\t\tconfig.Addresses = []string{\"Not%Parseable\"}\n\t\t\tExpect(config.Validate()).To(MatchError(\"address is invalid\"))\n\t\t})\n\n\t\tIt(\"returns an error if the database is missing\", func() {\n\t\t\tconfig.Database = \"\"\n\t\t\tExpect(config.Validate()).To(MatchError(\"database is missing\"))\n\t\t})\n\n\t\tIt(\"returns success if the username is not specified\", func() {\n\t\t\tconfig.Username = nil\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns success if the password is not specified\", func() {\n\t\t\tconfig.Password = nil\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns an error if the timeout is invalid\", func() {\n\t\t\tconfig.Timeout = 0\n\t\t\tExpect(config.Validate()).To(MatchError(\"timeout is invalid\"))\n\t\t})\n\t})\n})\nRestore previous values of env vars in testpackage mongoofficial_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. 
\"github.com\/onsi\/gomega\"\n\n\t\"github.com\/tidepool-org\/platform\/pointer\"\n\t\"github.com\/tidepool-org\/platform\/store\/structured\/mongoofficial\"\n)\n\nvar _ = Describe(\"Config\", func() {\n\tscheme := \"mongodb+srv\"\n\taddresses := []string{\"https:\/\/1.2.3.4:5678\", \"http:\/\/a.b.c.d:9999\"}\n\ttls := false\n\tdatabase := \"tp_database\"\n\tcollectionPrefix := \"tp_collection_prefix\"\n\tusername := \"tp_username\"\n\tpassword := \"tp_password\"\n\ttimeout := time.Duration(120) * time.Second\n\toptParams := \"replicaSet=Cluster0-shard-0&authSource=admin&w=majority\"\n\n\tDescribe(\"Load\", func() {\n\t\tvar config *mongoofficial.Config\n\t\tvar variables = []string{\n\t\t\t\"TIDEPOOL_STORE_SCHEME\",\n\t\t\t\"TIDEPOOL_STORE_TLS\",\n\t\t\t\"TIDEPOOL_STORE_DATABASE\",\n\t\t\t\"TIDEPOOL_STORE_ADDRESSES\",\n\t\t\t\"TIDEPOOL_STORE_COLLECTION_PREFIX\",\n\t\t\t\"TIDEPOOL_STORE_USERNAME\",\n\t\t\t\"TIDEPOOL_STORE_PASSWORD\",\n\t\t\t\"TIDEPOOL_STORE_TIMEOUT\",\n\t\t\t\"TIDEPOOL_STORE_OPT_PARAMS\",\n\t\t}\n\t\tvar existingEnvVars map[string]string\n\n\t\tBeforeEach(func() {\n\t\t\texistingEnvVars = make(map[string]string)\n\t\t\tfor _, v := range variables {\n\t\t\t\texistingEnvVars[v] = os.Getenv(v)\n\t\t\t}\n\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_SCHEME\", scheme)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_TLS\", fmt.Sprintf(\"%v\", tls))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_DATABASE\", database)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_ADDRESSES\", strings.Join(addresses, \",\"))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_COLLECTION_PREFIX\", collectionPrefix)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_USERNAME\", username)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_PASSWORD\", password)).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_TIMEOUT\", fmt.Sprintf(\"%vs\", int(timeout.Seconds())))).To(Succeed())\n\t\t\tExpect(os.Setenv(\"TIDEPOOL_STORE_OPT_PARAMS\", optParams)).To(Succeed())\n\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\texistingEnvVars = make(map[string]string)\n\t\t\tfor _, v := range variables {\n\t\t\t\t_ = os.Setenv(v, existingEnvVars[v])\n\t\t\t}\n\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_SCHEME\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_ADDRESSES\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_TLS\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_DATABASE\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_COLLECTION_PREFIX\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_USERNAME\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_PASSWORD\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_TIMEOUT\")\n\t\t\t_ = os.Unsetenv(\"TIDEPOOL_STORE_OPT_PARAMS\")\n\t\t})\n\n\t\tIt(\"loads scheme from environment\", func() {\n\t\t\tExpect(config.Scheme).To(Equal(scheme))\n\t\t})\n\n\t\tIt(\"loads addresses from environment\", func() {\n\t\t\tExpect(config.Addresses).To(ConsistOf(addresses))\n\t\t})\n\n\t\tIt(\"loads tls from environment\", func() {\n\t\t\tExpect(config.TLS).To(Equal(tls))\n\t\t})\n\n\t\tIt(\"sets tls to 'true' if not found in env\", func() {\n\t\t\tExpect(os.Unsetenv(\"TIDEPOOL_STORE_TLS\")).To(Succeed())\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t\tExpect(config.TLS).To(Equal(true))\n\t\t})\n\n\t\tIt(\"loads database from environment\", func() {\n\t\t\tExpect(config.Database).To(Equal(database))\n\t\t})\n\n\t\tIt(\"loads collection prefix from environment\", func() 
{\n\t\t\tExpect(config.CollectionPrefix).To(Equal(collectionPrefix))\n\t\t})\n\n\t\tIt(\"loads username from environment\", func() {\n\t\t\tExpect(config.Username).ToNot(BeNil())\n\t\t\tExpect(*config.Username).To(Equal(username))\n\t\t})\n\n\t\tIt(\"loads password from environment\", func() {\n\t\t\tExpect(config.Password).ToNot(BeNil())\n\t\t\tExpect(*config.Password).To(Equal(password))\n\t\t})\n\n\t\tIt(\"loads timeout from environment\", func() {\n\t\t\tExpect(config.Timeout).To(Equal(timeout))\n\t\t})\n\n\t\tIt(\"uses default timeout of 60 seconds if timeout not found in env\", func() {\n\t\t\tExpect(os.Unsetenv(\"TIDEPOOL_STORE_TIMEOUT\")).To(Succeed())\n\t\t\tconfig = &mongoofficial.Config{}\n\t\t\tExpect(config.Load()).To(Succeed())\n\t\t\tExpect(config.Timeout).To(Equal(time.Second * time.Duration(60)))\n\t\t})\n\n\t\tIt(\"loads optional params from environment\", func() {\n\t\t\tExpect(config.OptParams).ToNot(BeNil())\n\t\t\tExpect(*config.OptParams).To(Equal(optParams))\n\t\t})\n\t})\n\n\tContext(\"Validate\", func() {\n\t\tvar config *mongoofficial.Config\n\n\t\tBeforeEach(func() {\n\t\t\tconfig = &mongoofficial.Config{\n\t\t\t\tAddresses: []string{\"www.mongo.com:4321\"},\n\t\t\t\tTLS: tls,\n\t\t\t\tDatabase: database,\n\t\t\t\tCollectionPrefix: collectionPrefix,\n\t\t\t\tUsername: pointer.FromString(username),\n\t\t\t\tPassword: pointer.FromString(password),\n\t\t\t\tTimeout: timeout,\n\t\t\t\tOptParams: nil,\n\t\t\t}\n\t\t})\n\n\t\tIt(\"return success if all are valid\", func() {\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns an error if the addresses is nil\", func() {\n\t\t\tconfig.Addresses = nil\n\t\t\tExpect(config.Validate()).To(MatchError(\"addresses is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if the addresses is empty\", func() {\n\t\t\tconfig.Addresses = []string{}\n\t\t\tExpect(config.Validate()).To(MatchError(\"addresses is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if one of the addresses is missing\", func() {\n\t\t\tconfig.Addresses = []string{\"\"}\n\t\t\tExpect(config.Validate()).To(MatchError(\"address is missing\"))\n\t\t})\n\n\t\tIt(\"returns an error if one of the addresses is not a parseable URL\", func() {\n\t\t\tconfig.Addresses = []string{\"Not%Parseable\"}\n\t\t\tExpect(config.Validate()).To(MatchError(\"address is invalid\"))\n\t\t})\n\n\t\tIt(\"returns an error if the database is missing\", func() {\n\t\t\tconfig.Database = \"\"\n\t\t\tExpect(config.Validate()).To(MatchError(\"database is missing\"))\n\t\t})\n\n\t\tIt(\"returns success if the username is not specified\", func() {\n\t\t\tconfig.Username = nil\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns success if the password is not specified\", func() {\n\t\t\tconfig.Password = nil\n\t\t\tExpect(config.Validate()).To(Succeed())\n\t\t})\n\n\t\tIt(\"returns an error if the timeout is invalid\", func() {\n\t\t\tconfig.Timeout = 0\n\t\t\tExpect(config.Validate()).To(MatchError(\"timeout is invalid\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"package instana\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/opentracing\/basictracer-go\"\n\text \"github.com\/opentracing\/opentracing-go\/ext\"\n)\n\ntype SpanRecorder struct {\n\tsync.RWMutex\n\tspans []Span\n\ttestMode bool\n}\n\ntype Span struct {\n\tTraceID uint64 `json:\"t\"`\n\tParentID *uint64 `json:\"p,omitempty\"`\n\tSpanID uint64 `json:\"s\"`\n\tTimestamp uint64 `json:\"ts\"`\n\tDuration uint64 `json:\"d\"`\n\tName string `json:\"n\"`\n\tFrom 
*FromS `json:\"f\"`\n\tData interface{} `json:\"data\"`\n}\n\n\/\/ NewRecorder establishes a new span recorder\nfunc NewRecorder() *SpanRecorder {\n\tr := new(SpanRecorder)\n\tr.init()\n\treturn r\n}\n\n\/\/ NewTestRecorder establishes a new span recorder used for testing\nfunc NewTestRecorder() *SpanRecorder {\n\tr := new(SpanRecorder)\n\tr.testMode = true\n\tr.init()\n\treturn r\n}\n\n\/\/ GetSpans returns a copy of the array of spans accumulated so far.\nfunc (r *SpanRecorder) GetSpans() []Span {\n\tr.RLock()\n\tdefer r.RUnlock()\n\tspans := make([]Span, len(r.spans))\n\tcopy(spans, r.spans)\n\treturn spans\n}\n\nfunc getTag(rawSpan basictracer.RawSpan, tag string) interface{} {\n\tvar x, ok = rawSpan.Tags[tag]\n\tif !ok {\n\t\tx = \"\"\n\t}\n\treturn x\n}\n\nfunc getIntTag(rawSpan basictracer.RawSpan, tag string) int {\n\td := rawSpan.Tags[tag]\n\tif d == nil {\n\t\treturn -1\n\t}\n\n\tr, ok := d.(int)\n\tif !ok {\n\t\treturn -1\n\t}\n\n\treturn r\n}\n\nfunc getStringTag(rawSpan basictracer.RawSpan, tag string) string {\n\td := rawSpan.Tags[tag]\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprint(d)\n}\n\nfunc getHostName(rawSpan basictracer.RawSpan) string {\n\thostTag := getStringTag(rawSpan, string(ext.PeerHostname))\n\tif hostTag != \"\" {\n\t\treturn hostTag\n\t}\n\n\th, err := os.Hostname()\n\tif err != nil {\n\t\th = \"localhost\"\n\t}\n\n\treturn h\n}\n\nfunc getServiceName(rawSpan basictracer.RawSpan) string {\n\t\/\/ ServiceName can be determined from multiple sources and has\n\t\/\/ the following priority (preferred first):\n\t\/\/ 1. If added to the span via the OT component tag\n\t\/\/ 2. If added to the span via the OT http.url tag\n\t\/\/ 3. Specified in the tracer instantiation via Service option\n\tcomponent := getStringTag(rawSpan, string(ext.Component))\n\n\tif len(component) > 0 {\n\t\treturn component\n\t} else if len(component) == 0 {\n\t\thttpURL := getStringTag(rawSpan, string(ext.HTTPUrl))\n\n\t\tif len(httpURL) > 0 {\n\t\t\treturn httpURL\n\t\t}\n\t}\n\treturn sensor.serviceName\n}\n\nfunc getSpanKind(rawSpan basictracer.RawSpan) string {\n\tkind := getStringTag(rawSpan, string(ext.SpanKind))\n\n\tswitch kind {\n\tcase string(ext.SpanKindRPCServerEnum), \"consumer\", \"entry\":\n\t\treturn \"entry\"\n\tcase string(ext.SpanKindRPCClientEnum), \"producer\", \"exit\":\n\t\treturn \"exit\"\n\t}\n\treturn \"\"\n}\n\nfunc collectLogs(rawSpan basictracer.RawSpan) map[uint64]map[string]interface{} {\n\tlogs := make(map[uint64]map[string]interface{})\n\tfor _, l := range rawSpan.Logs {\n\t\tif _, ok := logs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)]; !ok {\n\t\t\tlogs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)] = make(map[string]interface{})\n\t\t}\n\n\t\tfor _, f := range l.Fields {\n\t\t\tlogs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)][f.Key()] = f.Value()\n\t\t}\n\t}\n\n\treturn logs\n}\n\nfunc (r *SpanRecorder) init() {\n\tr.reset()\n\n\tif r.testMode {\n\t\tlog.debug(\"Recorder in test mode. 
Not reporting spans to the backend.\")\n\t} else {\n\t\tticker := time.NewTicker(1 * time.Second)\n\t\tgo func() {\n\t\t\tfor range ticker.C {\n\t\t\t\tlog.debug(\"Sending spans to agent\", len(r.spans))\n\n\t\t\t\tr.send()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (r *SpanRecorder) reset() {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.spans = make([]Span, 0, sensor.options.MaxBufferedSpans)\n}\n\nfunc (r *SpanRecorder) RecordSpan(rawSpan basictracer.RawSpan) {\n\tvar data = &Data{}\n\tkind := getSpanKind(rawSpan)\n\n\tdata.SDK = &SDKData{\n\t\tName: rawSpan.Operation,\n\t\tType: kind,\n\t\tCustom: &CustomData{Tags: rawSpan.Tags, Logs: collectLogs(rawSpan)}}\n\n\tbaggage := make(map[string]string)\n\trawSpan.Context.ForeachBaggageItem(func(k string, v string) bool {\n\t\tbaggage[k] = v\n\n\t\treturn true\n\t})\n\n\tif len(baggage) > 0 {\n\t\tdata.SDK.Custom.Baggage = baggage\n\t}\n\n\tdata.Service = getServiceName(rawSpan)\n\n\tvar parentID *uint64\n\tif rawSpan.ParentSpanID == 0 {\n\t\tparentID = nil\n\t} else {\n\t\tparentID = &rawSpan.ParentSpanID\n\t}\n\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif len(r.spans) == sensor.options.MaxBufferedSpans {\n\t\tr.spans = r.spans[1:]\n\t}\n\n\tr.spans = append(r.spans, Span{\n\t\tTraceID: rawSpan.Context.TraceID,\n\t\tParentID: parentID,\n\t\tSpanID: rawSpan.Context.SpanID,\n\t\tTimestamp: uint64(rawSpan.Start.UnixNano()) \/ uint64(time.Millisecond),\n\t\tDuration: uint64(rawSpan.Duration) \/ uint64(time.Millisecond),\n\t\tName: \"sdk\",\n\t\tFrom: sensor.agent.from,\n\t\tData: &data})\n\n\tif !r.testMode && (len(r.spans) == sensor.options.ForceTransmissionStartingAt) {\n\t\tlog.debug(\"Forcing spans to agent\", len(r.spans))\n\n\t\tr.send()\n\t}\n}\n\nfunc (r *SpanRecorder) send() {\n\tif sensor.agent.canSend() && !r.testMode {\n\t\tgo func() {\n\t\t\t_, err := sensor.agent.request(sensor.agent.makeURL(AgentTracesURL), \"POST\", r.spans)\n\n\t\t\tr.reset()\n\n\t\t\tif err != nil {\n\t\t\t\tsensor.agent.reset()\n\t\t\t}\n\t\t}()\n\t}\n}\nDon't queue spans if we're not ready\/announcedpackage instana\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/opentracing\/basictracer-go\"\n\text \"github.com\/opentracing\/opentracing-go\/ext\"\n)\n\ntype SpanRecorder struct {\n\tsync.RWMutex\n\tspans []Span\n\ttestMode bool\n}\n\ntype Span struct {\n\tTraceID uint64 `json:\"t\"`\n\tParentID *uint64 `json:\"p,omitempty\"`\n\tSpanID uint64 `json:\"s\"`\n\tTimestamp uint64 `json:\"ts\"`\n\tDuration uint64 `json:\"d\"`\n\tName string `json:\"n\"`\n\tFrom *FromS `json:\"f\"`\n\tData interface{} `json:\"data\"`\n}\n\n\/\/ NewRecorder establishes a new span recorder\nfunc NewRecorder() *SpanRecorder {\n\tr := new(SpanRecorder)\n\tr.init()\n\treturn r\n}\n\n\/\/ NewTestRecorder establishes a new span recorder used for testing\nfunc NewTestRecorder() *SpanRecorder {\n\tr := new(SpanRecorder)\n\tr.testMode = true\n\tr.init()\n\treturn r\n}\n\n\/\/ GetSpans returns a copy of the array of spans accumulated so far.\nfunc (r *SpanRecorder) GetSpans() []Span {\n\tr.RLock()\n\tdefer r.RUnlock()\n\tspans := make([]Span, len(r.spans))\n\tcopy(spans, r.spans)\n\treturn spans\n}\n\nfunc getTag(rawSpan basictracer.RawSpan, tag string) interface{} {\n\tvar x, ok = rawSpan.Tags[tag]\n\tif !ok {\n\t\tx = \"\"\n\t}\n\treturn x\n}\n\nfunc getIntTag(rawSpan basictracer.RawSpan, tag string) int {\n\td := rawSpan.Tags[tag]\n\tif d == nil {\n\t\treturn -1\n\t}\n\n\tr, ok := d.(int)\n\tif !ok {\n\t\treturn -1\n\t}\n\n\treturn r\n}\n\nfunc getStringTag(rawSpan basictracer.RawSpan, tag 
string) string {\n\td := rawSpan.Tags[tag]\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprint(d)\n}\n\nfunc getHostName(rawSpan basictracer.RawSpan) string {\n\thostTag := getStringTag(rawSpan, string(ext.PeerHostname))\n\tif hostTag != \"\" {\n\t\treturn hostTag\n\t}\n\n\th, err := os.Hostname()\n\tif err != nil {\n\t\th = \"localhost\"\n\t}\n\n\treturn h\n}\n\nfunc getServiceName(rawSpan basictracer.RawSpan) string {\n\t\/\/ ServiceName can be determined from multiple sources and has\n\t\/\/ the following priority (preferred first):\n\t\/\/ 1. If added to the span via the OT component tag\n\t\/\/ 2. If added to the span via the OT http.url tag\n\t\/\/ 3. Specified in the tracer instantiation via Service option\n\tcomponent := getStringTag(rawSpan, string(ext.Component))\n\n\tif len(component) > 0 {\n\t\treturn component\n\t} else if len(component) == 0 {\n\t\thttpURL := getStringTag(rawSpan, string(ext.HTTPUrl))\n\n\t\tif len(httpURL) > 0 {\n\t\t\treturn httpURL\n\t\t}\n\t}\n\treturn sensor.serviceName\n}\n\nfunc getSpanKind(rawSpan basictracer.RawSpan) string {\n\tkind := getStringTag(rawSpan, string(ext.SpanKind))\n\n\tswitch kind {\n\tcase string(ext.SpanKindRPCServerEnum), \"consumer\", \"entry\":\n\t\treturn \"entry\"\n\tcase string(ext.SpanKindRPCClientEnum), \"producer\", \"exit\":\n\t\treturn \"exit\"\n\t}\n\treturn \"\"\n}\n\nfunc collectLogs(rawSpan basictracer.RawSpan) map[uint64]map[string]interface{} {\n\tlogs := make(map[uint64]map[string]interface{})\n\tfor _, l := range rawSpan.Logs {\n\t\tif _, ok := logs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)]; !ok {\n\t\t\tlogs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)] = make(map[string]interface{})\n\t\t}\n\n\t\tfor _, f := range l.Fields {\n\t\t\tlogs[uint64(l.Timestamp.UnixNano())\/uint64(time.Millisecond)][f.Key()] = f.Value()\n\t\t}\n\t}\n\n\treturn logs\n}\n\nfunc (r *SpanRecorder) init() {\n\tr.reset()\n\n\tif r.testMode {\n\t\tlog.debug(\"Recorder in test mode. 
Not reporting spans to the backend.\")\n\t} else {\n\t\tticker := time.NewTicker(1 * time.Second)\n\t\tgo func() {\n\t\t\tfor range ticker.C {\n\t\t\t\tlog.debug(\"Sending spans to agent\", len(r.spans))\n\n\t\t\t\tr.send()\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (r *SpanRecorder) reset() {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.spans = make([]Span, 0, sensor.options.MaxBufferedSpans)\n}\n\nfunc (r *SpanRecorder) RecordSpan(rawSpan basictracer.RawSpan) {\n\t\/\/ If we're not announced and not in test mode then just\n\t\/\/ return\n\tif !r.testMode && !sensor.agent.canSend() {\n\t\treturn\n\t}\n\n\tvar data = &Data{}\n\tkind := getSpanKind(rawSpan)\n\n\tdata.SDK = &SDKData{\n\t\tName: rawSpan.Operation,\n\t\tType: kind,\n\t\tCustom: &CustomData{Tags: rawSpan.Tags, Logs: collectLogs(rawSpan)}}\n\n\tbaggage := make(map[string]string)\n\trawSpan.Context.ForeachBaggageItem(func(k string, v string) bool {\n\t\tbaggage[k] = v\n\n\t\treturn true\n\t})\n\n\tif len(baggage) > 0 {\n\t\tdata.SDK.Custom.Baggage = baggage\n\t}\n\n\tdata.Service = getServiceName(rawSpan)\n\n\tvar parentID *uint64\n\tif rawSpan.ParentSpanID == 0 {\n\t\tparentID = nil\n\t} else {\n\t\tparentID = &rawSpan.ParentSpanID\n\t}\n\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif len(r.spans) == sensor.options.MaxBufferedSpans {\n\t\tr.spans = r.spans[1:]\n\t}\n\n\tr.spans = append(r.spans, Span{\n\t\tTraceID: rawSpan.Context.TraceID,\n\t\tParentID: parentID,\n\t\tSpanID: rawSpan.Context.SpanID,\n\t\tTimestamp: uint64(rawSpan.Start.UnixNano()) \/ uint64(time.Millisecond),\n\t\tDuration: uint64(rawSpan.Duration) \/ uint64(time.Millisecond),\n\t\tName: \"sdk\",\n\t\tFrom: sensor.agent.from,\n\t\tData: &data})\n\n\tif !r.testMode && (len(r.spans) == sensor.options.ForceTransmissionStartingAt) {\n\t\tlog.debug(\"Forcing spans to agent\", len(r.spans))\n\n\t\tr.send()\n\t}\n}\n\nfunc (r *SpanRecorder) send() {\n\tif sensor.agent.canSend() && !r.testMode {\n\t\tgo func() {\n\t\t\t_, err := sensor.agent.request(sensor.agent.makeURL(AgentTracesURL), \"POST\", r.spans)\n\n\t\t\tr.reset()\n\n\t\t\tif err != nil {\n\t\t\t\tsensor.agent.reset()\n\t\t\t}\n\t\t}()\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage rpc\n\nimport (\n\t\"github.com\/cockroachdb\/cockroach\/proto\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n)\n\n\/\/ A HeartbeatService exposes a method to echo its request params. It doubles\n\/\/ as a way to measure the offset of the server from other nodes. It uses the\n\/\/ clock to return the server time every heartbeat. 
It also keeps track of\n\/\/ remote clocks sent to it by storing them in the remoteClockMonitor.\ntype HeartbeatService struct {\n\t\/\/ Provides the nanosecond unix epoch timestamp of the processor.\n\tclock *hlc.Clock\n\t\/\/ A pointer to the RemoteClockMonitor configured in the RPC Context,\n\t\/\/ shared by rpc clients, to keep track of remote clock measurements.\n\tremoteClockMonitor *RemoteClockMonitor\n}\n\n\/\/ Ping echoes the contents of the request to the response, and returns the\n\/\/ server's current clock value, allowing the requester to measure its clock.\n\/\/ The reqeuster should also an estimate of their offset from this server along\n\/\/ with their address.\nfunc (hs *HeartbeatService) Ping(args *proto.PingRequest, reply *proto.PingResponse) error {\n\treply.Pong = args.Ping\n\tserverOffset := args.Offset\n\t\/\/ The server offset should be the opposite of the client offset.\n\tserverOffset.Offset = -serverOffset.Offset\n\ths.remoteClockMonitor.UpdateOffset(args.Addr, serverOffset)\n\treply.ServerTime = hs.clock.PhysicalNow()\n\treturn nil\n}\n\n\/\/ A ManualHeartbeatService allows manual control of when heartbeats occur, to\n\/\/ facilitate testing.\ntype ManualHeartbeatService struct {\n\tclock *hlc.Clock\n\tremoteClockMonitor *RemoteClockMonitor\n\t\/\/ Heartbeats are processed when a value is sent here.\n\tready chan struct{}\n}\n\n\/\/ Ping waits until the heartbeat service is ready to respond to a Heartbeat.\nfunc (mhs *ManualHeartbeatService) Ping(args *proto.PingRequest, reply *proto.PingResponse) error {\n\t<-mhs.ready\n\ths := HeartbeatService{\n\t\tclock: mhs.clock,\n\t\tremoteClockMonitor: mhs.remoteClockMonitor,\n\t}\n\treturn hs.Ping(args, reply)\n}\nFix a tiny grammar mistake in Ping()'s comment\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage rpc\n\nimport (\n\t\"github.com\/cockroachdb\/cockroach\/proto\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n)\n\n\/\/ A HeartbeatService exposes a method to echo its request params. It doubles\n\/\/ as a way to measure the offset of the server from other nodes. It uses the\n\/\/ clock to return the server time every heartbeat. 
It also keeps track of\n\/\/ remote clocks sent to it by storing them in the remoteClockMonitor.\ntype HeartbeatService struct {\n\t\/\/ Provides the nanosecond unix epoch timestamp of the processor.\n\tclock *hlc.Clock\n\t\/\/ A pointer to the RemoteClockMonitor configured in the RPC Context,\n\t\/\/ shared by rpc clients, to keep track of remote clock measurements.\n\tremoteClockMonitor *RemoteClockMonitor\n}\n\n\/\/ Ping echoes the contents of the request to the response, and returns the\n\/\/ server's current clock value, allowing the requester to measure its clock.\n\/\/ The requester should also estimate its offset from this server along\n\/\/ with the requester's address.\nfunc (hs *HeartbeatService) Ping(args *proto.PingRequest, reply *proto.PingResponse) error {\n\treply.Pong = args.Ping\n\tserverOffset := args.Offset\n\t\/\/ The server offset should be the opposite of the client offset.\n\tserverOffset.Offset = -serverOffset.Offset\n\ths.remoteClockMonitor.UpdateOffset(args.Addr, serverOffset)\n\treply.ServerTime = hs.clock.PhysicalNow()\n\treturn nil\n}\n\n\/\/ A ManualHeartbeatService allows manual control of when heartbeats occur, to\n\/\/ facilitate testing.\ntype ManualHeartbeatService struct {\n\tclock *hlc.Clock\n\tremoteClockMonitor *RemoteClockMonitor\n\t\/\/ Heartbeats are processed when a value is sent here.\n\tready chan struct{}\n}\n\n\/\/ Ping waits until the heartbeat service is ready to respond to a Heartbeat.\nfunc (mhs *ManualHeartbeatService) Ping(args *proto.PingRequest, reply *proto.PingResponse) error {\n\t<-mhs.ready\n\ths := HeartbeatService{\n\t\tclock: mhs.clock,\n\t\tremoteClockMonitor: mhs.remoteClockMonitor,\n\t}\n\treturn hs.Ping(args, reply)\n}\n<|endoftext|>"} {"text":"package rpc\n\nimport (\n\t\"encoding\/json\"\n\t\/\/ \"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/state\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n)\n\ntype BlockRes struct {\n\tfullTx bool\n\n\tBlockNumber *big.Int `json:\"number\"`\n\tBlockHash common.Hash `json:\"hash\"`\n\tParentHash common.Hash `json:\"parentHash\"`\n\tNonce [8]byte `json:\"nonce\"`\n\tSha3Uncles common.Hash `json:\"sha3Uncles\"`\n\tLogsBloom types.Bloom `json:\"logsBloom\"`\n\tTransactionRoot common.Hash `json:\"transactionRoot\"`\n\tStateRoot common.Hash `json:\"stateRoot\"`\n\tMiner common.Address `json:\"miner\"`\n\tDifficulty *big.Int `json:\"difficulty\"`\n\tTotalDifficulty *big.Int `json:\"totalDifficulty\"`\n\tSize *big.Int `json:\"size\"`\n\tExtraData []byte `json:\"extraData\"`\n\tGasLimit *big.Int `json:\"gasLimit\"`\n\tMinGasPrice int64 `json:\"minGasPrice\"`\n\tGasUsed *big.Int `json:\"gasUsed\"`\n\tUnixTimestamp int64 `json:\"timestamp\"`\n\tTransactions []*TransactionRes `json:\"transactions\"`\n\tUncles []common.Hash `json:\"uncles\"`\n}\n\nfunc (b *BlockRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tBlockNumber string `json:\"number\"`\n\t\tBlockHash string `json:\"hash\"`\n\t\tParentHash string `json:\"parentHash\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tSha3Uncles string `json:\"sha3Uncles\"`\n\t\tLogsBloom string `json:\"logsBloom\"`\n\t\tTransactionRoot string `json:\"transactionRoot\"`\n\t\tStateRoot string `json:\"stateRoot\"`\n\t\tMiner string `json:\"miner\"`\n\t\tDifficulty string `json:\"difficulty\"`\n\t\tTotalDifficulty string `json:\"totalDifficulty\"`\n\t\tSize string `json:\"size\"`\n\t\tExtraData string `json:\"extraData\"`\n\t\tGasLimit string 
`json:\"gasLimit\"`\n\t\tMinGasPrice string `json:\"minGasPrice\"`\n\t\tGasUsed string `json:\"gasUsed\"`\n\t\tUnixTimestamp string `json:\"timestamp\"`\n\t\tTransactions []interface{} `json:\"transactions\"`\n\t\tUncles []string `json:\"uncles\"`\n\t}\n\n\t\/\/ convert strict types to hexified strings\n\text.BlockNumber = common.ToHex(b.BlockNumber.Bytes())\n\text.BlockHash = b.BlockHash.Hex()\n\text.ParentHash = b.ParentHash.Hex()\n\text.Nonce = common.ToHex(b.Nonce[:])\n\text.Sha3Uncles = b.Sha3Uncles.Hex()\n\text.LogsBloom = common.ToHex(b.LogsBloom[:])\n\text.TransactionRoot = b.TransactionRoot.Hex()\n\text.StateRoot = b.StateRoot.Hex()\n\text.Miner = b.Miner.Hex()\n\text.Difficulty = common.ToHex(b.Difficulty.Bytes())\n\text.TotalDifficulty = common.ToHex(b.TotalDifficulty.Bytes())\n\text.Size = common.ToHex(b.Size.Bytes())\n\t\/\/ ext.ExtraData = common.ToHex(b.ExtraData)\n\text.GasLimit = common.ToHex(b.GasLimit.Bytes())\n\t\/\/ ext.MinGasPrice = common.ToHex(big.NewInt(b.MinGasPrice).Bytes())\n\text.GasUsed = common.ToHex(b.GasUsed.Bytes())\n\text.UnixTimestamp = common.ToHex(big.NewInt(b.UnixTimestamp).Bytes())\n\text.Transactions = make([]interface{}, len(b.Transactions))\n\tif b.fullTx {\n\t\tfor i, tx := range b.Transactions {\n\t\t\text.Transactions[i] = tx\n\t\t}\n\t} else {\n\t\tfor i, tx := range b.Transactions {\n\t\t\text.Transactions[i] = tx.Hash.Hex()\n\t\t}\n\t}\n\text.Uncles = make([]string, len(b.Uncles))\n\tfor i, v := range b.Uncles {\n\t\text.Uncles[i] = v.Hex()\n\t}\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewBlockRes(block *types.Block) *BlockRes {\n\tif block == nil {\n\t\treturn &BlockRes{}\n\t}\n\n\tres := new(BlockRes)\n\tres.BlockNumber = block.Number()\n\tres.BlockHash = block.Hash()\n\tres.ParentHash = block.ParentHash()\n\tres.Nonce = block.Header().Nonce\n\tres.Sha3Uncles = block.Header().UncleHash\n\tres.LogsBloom = block.Bloom()\n\tres.TransactionRoot = block.Header().TxHash\n\tres.StateRoot = block.Root()\n\tres.Miner = block.Header().Coinbase\n\tres.Difficulty = block.Difficulty()\n\tres.TotalDifficulty = block.Td\n\tres.Size = big.NewInt(int64(block.Size()))\n\t\/\/ res.ExtraData =\n\tres.GasLimit = block.GasLimit()\n\t\/\/ res.MinGasPrice =\n\tres.GasUsed = block.GasUsed()\n\tres.UnixTimestamp = block.Time()\n\tres.Transactions = make([]*TransactionRes, len(block.Transactions()))\n\tfor i, tx := range block.Transactions() {\n\t\tv := NewTransactionRes(tx)\n\t\tv.BlockHash = block.Hash()\n\t\tv.BlockNumber = block.Number().Int64()\n\t\tv.TxIndex = int64(i)\n\t\tres.Transactions[i] = v\n\t}\n\tres.Uncles = make([]common.Hash, len(block.Uncles()))\n\tfor i, uncle := range block.Uncles() {\n\t\tres.Uncles[i] = uncle.Hash()\n\t}\n\treturn res\n}\n\ntype TransactionRes struct {\n\tHash common.Hash `json:\"hash\"`\n\tNonce uint64 `json:\"nonce\"`\n\tBlockHash common.Hash `json:\"blockHash,omitempty\"`\n\tBlockNumber int64 `json:\"blockNumber,omitempty\"`\n\tTxIndex int64 `json:\"transactionIndex,omitempty\"`\n\tFrom common.Address `json:\"from\"`\n\tTo *common.Address `json:\"to\"`\n\tValue *big.Int `json:\"value\"`\n\tGas *big.Int `json:\"gas\"`\n\tGasPrice *big.Int `json:\"gasPrice\"`\n\tInput []byte `json:\"input\"`\n}\n\nfunc (t *TransactionRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tHash string `json:\"hash\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tBlockHash string `json:\"blockHash,omitempty\"`\n\t\tBlockNumber string `json:\"blockNumber,omitempty\"`\n\t\tTxIndex string `json:\"transactionIndex,omitempty\"`\n\t\tFrom string 
`json:\"from\"`\n\t\tTo interface{} `json:\"to\"`\n\t\tValue string `json:\"value\"`\n\t\tGas string `json:\"gas\"`\n\t\tGasPrice string `json:\"gasPrice\"`\n\t\tInput string `json:\"input\"`\n\t}\n\n\text.Hash = t.Hash.Hex()\n\text.Nonce = common.ToHex(big.NewInt(int64(t.Nonce)).Bytes())\n\text.BlockHash = t.BlockHash.Hex()\n\text.BlockNumber = common.ToHex(big.NewInt(t.BlockNumber).Bytes())\n\text.TxIndex = common.ToHex(big.NewInt(t.TxIndex).Bytes())\n\text.From = t.From.Hex()\n\tif t.To == nil {\n\t\text.To = nil\n\t} else {\n\t\text.To = t.To.Hex()\n\t}\n\text.Value = common.ToHex(t.Value.Bytes())\n\text.Gas = common.ToHex(t.Gas.Bytes())\n\text.GasPrice = common.ToHex(t.GasPrice.Bytes())\n\text.Input = common.ToHex(t.Input)\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewTransactionRes(tx *types.Transaction) *TransactionRes {\n\tvar v = new(TransactionRes)\n\tv.Hash = tx.Hash()\n\tv.Nonce = tx.Nonce()\n\tv.From, _ = tx.From()\n\tv.To = tx.To()\n\tv.Value = tx.Value()\n\tv.Gas = tx.Gas()\n\tv.GasPrice = tx.GasPrice()\n\tv.Input = tx.Data()\n\treturn v\n}\n\ntype FilterLogRes struct {\n\tHash string `json:\"hash\"`\n\tAddress string `json:\"address\"`\n\tData string `json:\"data\"`\n\tBlockNumber string `json:\"blockNumber\"`\n\tTransactionHash string `json:\"transactionHash\"`\n\tBlockHash string `json:\"blockHash\"`\n\tTransactionIndex string `json:\"transactionIndex\"`\n\tLogIndex string `json:\"logIndex\"`\n}\n\ntype FilterWhisperRes struct {\n\tHash string `json:\"hash\"`\n\tFrom string `json:\"from\"`\n\tTo string `json:\"to\"`\n\tExpiry string `json:\"expiry\"`\n\tSent string `json:\"sent\"`\n\tTtl string `json:\"ttl\"`\n\tTopics string `json:\"topics\"`\n\tPayload string `json:\"payload\"`\n\tWorkProved string `json:\"workProved\"`\n}\n\ntype LogRes struct {\n\tAddress common.Address `json:\"address\"`\n\tTopics []common.Hash `json:\"topics\"`\n\tData []byte `json:\"data\"`\n\tNumber uint64 `json:\"number\"`\n}\n\nfunc NewLogRes(log state.Log) LogRes {\n\tvar l LogRes\n\tl.Topics = make([]common.Hash, len(log.Topics()))\n\tl.Address = log.Address()\n\tl.Data = log.Data()\n\tl.Number = log.Number()\n\tfor j, topic := range log.Topics() {\n\t\tl.Topics[j] = topic\n\t}\n\treturn l\n}\n\nfunc (l *LogRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tAddress string `json:\"address\"`\n\t\tTopics []string `json:\"topics\"`\n\t\tData string `json:\"data\"`\n\t\tNumber string `json:\"number\"`\n\t}\n\n\text.Address = l.Address.Hex()\n\text.Data = common.ToHex(l.Data)\n\text.Number = common.ToHex(big.NewInt(int64(l.Number)).Bytes())\n\text.Topics = make([]string, len(l.Topics))\n\tfor i, v := range l.Topics {\n\t\text.Topics[i] = v.Hex()\n\t}\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewLogsRes(logs state.Logs) (ls []LogRes) {\n\tls = make([]LogRes, len(logs))\n\n\tfor i, log := range logs {\n\t\tls[i] = NewLogRes(log)\n\t}\n\n\treturn\n}\nAdd ExtraData field to RPC outputpackage rpc\n\nimport (\n\t\"encoding\/json\"\n\t\/\/ \"fmt\"\n\t\"math\/big\"\n\n\t\"github.com\/ethereum\/go-ethereum\/common\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/state\"\n\t\"github.com\/ethereum\/go-ethereum\/core\/types\"\n)\n\ntype BlockRes struct {\n\tfullTx bool\n\n\tBlockNumber *big.Int `json:\"number\"`\n\tBlockHash common.Hash `json:\"hash\"`\n\tParentHash common.Hash `json:\"parentHash\"`\n\tNonce [8]byte `json:\"nonce\"`\n\tSha3Uncles common.Hash `json:\"sha3Uncles\"`\n\tLogsBloom types.Bloom `json:\"logsBloom\"`\n\tTransactionRoot common.Hash `json:\"transactionRoot\"`\n\tStateRoot 
common.Hash `json:\"stateRoot\"`\n\tMiner common.Address `json:\"miner\"`\n\tDifficulty *big.Int `json:\"difficulty\"`\n\tTotalDifficulty *big.Int `json:\"totalDifficulty\"`\n\tSize *big.Int `json:\"size\"`\n\tExtraData []byte `json:\"extraData\"`\n\tGasLimit *big.Int `json:\"gasLimit\"`\n\tMinGasPrice int64 `json:\"minGasPrice\"`\n\tGasUsed *big.Int `json:\"gasUsed\"`\n\tUnixTimestamp int64 `json:\"timestamp\"`\n\tTransactions []*TransactionRes `json:\"transactions\"`\n\tUncles []common.Hash `json:\"uncles\"`\n}\n\nfunc (b *BlockRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tBlockNumber string `json:\"number\"`\n\t\tBlockHash string `json:\"hash\"`\n\t\tParentHash string `json:\"parentHash\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tSha3Uncles string `json:\"sha3Uncles\"`\n\t\tLogsBloom string `json:\"logsBloom\"`\n\t\tTransactionRoot string `json:\"transactionRoot\"`\n\t\tStateRoot string `json:\"stateRoot\"`\n\t\tMiner string `json:\"miner\"`\n\t\tDifficulty string `json:\"difficulty\"`\n\t\tTotalDifficulty string `json:\"totalDifficulty\"`\n\t\tSize string `json:\"size\"`\n\t\tExtraData string `json:\"extraData\"`\n\t\tGasLimit string `json:\"gasLimit\"`\n\t\tMinGasPrice string `json:\"minGasPrice\"`\n\t\tGasUsed string `json:\"gasUsed\"`\n\t\tUnixTimestamp string `json:\"timestamp\"`\n\t\tTransactions []interface{} `json:\"transactions\"`\n\t\tUncles []string `json:\"uncles\"`\n\t}\n\n\t\/\/ convert strict types to hexified strings\n\text.BlockNumber = common.ToHex(b.BlockNumber.Bytes())\n\text.BlockHash = b.BlockHash.Hex()\n\text.ParentHash = b.ParentHash.Hex()\n\text.Nonce = common.ToHex(b.Nonce[:])\n\text.Sha3Uncles = b.Sha3Uncles.Hex()\n\text.LogsBloom = common.ToHex(b.LogsBloom[:])\n\text.TransactionRoot = b.TransactionRoot.Hex()\n\text.StateRoot = b.StateRoot.Hex()\n\text.Miner = b.Miner.Hex()\n\text.Difficulty = common.ToHex(b.Difficulty.Bytes())\n\text.TotalDifficulty = common.ToHex(b.TotalDifficulty.Bytes())\n\text.Size = common.ToHex(b.Size.Bytes())\n\text.ExtraData = common.ToHex(b.ExtraData)\n\text.GasLimit = common.ToHex(b.GasLimit.Bytes())\n\t\/\/ ext.MinGasPrice = common.ToHex(big.NewInt(b.MinGasPrice).Bytes())\n\text.GasUsed = common.ToHex(b.GasUsed.Bytes())\n\text.UnixTimestamp = common.ToHex(big.NewInt(b.UnixTimestamp).Bytes())\n\text.Transactions = make([]interface{}, len(b.Transactions))\n\tif b.fullTx {\n\t\tfor i, tx := range b.Transactions {\n\t\t\text.Transactions[i] = tx\n\t\t}\n\t} else {\n\t\tfor i, tx := range b.Transactions {\n\t\t\text.Transactions[i] = tx.Hash.Hex()\n\t\t}\n\t}\n\text.Uncles = make([]string, len(b.Uncles))\n\tfor i, v := range b.Uncles {\n\t\text.Uncles[i] = v.Hex()\n\t}\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewBlockRes(block *types.Block) *BlockRes {\n\tif block == nil {\n\t\treturn &BlockRes{}\n\t}\n\n\tres := new(BlockRes)\n\tres.BlockNumber = block.Number()\n\tres.BlockHash = block.Hash()\n\tres.ParentHash = block.ParentHash()\n\tres.Nonce = block.Header().Nonce\n\tres.Sha3Uncles = block.Header().UncleHash\n\tres.LogsBloom = block.Bloom()\n\tres.TransactionRoot = block.Header().TxHash\n\tres.StateRoot = block.Root()\n\tres.Miner = block.Header().Coinbase\n\tres.Difficulty = block.Difficulty()\n\tres.TotalDifficulty = block.Td\n\tres.Size = big.NewInt(int64(block.Size()))\n\tres.ExtraData = []byte(block.Header().Extra)\n\tres.GasLimit = block.GasLimit()\n\t\/\/ res.MinGasPrice =\n\tres.GasUsed = block.GasUsed()\n\tres.UnixTimestamp = block.Time()\n\tres.Transactions = make([]*TransactionRes, 
len(block.Transactions()))\n\tfor i, tx := range block.Transactions() {\n\t\tv := NewTransactionRes(tx)\n\t\tv.BlockHash = block.Hash()\n\t\tv.BlockNumber = block.Number().Int64()\n\t\tv.TxIndex = int64(i)\n\t\tres.Transactions[i] = v\n\t}\n\tres.Uncles = make([]common.Hash, len(block.Uncles()))\n\tfor i, uncle := range block.Uncles() {\n\t\tres.Uncles[i] = uncle.Hash()\n\t}\n\treturn res\n}\n\ntype TransactionRes struct {\n\tHash common.Hash `json:\"hash\"`\n\tNonce uint64 `json:\"nonce\"`\n\tBlockHash common.Hash `json:\"blockHash,omitempty\"`\n\tBlockNumber int64 `json:\"blockNumber,omitempty\"`\n\tTxIndex int64 `json:\"transactionIndex,omitempty\"`\n\tFrom common.Address `json:\"from\"`\n\tTo *common.Address `json:\"to\"`\n\tValue *big.Int `json:\"value\"`\n\tGas *big.Int `json:\"gas\"`\n\tGasPrice *big.Int `json:\"gasPrice\"`\n\tInput []byte `json:\"input\"`\n}\n\nfunc (t *TransactionRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tHash string `json:\"hash\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tBlockHash string `json:\"blockHash,omitempty\"`\n\t\tBlockNumber string `json:\"blockNumber,omitempty\"`\n\t\tTxIndex string `json:\"transactionIndex,omitempty\"`\n\t\tFrom string `json:\"from\"`\n\t\tTo interface{} `json:\"to\"`\n\t\tValue string `json:\"value\"`\n\t\tGas string `json:\"gas\"`\n\t\tGasPrice string `json:\"gasPrice\"`\n\t\tInput string `json:\"input\"`\n\t}\n\n\text.Hash = t.Hash.Hex()\n\text.Nonce = common.ToHex(big.NewInt(int64(t.Nonce)).Bytes())\n\text.BlockHash = t.BlockHash.Hex()\n\text.BlockNumber = common.ToHex(big.NewInt(t.BlockNumber).Bytes())\n\text.TxIndex = common.ToHex(big.NewInt(t.TxIndex).Bytes())\n\text.From = t.From.Hex()\n\tif t.To == nil {\n\t\text.To = nil\n\t} else {\n\t\text.To = t.To.Hex()\n\t}\n\text.Value = common.ToHex(t.Value.Bytes())\n\text.Gas = common.ToHex(t.Gas.Bytes())\n\text.GasPrice = common.ToHex(t.GasPrice.Bytes())\n\text.Input = common.ToHex(t.Input)\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewTransactionRes(tx *types.Transaction) *TransactionRes {\n\tvar v = new(TransactionRes)\n\tv.Hash = tx.Hash()\n\tv.Nonce = tx.Nonce()\n\tv.From, _ = tx.From()\n\tv.To = tx.To()\n\tv.Value = tx.Value()\n\tv.Gas = tx.Gas()\n\tv.GasPrice = tx.GasPrice()\n\tv.Input = tx.Data()\n\treturn v\n}\n\ntype FilterLogRes struct {\n\tHash string `json:\"hash\"`\n\tAddress string `json:\"address\"`\n\tData string `json:\"data\"`\n\tBlockNumber string `json:\"blockNumber\"`\n\tTransactionHash string `json:\"transactionHash\"`\n\tBlockHash string `json:\"blockHash\"`\n\tTransactionIndex string `json:\"transactionIndex\"`\n\tLogIndex string `json:\"logIndex\"`\n}\n\ntype FilterWhisperRes struct {\n\tHash string `json:\"hash\"`\n\tFrom string `json:\"from\"`\n\tTo string `json:\"to\"`\n\tExpiry string `json:\"expiry\"`\n\tSent string `json:\"sent\"`\n\tTtl string `json:\"ttl\"`\n\tTopics string `json:\"topics\"`\n\tPayload string `json:\"payload\"`\n\tWorkProved string `json:\"workProved\"`\n}\n\ntype LogRes struct {\n\tAddress common.Address `json:\"address\"`\n\tTopics []common.Hash `json:\"topics\"`\n\tData []byte `json:\"data\"`\n\tNumber uint64 `json:\"number\"`\n}\n\nfunc NewLogRes(log state.Log) LogRes {\n\tvar l LogRes\n\tl.Topics = make([]common.Hash, len(log.Topics()))\n\tl.Address = log.Address()\n\tl.Data = log.Data()\n\tl.Number = log.Number()\n\tfor j, topic := range log.Topics() {\n\t\tl.Topics[j] = topic\n\t}\n\treturn l\n}\n\nfunc (l *LogRes) MarshalJSON() ([]byte, error) {\n\tvar ext struct {\n\t\tAddress string 
`json:\"address\"`\n\t\tTopics []string `json:\"topics\"`\n\t\tData string `json:\"data\"`\n\t\tNumber string `json:\"number\"`\n\t}\n\n\text.Address = l.Address.Hex()\n\text.Data = common.ToHex(l.Data)\n\text.Number = common.ToHex(big.NewInt(int64(l.Number)).Bytes())\n\text.Topics = make([]string, len(l.Topics))\n\tfor i, v := range l.Topics {\n\t\text.Topics[i] = v.Hex()\n\t}\n\n\treturn json.Marshal(ext)\n}\n\nfunc NewLogsRes(logs state.Logs) (ls []LogRes) {\n\tls = make([]LogRes, len(logs))\n\n\tfor i, log := range logs {\n\t\tls[i] = NewLogRes(log)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"package sitemap\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Index is a structure of <sitemapindex>\ntype Index struct {\n\tXMLName xml.Name `xml:\"sitemapindex\"`\n\tSitemap []parts `xml:\"sitemap\"`\n}\n\n\/\/ parts is a structure of <sitemap> in <sitemapindex>\ntype parts struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n}\n\n\/\/ Sitemap is a structure of <urlset>\ntype Sitemap struct {\n\tXMLName xml.Name `xml:\"urlset\"`\n\tURL []URL `xml:\"url\"`\n}\n\n\/\/ URL is a structure of <url> in <urlset>\ntype URL struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n\tChangeFreq string `xml:\"changefreq\"`\n\tPriority float32 `xml:\"priority\"`\n}\n\n\/\/ fetch is the page acquisition function\nvar fetch = func(URL string) ([]byte, error) {\n\tvar body []byte\n\n\tres, err := http.Get(URL)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err = ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\n\treturn body, err\n}\n\n\/\/ Time interval to be used in Index.get\nvar interval = time.Second\n\n\/\/ Get fetches sitemap data from the URL\nfunc Get(url string) (Sitemap, error) {\n\tdata, err := fetch(url)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tindex, indexErr := ParseIndex(data)\n\tsitemap, sitemapErr := Parse(data)\n\n\tif indexErr != nil && sitemapErr != nil {\n\t\terr = errors.New(\"URL is not a sitemap or sitemapindex\")\n\t\treturn Sitemap{}, err\n\t}\n\n\tif indexErr == nil {\n\t\tsitemap, err = index.get(data)\n\t\tif err != nil {\n\t\t\treturn Sitemap{}, err\n\t\t}\n\t}\n\n\treturn sitemap, err\n}\n\n\/\/ get fetches Sitemap data from a sitemapindex file\nfunc (s *Index) get(data []byte) (Sitemap, error) {\n\tindex, err := ParseIndex(data)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tvar sitemap Sitemap\n\tfor _, s := range index.Sitemap {\n\t\ttime.Sleep(interval)\n\t\tdata, err := fetch(s.Loc)\n\t\tif err != nil {\n\t\t\treturn sitemap, err\n\t\t}\n\n\t\terr = xml.Unmarshal(data, &sitemap)\n\t\tif err != nil {\n\t\t\treturn sitemap, err\n\t\t}\n\t}\n\n\treturn sitemap, err\n}\n\n\/\/ Parse creates Sitemap data from text\nfunc Parse(data []byte) (Sitemap, error) {\n\tvar sitemap Sitemap\n\terr := xml.Unmarshal(data, &sitemap)\n\n\treturn sitemap, err\n}\n\n\/\/ ParseIndex creates Index data from text\nfunc ParseIndex(data []byte) (Index, error) {\n\tvar index Index\n\terr := xml.Unmarshal(data, &index)\n\n\treturn index, err\n}\n\n\/\/ SetInterval changes the time interval to be used in Index.get\nfunc SetInterval(time time.Duration) {\n\tinterval = time\n}\n\n\/\/ SetFetch replaces the fetch closure\nfunc SetFetch(f func(url string) ([]byte, error)) {\n\tfetch = f\n}\nchange returning page datapackage sitemap\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Index is a structure of <sitemapindex>\ntype Index struct {\n\tXMLName xml.Name 
`xml:\"sitemapindex\"`\n\tSitemap []parts `xml:\"sitemap\"`\n}\n\n\/\/ parts is a structure of <sitemap> in <sitemapindex>\ntype parts struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n}\n\n\/\/ Sitemap is a structure of <urlset>\ntype Sitemap struct {\n\tXMLName xml.Name `xml:\"urlset\"`\n\tURL []URL `xml:\"url\"`\n}\n\n\/\/ URL is a structure of <url> in <urlset>\ntype URL struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n\tChangeFreq string `xml:\"changefreq\"`\n\tPriority float32 `xml:\"priority\"`\n}\n\n\/\/ fetch is the page acquisition function\nvar fetch = func(URL string) ([]byte, error) {\n\tvar body []byte\n\n\tres, err := http.Get(URL)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tdefer res.Body.Close()\n\n\treturn ioutil.ReadAll(res.Body)\n}\n\n\/\/ Time interval to be used in Index.get\nvar interval = time.Second\n\n\/\/ Get fetches sitemap data from the URL\nfunc Get(url string) (Sitemap, error) {\n\tdata, err := fetch(url)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tindex, indexErr := ParseIndex(data)\n\tsitemap, sitemapErr := Parse(data)\n\n\tif indexErr != nil && sitemapErr != nil {\n\t\terr = errors.New(\"URL is not a sitemap or sitemapindex\")\n\t\treturn Sitemap{}, err\n\t}\n\n\tif indexErr == nil {\n\t\tsitemap, err = index.get(data)\n\t\tif err != nil {\n\t\t\treturn Sitemap{}, err\n\t\t}\n\t}\n\n\treturn sitemap, err\n}\n\n\/\/ get fetches Sitemap data from a sitemapindex file\nfunc (s *Index) get(data []byte) (Sitemap, error) {\n\tindex, err := ParseIndex(data)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tvar sitemap Sitemap\n\tfor _, s := range index.Sitemap {\n\t\ttime.Sleep(interval)\n\t\tdata, err := fetch(s.Loc)\n\t\tif err != nil {\n\t\t\treturn sitemap, err\n\t\t}\n\n\t\terr = xml.Unmarshal(data, &sitemap)\n\t\tif err != nil {\n\t\t\treturn sitemap, err\n\t\t}\n\t}\n\n\treturn sitemap, err\n}\n\n\/\/ Parse creates Sitemap data from text\nfunc Parse(data []byte) (Sitemap, error) {\n\tvar sitemap Sitemap\n\terr := xml.Unmarshal(data, &sitemap)\n\n\treturn sitemap, err\n}\n\n\/\/ ParseIndex creates Index data from text\nfunc ParseIndex(data []byte) (Index, error) {\n\tvar index Index\n\terr := xml.Unmarshal(data, &index)\n\n\treturn index, err\n}\n\n\/\/ SetInterval changes the time interval to be used in Index.get\nfunc SetInterval(time time.Duration) {\n\tinterval = time\n}\n\n\/\/ SetFetch replaces the fetch closure\nfunc SetFetch(f func(url string) ([]byte, error)) {\n\tfetch = f\n}\n
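\n\/\/ exampleSitemapLocs is an illustrative usage sketch (the URL is a\n\/\/ placeholder): fetch a sitemap, then collect every <loc> entry.\nfunc exampleSitemapLocs() ([]string, error) {\n\tsm, err := Get(\"https:\/\/example.com\/sitemap.xml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlocs := make([]string, 0, len(sm.URL))\n\tfor _, u := range sm.URL {\n\t\tlocs = append(locs, u.Loc)\n\t}\n\treturn locs, nil\n}\n<|endoftext|>"} {"text":"\/*\n * Copyright 2014 Xuyuan Pang \n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage glogger\n\nimport \"sync\"\n\ntype register struct {\n\tmapper map[string]interface{}\n\tmu sync.RWMutex\n}\n\nfunc NewRegister() *register {\n\treturn &register{\n\t\tmapper: make(map[string]interface{}),\n\t}\n}\n\nfunc (r *register) Register(name string, v interface{}) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif _, dup := r.mapper[name]; dup {\n\t\tpanic(\"register name: \" + name + \" twice\")\n\t}\n\tr.mapper[name] = v\n}\n\nfunc (r *register) 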
Unregister(name string) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tdelete(r.mapper, name)\n}\n\nfunc (r *register) Get(name string) interface{} {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn r.mapper[name]\n}\nadd comment for register\/*\n * Copyright 2014 Xuyuan Pang \n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage glogger\n\nimport \"sync\"\n\n\/\/ register is a thread-safe map\ntype register struct {\n\tmapper map[string]interface{}\n\tmu sync.RWMutex\n}\n\n\/\/ NewRegister returns a new register.\nfunc NewRegister() *register {\n\treturn &register{\n\t\tmapper: make(map[string]interface{}),\n\t}\n}\n\n\/\/ Register binds the interface and the name. If this name has been registered, it panics.\nfunc (r *register) Register(name string, v interface{}) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif _, dup := r.mapper[name]; dup {\n\t\tpanic(\"register name: \" + name + \" twice\")\n\t}\n\tr.mapper[name] = v\n}\n\n\/\/ Unregister unbinds the interface and the name. It returns the removed\n\/\/ interface, or nil if nothing was registered under the name.\nfunc (r *register) Unregister(name string) interface{} {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif v, ok := r.mapper[name]; ok {\n\t\tdelete(r.mapper, name)\n\t\treturn v\n\t}\n\treturn nil\n}\n\n\/\/ Get returns an interface registered with this name.\nfunc (r *register) Get(name string) interface{} {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\treturn r.mapper[name]\n}\n
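\n\/\/ exampleRegisterUsage is an illustrative sketch (the key and stored value\n\/\/ are arbitrary): callers store any value under a name and assert it back to\n\/\/ the concrete type on retrieval.\nfunc exampleRegisterUsage() {\n\tr := NewRegister()\n\tr.Register(\"json\", \"a formatter value\")\n\tif v := r.Get(\"json\"); v != nil {\n\t\t_ = v.(string) \/\/ assert back to the stored type\n\t}\n\tr.Unregister(\"json\")\n}\n<|endoftext|>"} {"text":"package overcurrent\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/efritz\/glock\"\n)\n\ntype (\n\tRegistry interface {\n\t\t\/\/ Configure will register a new breaker instance under the given name using\n\t\t\/\/ the given configuration. A breaker config may not be changed after being\n\t\t\/\/ initialized. It is an error to register the same breaker twice, or try to\n\t\t\/\/ invoke Call or CallAsync with an unregistered breaker.\n\t\tConfigure(name string, configs ...BreakerConfig) error\n\n\t\t\/\/ Call will invoke `Call` on the breaker configured with the given name. If\n\t\t\/\/ the breaker returns a non-nil error, the fallback function is invoked with\n\t\t\/\/ the error as the value. It may be the case that the fallback function is\n\t\t\/\/ invoked without the breaker function failing (e.g. circuit open).\n\t\tCall(name string, f BreakerFunc, fallback FallbackFunc) error\n\n\t\t\/\/ CallAsync will create a channel that receives the error value from a similar\n\t\t\/\/ invocation of Call. 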
See the Breaker docs for more details.\n\t\tCallAsync(name string, f BreakerFunc, fallback FallbackFunc) <-chan error\n\t}\n\n\tregistry struct {\n\t\tbreakers map[string]*wrappedBreaker\n\t\tmutex *sync.RWMutex\n\t\tclock glock.Clock\n\t}\n\n\twrappedBreaker struct {\n\t\tbreaker *circuitBreaker\n\t\tsemaphore *semaphore\n\t}\n\n\tFallbackFunc func(error) error\n)\n\nvar (\n\tErrAlreadyConfigured = errors.New(\"breaker is already configured\")\n\tErrBreakerUnconfigured = errors.New(\"breaker not configured\")\n\tErrMaxConcurrency = errors.New(\"breaker is at max concurrency\")\n)\n\nfunc NewRegistry() Registry {\n\treturn newRegistryWithClock(glock.NewMockClock())\n}\n\nfunc newRegistryWithClock(clock glock.Clock) Registry {\n\treturn &registry{\n\t\tbreakers: map[string]*wrappedBreaker{},\n\t\tmutex: &sync.RWMutex{},\n\t\tclock: clock,\n\t}\n}\n\nfunc (r *registry) Configure(name string, configs ...BreakerConfig) error {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif _, ok := r.breakers[name]; ok {\n\t\treturn ErrAlreadyConfigured\n\t}\n\n\tbreaker := newCircuitBreaker(configs...)\n\n\tr.breakers[name] = &wrappedBreaker{\n\t\tbreaker: breaker,\n\t\tsemaphore: newSemaphore(r.clock, breaker.maxConcurrency),\n\t}\n\n\treturn nil\n}\n\nfunc (r *registry) Call(name string, f BreakerFunc, fallback FallbackFunc) error {\n\twrapped, collector, err := r.getWrappedBreaker(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstart := time.Now()\n\terr = r.call(wrapped, collector, f, fallback)\n\telapsed := time.Now().Sub(start)\n\n\tcollector.ReportDuration(EventTypeTotalDuration, elapsed)\n\treturn err\n}\n\nfunc (r *registry) CallAsync(name string, f BreakerFunc, fallback FallbackFunc) <-chan error {\n\treturn toErrChan(func() error { return r.Call(name, f, fallback) })\n}\n\nfunc (r *registry) getWrappedBreaker(name string) (*wrappedBreaker, MetricCollector, error) {\n\tr.mutex.RLock()\n\tdefer r.mutex.RUnlock()\n\n\twrapped, ok := r.breakers[name]\n\tif !ok {\n\t\treturn nil, nil, ErrBreakerUnconfigured\n\t}\n\n\treturn wrapped, wrapped.breaker.collector, nil\n}\n\nfunc (r *registry) call(wrapped *wrappedBreaker, collector MetricCollector, f BreakerFunc, fallback FallbackFunc) error {\n\terr := r.callWithSemaphore(wrapped.breaker, wrapped.semaphore, f)\n\tif err == nil {\n\t\tcollector.Report(EventTypeSuccess)\n\t\treturn nil\n\t}\n\n\tcollector.Report(EventTypeFailure)\n\n\tif err == ErrMaxConcurrency {\n\t\tcollector.Report(EventTypeRejection)\n\t}\n\n\tif fallback == nil {\n\t\treturn err\n\t}\n\n\tif err := fallback(err); err != nil {\n\t\tcollector.Report(EventTypeFallbackFailure)\n\t\treturn err\n\t}\n\n\tcollector.Report(EventTypeFallbackSuccess)\n\treturn nil\n}\n\nfunc (r *registry) callWithSemaphore(breaker *circuitBreaker, semaphore *semaphore, f BreakerFunc) error {\n\tif !semaphore.wait(breaker.maxConcurrencyTimeout) {\n\t\treturn ErrMaxConcurrency\n\t}\n\n\tdefer semaphore.signal()\n\treturn breaker.Call(f)\n}\n
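\n\/\/ exampleRegistryUsage is an illustrative sketch: configure a breaker once,\n\/\/ then route calls through it with a fallback. The breaker name, the\n\/\/ zero-config Configure call, and the fallback body are assumptions made for\n\/\/ this example only.\nfunc exampleRegistryUsage(doWork BreakerFunc) error {\n\tregistry := NewRegistry()\n\tif err := registry.Configure(\"remote-api\"); err != nil {\n\t\treturn err\n\t}\n\t\/\/ The fallback receives whatever error the breaker produced.\n\treturn registry.Call(\"remote-api\", doWork, func(err error) error {\n\t\treturn errors.New(\"falling back: \" + err.Error())\n\t})\n}\nFix bad initial clock for registry.package overcurrent\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/efritz\/glock\"\n)\n\ntype (\n\tRegistry interface {\n\t\t\/\/ Configure will register a new breaker instance under the given name using\n\t\t\/\/ the given configuration. A breaker config may not be changed after being\n\t\t\/\/ initialized. 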
It is an error to register the same breaker twice, or try to\n\t\t\/\/ invoke Call or CallAsync with an unregistered breaker.\n\t\tConfigure(name string, configs ...BreakerConfig) error\n\n\t\t\/\/ Call will invoke `Call` on the breaker configured with the given name. If\n\t\t\/\/ the breaker returns a non-nil error, the fallback function is invoked with\n\t\t\/\/ the error as the value. It may be the case that the fallback function is\n\t\t\/\/ invoked without the breaker function failing (e.g. circuit open).\n\t\tCall(name string, f BreakerFunc, fallback FallbackFunc) error\n\n\t\t\/\/ CallAsync will create a channel that receives the error value from a similar\n\t\t\/\/ invocation of Call. See the Breaker docs for more details.\n\t\tCallAsync(name string, f BreakerFunc, fallback FallbackFunc) <-chan error\n\t}\n\n\tregistry struct {\n\t\tbreakers map[string]*wrappedBreaker\n\t\tmutex *sync.RWMutex\n\t\tclock glock.Clock\n\t}\n\n\twrappedBreaker struct {\n\t\tbreaker *circuitBreaker\n\t\tsemaphore *semaphore\n\t}\n\n\tFallbackFunc func(error) error\n)\n\nvar (\n\tErrAlreadyConfigured = errors.New(\"breaker is already configured\")\n\tErrBreakerUnconfigured = errors.New(\"breaker not configured\")\n\tErrMaxConcurrency = errors.New(\"breaker is at max concurrency\")\n)\n\nfunc NewRegistry() Registry {\n\treturn newRegistryWithClock(glock.NewRealClock())\n}\n\nfunc newRegistryWithClock(clock glock.Clock) Registry {\n\treturn &registry{\n\t\tbreakers: map[string]*wrappedBreaker{},\n\t\tmutex: &sync.RWMutex{},\n\t\tclock: clock,\n\t}\n}\n\nfunc (r *registry) Configure(name string, configs ...BreakerConfig) error {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif _, ok := r.breakers[name]; ok {\n\t\treturn ErrAlreadyConfigured\n\t}\n\n\tbreaker := newCircuitBreaker(configs...)\n\n\tr.breakers[name] = &wrappedBreaker{\n\t\tbreaker: breaker,\n\t\tsemaphore: newSemaphore(r.clock, breaker.maxConcurrency),\n\t}\n\n\treturn nil\n}\n\nfunc (r *registry) Call(name string, f BreakerFunc, fallback FallbackFunc) error {\n\twrapped, collector, err := r.getWrappedBreaker(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstart := time.Now()\n\terr = r.call(wrapped, collector, f, fallback)\n\telapsed := time.Now().Sub(start)\n\n\tcollector.ReportDuration(EventTypeTotalDuration, elapsed)\n\treturn err\n}\n\nfunc (r *registry) CallAsync(name string, f BreakerFunc, fallback FallbackFunc) <-chan error {\n\treturn toErrChan(func() error { return r.Call(name, f, fallback) })\n}\n\nfunc (r *registry) getWrappedBreaker(name string) (*wrappedBreaker, MetricCollector, error) {\n\tr.mutex.RLock()\n\tdefer r.mutex.RUnlock()\n\n\twrapped, ok := r.breakers[name]\n\tif !ok {\n\t\treturn nil, nil, ErrBreakerUnconfigured\n\t}\n\n\treturn wrapped, wrapped.breaker.collector, nil\n}\n\nfunc (r *registry) call(wrapped *wrappedBreaker, collector MetricCollector, f BreakerFunc, fallback FallbackFunc) error {\n\terr := r.callWithSemaphore(wrapped.breaker, wrapped.semaphore, f)\n\tif err == nil {\n\t\tcollector.Report(EventTypeSuccess)\n\t\treturn nil\n\t}\n\n\tcollector.Report(EventTypeFailure)\n\n\tif err == ErrMaxConcurrency {\n\t\tcollector.Report(EventTypeRejection)\n\t}\n\n\tif fallback == nil {\n\t\treturn err\n\t}\n\n\tif err := fallback(err); err != nil {\n\t\tcollector.Report(EventTypeFallbackFailure)\n\t\treturn err\n\t}\n\n\tcollector.Report(EventTypeFallbackSuccess)\n\treturn nil\n}\n\nfunc (r *registry) callWithSemaphore(breaker *circuitBreaker, semaphore *semaphore, f BreakerFunc) error {\n\tif 
!semaphore.wait(breaker.maxConcurrencyTimeout) {\n\t\treturn ErrMaxConcurrency\n\t}\n\n\tdefer semaphore.signal()\n\treturn breaker.Call(f)\n}\n<|endoftext|>"} {"text":"package invoices\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar (\n\ttrim = strings.TrimSpace\n\tsf = fmt.Sprintf\n)\n\n\/\/ math ---------------------------------------------------------\n\n\/\/ imax returns the maximum value\nfunc imax(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ imin returns the minimum value\nfunc imin(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ isum returns the summation\nfunc isum(vals ...int) int {\n\tsum := 0\n\tfor _, v := range vals {\n\t\tsum += v\n\t}\n\treturn sum\n}\n\n\/\/ string ---------------------------------------------------------\n\n\/\/ rpad adds padding to the right of a string.\nfunc rpad(s string, padding int) string {\n\ttemplate := fmt.Sprintf(\"%%-%ds\", padding)\n\treturn fmt.Sprintf(template, s)\n}\n\n\/\/ file ---------------------------------------------------------\n\n\/\/ isFileExist checks whether a file exists\nfunc isFileExist(filename string) bool {\n\tpath := os.ExpandEnv(filename)\n\tisExist := true\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tisExist = false\n\t}\n\treturn isExist\n}\n\n\/\/ print ---------------------------------------------------------\nAdd new functions...package invoices\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unsafe\"\n)\n\nvar (\n\ttrim = strings.TrimSpace\n\tsf = fmt.Sprintf\n)\n\n\/\/ math ---------------------------------------------------------\n\n\/\/ imax returns the maximum value\nfunc imax(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ imin returns the minimum value\nfunc imin(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/\/ isum returns the summation\nfunc isum(vals ...int) int {\n\tsum := 0\n\tfor _, v := range vals {\n\t\tsum += v\n\t}\n\treturn sum\n}\n\n\/\/ string ---------------------------------------------------------\n\n\/\/ rpad adds padding to the right of a string.\nfunc rpad(s string, padding int) string {\n\ttemplate := fmt.Sprintf(\"%%-%ds\", padding)\n\treturn fmt.Sprintf(template, s)\n}\n\n\/\/ BytesSizeToString converts bytes to a human-readable size\nfunc BytesSizeToString(byteCount int) string {\n\tsuf := []string{\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"} \/\/ Longs run out around EB\n\tif byteCount == 0 {\n\t\treturn \"0\" + suf[0]\n\t}\n\tbytes := math.Abs(float64(byteCount))\n\tplace := int32(math.Floor(math.Log2(bytes) \/ 10))\n\tnum := bytes \/ math.Pow(1024.0, float64(place))\n\tvar strnum string\n\tif place == 0 {\n\t\tstrnum = fmt.Sprintf(\"%.0f\", num) + suf[place]\n\t} else {\n\t\tstrnum = fmt.Sprintf(\"%.1f\", num) + suf[place]\n\t}\n\treturn strnum\n}\n\n\/\/ ConvertBytesToString converts []byte to string without copying (unsafe)\nfunc ConvertBytesToString(bs []byte) string {\n\treturn *(*string)(unsafe.Pointer(&bs))\n}\n\n\/\/ GetColStr returns the padded string used in a table field\nfunc GetColStr(s string, size int, isleft bool) string {\n\t_, _, n := CountChars(s)\n\tspaces := strings.Repeat(\" \", size-n)\n\t\/\/ size := nc*2 + ne \/\/ actual display width of s\n\tvar tab string\n\tif isleft {\n\t\ttab = fmt.Sprintf(\"%[1]s%[2]s\", s, spaces)\n\t} else {\n\t\ttab = fmt.Sprintf(\"%[2]s%[1]s\", s, spaces)\n\t}\n\treturn \" \" + tab\n}\n\n\/\/ CountChars returns the counts of Chinese (nc) and English (ne) characters,\n\/\/ and the total display width n\nfunc CountChars(str string) (nc, ne, n int) {\n\tfor _, r 
:= range str {\n\t\tlchar := len(string(r))\n\t\t\/\/ n += lchar\n\t\tif lchar > 1 {\n\t\t\tnc++\n\t\t} else {\n\t\t\tne++\n\t\t}\n\t}\n\tn = 2*nc + ne\n\treturn nc, ne, n\n}\n\n\/\/ IsChineseChar reports whether str contains any Chinese (Han) characters\nfunc IsChineseChar(str string) bool {\n\tfor _, r := range str {\n\t\tif unicode.Is(unicode.Scripts[\"Han\"], r) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ArgsTable prints a nice table with input arguments; a usage sketch follows below.\n\/\/ Input:\n\/\/ title -- title of table; e.g. INPUT ARGUMENTS\n\/\/ data -- sets of THREE items in the following order:\n\/\/ description, key, value, ...\n\/\/ description, key, value, ...\n\/\/ ...\n\/\/ description, key, value, ...\nfunc ArgsTable(title string, data ...interface{}) string {\n\theads := []string{\"description\", \"key\", \"value\"}\n\treturn ArgsTableN(title, 0, heads, data...)\n}\n
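\n\/\/ exampleArgsTable is an illustrative sketch of the description\/key\/value\n\/\/ triplet layout expected by ArgsTable (the values shown are arbitrary).\nfunc exampleArgsTable() string {\n\treturn ArgsTable(\"INPUT ARGUMENTS\",\n\t\t\"source directory\", \"src\", \"\/tmp\/in\",\n\t\t\"worker count\", \"n\", 4,\n\t)\n}\n\n\/\/ ArgsTableN prints a nice table with input arguments\n\/\/ Input:\n\/\/ title -- title of table; e.g. INPUT ARGUMENTS\n\/\/\t nledsp -- length of leading spaces in every row\n\/\/\t heads -- heads of table; e.g. []string{ col1, col2, ... }\n\/\/ data -- sets of len(heads) items in the following order:\n\/\/ column1, column2, column3, ...\n\/\/ column1, column2, column3, ...\n\/\/ ...\n\/\/ column1, column2, column3, ...\nfunc ArgsTableN(title string, nledsp int, heads []string, data ...interface{}) string {\n\tSf := fmt.Sprintf\n\tnf := len(heads)\n\tndat := len(data)\n\tif ndat < nf {\n\t\treturn \"\"\n\t}\n\tif nledsp < 0 {\n\t\tnledsp = 0\n\t}\n\tlspaces := StrSpaces(nledsp)\n\tnlines := ndat \/ nf\n\tsizes := make([]int, nf)\n\tfor i := 0; i < nf; i++ {\n\t\t_, _, sizes[i] = CountChars(heads[i])\n\t}\n\tfor i := 0; i < nlines; i++ {\n\t\tif i*nf+(nf-1) >= ndat {\n\t\t\treturn Sf(\"ArgsTable: input arguments are not a multiple of %d\\n\", nf)\n\t\t}\n\t\tfor j := 0; j < nf; j++ {\n\t\t\tstr := Sf(\"%v\", data[i*nf+j])\n\t\t\t_, _, nmix := CountChars(str)\n\t\t\tsizes[j] = imax(sizes[j], nmix)\n\t\t}\n\t}\n\t\/\/ strfmt := Sf(\"%%v %%v %%v\\n\")\n\tn := isum(sizes...) 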
+ nf + (nf-1)*2 + 1 \/\/ total width: column sizes plus separators\n\t_, _, l := CountChars(title)\n\tm := (n - l) \/ 2\n\t\/\/\n\tvar b bytes.Buffer\n\tbw := b.WriteString\n\t\/\/\n\tbw(StrSpaces(m+nledsp) + title + \"\\n\")\n\tbw(lspaces + StrThickLine(n))\n\tisleft := true\n\tsfields := make([]string, nf)\n\tfor i := 0; i < nf; i++ {\n\t\tsfields[i] = GetColStr(heads[i], sizes[i], isleft)\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tbw(Sf(\"%v\", lspaces+sfields[i]))\n\t\tdefault:\n\t\t\tbw(Sf(\" %v\", sfields[i]))\n\t\t}\n\t}\n\tbw(\"\\n\")\n\tbw(lspaces + StrThinLine(n))\n\tfor i := 0; i < nlines; i++ {\n\t\tfor j := 0; j < nf; j++ {\n\t\t\tsfields[j] = GetColStr(Sf(\"%v\", data[i*nf+j]), sizes[j], isleft)\n\t\t\tswitch j {\n\t\t\tcase 0:\n\t\t\t\tbw(Sf(\"%v\", lspaces+sfields[j]))\n\t\t\tdefault:\n\t\t\t\tbw(Sf(\" %v\", sfields[j]))\n\t\t\t}\n\t\t}\n\t\tbw(\"\\n\")\n\t}\n\tbw(lspaces + StrThickLine(n))\n\treturn b.String()\n}\n\n\/\/ StrThickLine returns a thick line (using '=')\nfunc StrThickLine(n int) (l string) {\n\tl = strings.Repeat(\"=\", n)\n\treturn l + \"\\n\"\n}\n\n\/\/ StrThinLine returns a thin line (using '-')\nfunc StrThinLine(n int) (l string) {\n\tl = strings.Repeat(\"-\", n)\n\treturn l + \"\\n\"\n}\n\n\/\/ StrSpaces returns a line with spaces\nfunc StrSpaces(n int) (l string) {\n\tl = strings.Repeat(\" \", n)\n\treturn\n}\n\n\/\/ file ---------------------------------------------------------\n\n\/\/ isFileExist checks whether a file exists\nfunc isFileExist(filename string) bool {\n\tpath := os.ExpandEnv(filename)\n\tisExist := true\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tisExist = false\n\t}\n\treturn isExist\n}\n\n\/\/ print ---------------------------------------------------------\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Luke Shumaker\n\npackage web\n\nimport (\n\the \"httpentity\"\n\t\"net\/http\"\n\t\"periwinkle\/cfg\"\n\t\"periwinkle\/store\"\n\t\"time\"\n)\n\nfunc Main() error {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/v1\/\", &he.Router{\n\t\tPrefix: \"\/v1\/\",\n\t\tRoot: store.DirRoot,\n\t\tMiddlewares: []he.Middleware{postHack{}, database{}, session{}},\n\t\tStacktrace: cfg.Debug,\n\t})\n\tmux.Handle(\"\/webui\/\", http.StripPrefix(\"\/webui\/\", http.FileServer(cfg.WebUiDir)))\n\tserver := &http.Server{\n\t\tAddr: cfg.WebAddr,\n\t\tHandler: mux,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\tserver.ListenAndServe()\n\tpanic(\"not reached\")\n}\nfix error handling for the web listener\/\/ Copyright 2015 Luke Shumaker\n\npackage web\n\nimport (\n\t\"fmt\"\n\the \"httpentity\"\n\t\"net\/http\"\n\t\"periwinkle\/cfg\"\n\t\"periwinkle\/store\"\n\t\"time\"\n)\n\nfunc Main() error {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/v1\/\", &he.Router{\n\t\tPrefix: \"\/v1\/\",\n\t\tRoot: store.DirRoot,\n\t\tMiddlewares: []he.Middleware{postHack{}, database{}, session{}},\n\t\tStacktrace: cfg.Debug,\n\t})\n\tmux.Handle(\"\/webui\/\", http.StripPrefix(\"\/webui\/\", http.FileServer(cfg.WebUiDir)))\n\tserver := &http.Server{\n\t\tAddr: cfg.WebAddr,\n\t\tHandler: mux,\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\terr := server.ListenAndServe()\n\tpanic(fmt.Sprintf(\"Could not start HTTP server: %v\", err))\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pcrawfor\/fayego\/fayeserver\"\n)\n\nfunc main() {\n\tfmt.Println(\"Starting faye server on port 3000\")\n\tfayeserver.Start(\":3002\")\n}\nUpdating 
portpackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pcrawfor\/fayego\/fayeserver\"\n)\n\nfunc main() {\n\tfmt.Println(\"Starting faye server on port 3002\")\n\tfayeserver.Start(\":3002\")\n}\n<|endoftext|>"} {"text":"\/*\n Copyright 2020 Docker Compose CLI authors\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage compose\n\nimport (\n\t\"context\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/docker\/compose-cli\/api\/compose\"\n\n\t\"github.com\/docker\/compose-cli\/api\/progress\"\n\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\tmoby \"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nfunc (s *composeService) Down(ctx context.Context, projectName string, options compose.DownOptions) error {\n\tw := progress.ContextWriter(ctx)\n\n\tif options.Project == nil {\n\t\tproject, err := s.projectFromContainerLabels(ctx, projectName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toptions.Project = project\n\t}\n\n\tvar containers Containers\n\tcontainers, err := s.apiClient.ContainerList(ctx, moby.ContainerListOptions{\n\t\tFilters: filters.NewArgs(projectFilter(options.Project.Name)),\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = InReverseDependencyOrder(ctx, options.Project, func(c context.Context, service types.ServiceConfig) error {\n\t\tserviceContainers, others := containers.split(isService(service.Name))\n\t\terr := s.removeContainers(ctx, w, serviceContainers)\n\t\tcontainers = others\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif options.RemoveOrphans && len(containers) > 0 {\n\t\terr := s.removeContainers(ctx, w, containers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnetworks, err := s.apiClient.NetworkList(ctx, moby.NetworkListOptions{\n\t\tFilters: filters.NewArgs(\n\t\t\tprojectFilter(projectName),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\teg, _ := errgroup.WithContext(ctx)\n\tfor _, n := range networks {\n\t\tnetworkID := n.ID\n\t\tnetworkName := n.Name\n\t\teg.Go(func() error {\n\t\t\treturn s.ensureNetworkDown(ctx, networkID, networkName)\n\t\t})\n\t}\n\treturn eg.Wait()\n}\n\nfunc (s *composeService) stopContainers(ctx context.Context, w progress.Writer, containers []moby.Container) error {\n\tfor _, container := range containers {\n\t\ttoStop := container\n\t\teventName := getContainerProgressName(toStop)\n\t\tw.Event(progress.StoppingEvent(eventName))\n\t\terr := s.apiClient.ContainerStop(ctx, toStop.ID, nil)\n\t\tif err != nil {\n\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Stopping\"))\n\t\t\treturn err\n\t\t}\n\t\tw.Event(progress.StoppedEvent(eventName))\n\t}\n\treturn nil\n}\n\nfunc (s *composeService) removeContainers(ctx context.Context, w progress.Writer, containers []moby.Container) error {\n\teg, _ := errgroup.WithContext(ctx)\n\tfor _, container := range containers {\n\t\ttoDelete := 
container\n\t\teg.Go(func() error {\n\t\t\teventName := getContainerProgressName(toDelete)\n\t\t\tw.Event(progress.StoppingEvent(eventName))\n\t\t\terr := s.stopContainers(ctx, w, []moby.Container{toDelete})\n\t\t\tif err != nil {\n\t\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Stopping\"))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.Event(progress.RemovingEvent(eventName))\n\t\t\terr = s.apiClient.ContainerRemove(ctx, toDelete.ID, moby.ContainerRemoveOptions{Force: true})\n\t\t\tif err != nil {\n\t\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Removing\"))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.Event(progress.RemovedEvent(eventName))\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn eg.Wait()\n}\n\nfunc (s *composeService) projectFromContainerLabels(ctx context.Context, projectName string) (*types.Project, error) {\n\tcontainers, err := s.apiClient.ContainerList(ctx, moby.ContainerListOptions{\n\t\tFilters: filters.NewArgs(\n\t\t\tprojectFilter(projectName),\n\t\t),\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfakeProject := &types.Project{\n\t\tName: projectName,\n\t}\n\tif len(containers) == 0 {\n\t\treturn fakeProject, nil\n\t}\n\toptions, err := loadProjectOptionsFromLabels(containers[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif options.ConfigPaths[0] == \"-\" {\n\t\tfor _, container := range containers {\n\t\t\tfakeProject.Services = append(fakeProject.Services, types.ServiceConfig{\n\t\t\t\tName: container.Labels[serviceLabel],\n\t\t\t})\n\t\t}\n\t\treturn fakeProject, nil\n\t}\n\tproject, err := cli.ProjectFromOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn project, nil\n}\n\nfunc loadProjectOptionsFromLabels(c moby.Container) (*cli.ProjectOptions, error) {\n\tvar configFiles []string\n\trelativePathConfigFiles := strings.Split(c.Labels[configFilesLabel], \",\")\n\tfor _, c := range relativePathConfigFiles {\n\t\tconfigFiles = append(configFiles, filepath.Base(c))\n\t}\n\treturn cli.NewProjectOptions(configFiles,\n\t\tcli.WithOsEnv,\n\t\tcli.WithWorkingDirectory(c.Labels[workingDirLabel]),\n\t\tcli.WithName(c.Labels[projectLabel]))\n}\nDisplay warning in `docker compose down` if nothing to remove (no container, no network) For reference, `docker-compose` displays `WARNING: Network sentences_default not found`\/*\n Copyright 2020 Docker Compose CLI authors\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage compose\n\nimport (\n\t\"context\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/docker\/compose-cli\/api\/compose\"\n\n\t\"github.com\/docker\/compose-cli\/api\/progress\"\n\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\tmoby \"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/filters\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nfunc (s *composeService) Down(ctx context.Context, projectName string, options compose.DownOptions) error {\n\tw := progress.ContextWriter(ctx)\n\tresourceToRemove 
:= false\n\n\tif options.Project == nil {\n\t\tproject, err := s.projectFromContainerLabels(ctx, projectName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toptions.Project = project\n\t}\n\n\tvar containers Containers\n\tcontainers, err := s.apiClient.ContainerList(ctx, moby.ContainerListOptions{\n\t\tFilters: filters.NewArgs(projectFilter(options.Project.Name)),\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) > 0 {\n\t\tresourceToRemove = true\n\t}\n\n\terr = InReverseDependencyOrder(ctx, options.Project, func(c context.Context, service types.ServiceConfig) error {\n\t\tserviceContainers, others := containers.split(isService(service.Name))\n\t\terr := s.removeContainers(ctx, w, serviceContainers)\n\t\tcontainers = others\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif options.RemoveOrphans && len(containers) > 0 {\n\t\terr := s.removeContainers(ctx, w, containers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnetworks, err := s.apiClient.NetworkList(ctx, moby.NetworkListOptions{\n\t\tFilters: filters.NewArgs(\n\t\t\tprojectFilter(projectName),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\teg, _ := errgroup.WithContext(ctx)\n\tfor _, n := range networks {\n\t\tresourceToRemove = true\n\t\tnetworkID := n.ID\n\t\tnetworkName := n.Name\n\t\teg.Go(func() error {\n\t\t\treturn s.ensureNetworkDown(ctx, networkID, networkName)\n\t\t})\n\t}\n\tif !resourceToRemove {\n\t\tw.Event(progress.NewEvent(projectName, progress.Done, \"Warning: No resource found to remove\"))\n\t}\n\treturn eg.Wait()\n}\n\nfunc (s *composeService) stopContainers(ctx context.Context, w progress.Writer, containers []moby.Container) error {\n\tfor _, container := range containers {\n\t\ttoStop := container\n\t\teventName := getContainerProgressName(toStop)\n\t\tw.Event(progress.StoppingEvent(eventName))\n\t\terr := s.apiClient.ContainerStop(ctx, toStop.ID, nil)\n\t\tif err != nil {\n\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Stopping\"))\n\t\t\treturn err\n\t\t}\n\t\tw.Event(progress.StoppedEvent(eventName))\n\t}\n\treturn nil\n}\n\nfunc (s *composeService) removeContainers(ctx context.Context, w progress.Writer, containers []moby.Container) error {\n\teg, _ := errgroup.WithContext(ctx)\n\tfor _, container := range containers {\n\t\ttoDelete := container\n\t\teg.Go(func() error {\n\t\t\teventName := getContainerProgressName(toDelete)\n\t\t\tw.Event(progress.StoppingEvent(eventName))\n\t\t\terr := s.stopContainers(ctx, w, []moby.Container{toDelete})\n\t\t\tif err != nil {\n\t\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Stopping\"))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.Event(progress.RemovingEvent(eventName))\n\t\t\terr = s.apiClient.ContainerRemove(ctx, toDelete.ID, moby.ContainerRemoveOptions{Force: true})\n\t\t\tif err != nil {\n\t\t\t\tw.Event(progress.ErrorMessageEvent(eventName, \"Error while Removing\"))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw.Event(progress.RemovedEvent(eventName))\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn eg.Wait()\n}\n\nfunc (s *composeService) projectFromContainerLabels(ctx context.Context, projectName string) (*types.Project, error) {\n\tcontainers, err := s.apiClient.ContainerList(ctx, moby.ContainerListOptions{\n\t\tFilters: filters.NewArgs(\n\t\t\tprojectFilter(projectName),\n\t\t),\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfakeProject := &types.Project{\n\t\tName: projectName,\n\t}\n\tif len(containers) == 0 {\n\t\treturn fakeProject, 
nil\n\t}\n\toptions, err := loadProjectOptionsFromLabels(containers[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif options.ConfigPaths[0] == \"-\" {\n\t\tfor _, container := range containers {\n\t\t\tfakeProject.Services = append(fakeProject.Services, types.ServiceConfig{\n\t\t\t\tName: container.Labels[serviceLabel],\n\t\t\t})\n\t\t}\n\t\treturn fakeProject, nil\n\t}\n\tproject, err := cli.ProjectFromOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn project, nil\n}\n\nfunc loadProjectOptionsFromLabels(c moby.Container) (*cli.ProjectOptions, error) {\n\tvar configFiles []string\n\trelativePathConfigFiles := strings.Split(c.Labels[configFilesLabel], \",\")\n\tfor _, c := range relativePathConfigFiles {\n\t\tconfigFiles = append(configFiles, filepath.Base(c))\n\t}\n\treturn cli.NewProjectOptions(configFiles,\n\t\tcli.WithOsEnv,\n\t\tcli.WithWorkingDirectory(c.Labels[workingDirLabel]),\n\t\tcli.WithName(c.Labels[projectLabel]))\n}\n<|endoftext|>"} {"text":"\/\/ +build linux\n\npackage main\n\nimport (\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\n\t\"github.com\/godbus\/dbus\"\n)\n\nvar conn *dbus.Conn\n\ntype State struct {\n\tVersion string `json:\"version\"`\n\tID string `json:\"id\"`\n\tPid int `json:\"pid\"`\n\tRoot string `json:\"root\"`\n}\n\nfunc Validate(id string) (string, error) {\n\tfor len(id) < 32 {\n\t\tid += \"0\"\n\t}\n\treturn hex.EncodeToString([]byte(id)), nil\n}\n\n\/\/ RegisterMachine with systemd on the host system\nfunc RegisterMachine(name string, id string, pid int, root_directory string) error {\n\tvar (\n\t\tav []byte\n\t\terr error\n\t)\n\tif conn == nil {\n\t\tconn, err = dbus.SystemBus()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tav, err = hex.DecodeString(id[0:32])\n\tif err != nil {\n\t\treturn err\n\t}\n\tobj := conn.Object(\"org.freedesktop.machine1\", \"\/org\/freedesktop\/machine1\")\n\tservice := os.Getenv(\"container\")\n\tif service == \"\" {\n\t\tservice = \"runc\"\n\t}\n\tlog.Print(\"RegisterMachine: objCall\")\n\treturn obj.Call(\"org.freedesktop.machine1.Manager.RegisterMachine\", 0, name, av, service, \"container\", uint32(pid), root_directory).Err\n}\n\n\/\/ TerminateMachine terminates a machine registered with systemd on the host system\nfunc TerminateMachine(name string) error {\n\tvar err error\n\tif conn == nil {\n\t\tconn, err = dbus.SystemBus()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tobj := conn.Object(\"org.freedesktop.machine1\", \"\/org\/freedesktop\/machine1\")\n\treturn obj.Call(\"org.freedesktop.machine1.Manager.TerminateMachine\", 0, name).Err\n}\n\nfunc main() {\n\tvar state State\n\tlogwriter, err := syslog.New(syslog.LOG_NOTICE, \"ociRegisterMachine\")\n\tif err == nil {\n\t\tlog.SetOutput(logwriter)\n\t}\n\tcommand := os.Args[1]\n\tlog.Print(\"oci register machine: \", command)\n\tif err := json.NewDecoder(os.Stdin).Decode(&state); err != nil {\n\t\tlog.Fatalf(\"RegisterMachine Failed %v\", err.Error())\n\t}\n\n\tlog.Printf(\"Register machine: %s %s %d %s\", command, state.ID, state.Pid, state.Root)\n\t\/\/ ensure id is a hex string at least 32 chars\n\tpassId, err := Validate(state.ID)\n\tif err != nil {\n\t\tlog.Fatalf(\"RegisterMachine Failed %v\", err.Error())\n\t}\n\n\tswitch command {\n\tcase \"prestart\":\n\t\t{\n\t\t\tif err = RegisterMachine(state.ID, passId, int(state.Pid), state.Root); err != nil {\n\t\t\t\tlog.Fatalf(\"Register machine failed: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\tcase 
\"poststop\":\n\t\t{\n\t\t\tif err := TerminateMachine(state.ID); err != nil {\n\t\t\t\tlog.Fatalf(\"TerminateMachine failed: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"Invalid command %q must be prestart|poststop\", command)\n\t}\n}\nPass in the root path as \/ so journalctl will work\/\/ +build linux\n\npackage main\n\nimport (\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"log\"\n\t\"log\/syslog\"\n\t\"os\"\n\n\t\"github.com\/godbus\/dbus\"\n)\n\nvar conn *dbus.Conn\n\ntype State struct {\n\tVersion string `json:\"version\"`\n\tID string `json:\"id\"`\n\tPid int `json:\"pid\"`\n\tRoot string `json:\"root\"`\n}\n\nfunc Validate(id string) (string, error) {\n\tfor len(id) < 32 {\n\t\tid += \"0\"\n\t}\n\treturn hex.EncodeToString([]byte(id)), nil\n}\n\n\/\/ RegisterMachine with systemd on the host system\nfunc RegisterMachine(name string, id string, pid int, root_directory string) error {\n\tvar (\n\t\tav []byte\n\t\terr error\n\t)\n\tif conn == nil {\n\t\tconn, err = dbus.SystemBus()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tav, err = hex.DecodeString(id[0:32])\n\tif err != nil {\n\t\treturn err\n\t}\n\tobj := conn.Object(\"org.freedesktop.machine1\", \"\/org\/freedesktop\/machine1\")\n\tservice := os.Getenv(\"container\")\n\tif service == \"\" {\n\t\tservice = \"runc\"\n\t}\n\tlog.Print(\"RegisterMachine: objCall\")\n\t\/*\treturn obj.Call(\"org.freedesktop.machine1.Manager.RegisterMachine\", 0, name[0:32], av, service, \"container\", uint32(pid), root_directory).Err\n\t *\/\n\t\/\/ Pass \"\/\" as the root directory so journalctl can resolve the machine's logs.\n\treturn obj.Call(\"org.freedesktop.machine1.Manager.RegisterMachine\", 0, name[0:32], av, service, \"container\", uint32(pid), \"\/\").Err\n}\n\n\/\/ TerminateMachine terminates a machine registered with systemd on the host system\nfunc TerminateMachine(name string) error {\n\tvar err error\n\tif conn == nil {\n\t\tconn, err = dbus.SystemBus()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tobj := conn.Object(\"org.freedesktop.machine1\", \"\/org\/freedesktop\/machine1\")\n\treturn obj.Call(\"org.freedesktop.machine1.Manager.TerminateMachine\", 0, name).Err\n}\n\nfunc main() {\n\tvar state State\n\tlogwriter, err := syslog.New(syslog.LOG_NOTICE, \"ociRegisterMachine\")\n\tif err == nil {\n\t\tlog.SetOutput(logwriter)\n\t}\n\tcommand := os.Args[1]\n\tlog.Print(\"oci register machine: \", command)\n\tif err := json.NewDecoder(os.Stdin).Decode(&state); err != nil {\n\t\tlog.Fatalf(\"RegisterMachine Failed %v\", err.Error())\n\t}\n\n\tlog.Printf(\"Register machine: %s %s %d %s\", command, state.ID, state.Pid, state.Root)\n\t\/\/ ensure id is a hex string at least 32 chars\n\tpassId, err := Validate(state.ID)\n\tif err != nil {\n\t\tlog.Fatalf(\"RegisterMachine Failed %v\", err.Error())\n\t}\n\n\tswitch command {\n\tcase \"prestart\":\n\t\t{\n\t\t\tif err = RegisterMachine(state.ID, passId, int(state.Pid), state.Root); err != nil {\n\t\t\t\tlog.Fatalf(\"Register machine failed: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\tcase \"poststop\":\n\t\t{\n\t\t\tif err := TerminateMachine(state.ID); err != nil {\n\t\t\t\tlog.Fatalf(\"TerminateMachine failed: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"Invalid command %q must be prestart|poststop\", command)\n\t}\n}\n
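\n\/\/ exampleState shows the JSON shape this hook expects on stdin; the field\n\/\/ values are illustrative only.\nfunc exampleState() []byte {\n\tb, _ := json.Marshal(State{\n\t\tVersion: \"1.0\",\n\t\tID: \"abc123\",\n\t\tPid: 4242,\n\t\tRoot: \"\/run\/containers\/abc123\",\n\t})\n\treturn b\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc inet_ntoa(ipnr int64) net.IP {\n\tvar bytes [4]byte\n\tbytes[3] = byte(ipnr & 0xFF)\n\tbytes[2] = byte((ipnr >> 8) & 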
0xFF)\n\tbytes[1] = byte((ipnr >> 16) & 0xFF)\n\tbytes[0] = byte((ipnr >> 24) & 0xFF)\n\treturn net.IP(bytes[:])\n}\n\nfunc inet_aton(ipnr net.IP) int64 {\n\tbits := strings.Split(ipnr.String(), \".\")\n\n\tb0, _ := strconv.Atoi(bits[0])\n\tb1, _ := strconv.Atoi(bits[1])\n\tb2, _ := strconv.Atoi(bits[2])\n\tb3, _ := strconv.Atoi(bits[3])\n\n\tvar sum int64\n\tsum += int64(b0) << 24\n\tsum += int64(b1) << 16\n\tsum += int64(b2) << 8\n\tsum += int64(b3)\n\treturn sum\n}\n\ntype IPRange struct {\n\tStartIP int64\n\tEndIP int64\n}\n\nfunc parseIPRange(start, end string) (*IPRange, error) {\n\tstart = strings.TrimSpace(start)\n\tend = strings.TrimSpace(end)\n\n\tif !strings.Contains(end, \".\") {\n\t\tss := strings.Split(start, \".\")\n\t\tst := strings.Join(ss[0:3], \".\")\n\t\tend = st + \".\" + end\n\t\t\/\/\t\tfmt.Printf(\"###%v \", st)\n\t\t\/\/\t\treturn nil, fmt.Errorf(\"Invalid IPRange %s-%s\", start, end)\n\t}\n\t\/\/fmt.Printf(\"##%s %s\\n\",start, end)\n\tsi := net.ParseIP(start)\n\tei := net.ParseIP(end)\n\n\tiprange := new(IPRange)\n\tiprange.StartIP = inet_aton(si)\n\tiprange.EndIP = inet_aton(ei)\n\tif iprange.StartIP > iprange.EndIP {\n\t\treturn nil, fmt.Errorf(\"Invalid IPRange %s-%s\", start, end)\n\t}\n\treturn iprange, nil\n}\n\nfunc parseIPRangeFile(file string) ([]*IPRange, error) {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tipranges := make([]*IPRange, 0)\n\tscanner := bufio.NewScanner(f)\n\tlineno := 1\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\t\/\/ comment lines start with '#'\n\t\tif strings.HasPrefix(line, \"#\") || len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar startIP, endIP string\n\t\t\/\/ 1.9.22.0\/24-1.9.22.0\/24\n\t\tif strings.Contains(line, \"-\") && strings.Contains(line, \"\/\") {\n\t\t\tss := strings.Split(line, \"-\")\n\t\t\tif len(ss) != 2 {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tiprange1, iprange2 := ss[0], ss[1]\n\t\t\tif strings.Contains(iprange1, \"\/\") {\n\t\t\t\tstartIP = iprange1[:strings.Index(iprange1, \"\/\")]\n\t\t\t} else {\n\t\t\t\t\/\/ 1.9.22.0-1.9.23.0\/24\n\t\t\t\tstartIP = iprange1\n\t\t\t}\n\n\t\t\tif net.ParseIP(startIP) == nil {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tip, ipnet, err := net.ParseCIDR(iprange2)\n\t\t\tif nil != err {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tones, _ := ipnet.Mask.Size()\n\t\t\tv := inet_aton(ip)\n\t\t\ttmp := uint32(0xFFFFFFFF)\n\t\t\ttmp = tmp >> uint32(ones)\n\t\t\tv = v | int64(tmp)\n\t\t\tendip := inet_ntoa(v)\n\t\t\tendIP = endip.String()\n\t\t} else if strings.Contains(line, \"\/\") {\n\t\t\tip, ipnet, err := net.ParseCIDR(line)\n\t\t\tif nil != err {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP = ip.String()\n\t\t\tones, _ := ipnet.Mask.Size()\n\t\t\tv := inet_aton(ip)\n\t\t\ttmp := uint32(0xFFFFFFFF)\n\t\t\ttmp = tmp >> uint32(ones)\n\t\t\tv = v | int64(tmp)\n\t\t\tendip := inet_ntoa(v)\n\t\t\tendIP = endip.String()\n\t\t} else if strings.Contains(line, \"-\") {\n\t\t\tss := strings.Split(line, \"-\")\n\t\t\tif len(ss) != 2 {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, 
file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP, endIP = ss[0], ss[1]\n\t\t} else {\n\t\t\tif net.ParseIP(line) == nil {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP, endIP = line, line\n\t\t}\n\n\t\tiprange, err := parseIPRange(startIP, endIP)\n\t\tif nil != err {\n\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\tcontinue\n\t\t}\n\t\tipranges = append(ipranges, iprange)\n\t\tlineno = lineno + 1\n\t}\n\n\t\/\/ 去重操作\n\t\/*\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.0.0\/16\"\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.22.0\/24\"\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.22.0-1.9.22.100\"\n\t\t\"1.9.22.0-1.9.22.255\"\n\t\t\"1.9.0.0\/16\"\n\t\t\"3.3.3.0\/24\"\n\t\t\"3.3.0.0\/16\"\n\t\t\"3.3.3.0-255\"\n\t\t\"1.1.1.0\/24\"\n\t\t\"1.9.0.0\/16\"\n\t\t\t +\n\t\t\t |\n\t\t\t |\n\t\t\t v\n\t\t&main.IPRange{StartIP:17367040, EndIP:17432575},\n\t\t&main.IPRange{StartIP:50528256, EndIP:50593791},\n\t\t&main.IPRange{StartIP:16843008, EndIP:16843263},\n\t*\/\n\tsort.Slice(ipranges, func(i int, j int) bool {\n\t\treturn ipranges[i].EndIP-ipranges[i].StartIP > ipranges[j].EndIP-ipranges[j].StartIP\n\t})\n\tvar newIpranges []*IPRange\n\tfor _, iprange := range ipranges {\n\t\tif !contains(newIpranges, iprange) {\n\t\t\tnewIpranges = append(newIpranges, iprange)\n\t\t}\n\t}\n\n\t\/\/ 打乱扫描顺序\n\tif len(newIpranges) > 0 {\n\t\trand.Seed(time.Now().Unix())\n\t\tdest := make([]*IPRange, len(newIpranges))\n\t\tperm := rand.Perm(len(newIpranges))\n\t\tfor i, v := range perm {\n\t\t\tdest[v] = newIpranges[i]\n\t\t}\n\t\tnewIpranges = dest\n\t}\n\treturn newIpranges, nil\n}\n\nfunc contains(ipranges []*IPRange, iprange *IPRange) bool {\n\tfor _, x := range ipranges {\n\t\tif x.StartIP <= iprange.StartIP && x.EndIP >= iprange.EndIP {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n修正错误IP段的行号输出package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc inet_ntoa(ipnr int64) net.IP {\n\tvar bytes [4]byte\n\tbytes[3] = byte(ipnr & 0xFF)\n\tbytes[2] = byte((ipnr >> 8) & 0xFF)\n\tbytes[1] = byte((ipnr >> 16) & 0xFF)\n\tbytes[0] = byte((ipnr >> 24) & 0xFF)\n\treturn net.IP(bytes[:])\n}\n\nfunc inet_aton(ipnr net.IP) int64 {\n\tbits := strings.Split(ipnr.String(), \".\")\n\n\tb0, _ := strconv.Atoi(bits[0])\n\tb1, _ := strconv.Atoi(bits[1])\n\tb2, _ := strconv.Atoi(bits[2])\n\tb3, _ := strconv.Atoi(bits[3])\n\n\tvar sum int64\n\tsum += int64(b0) << 24\n\tsum += int64(b1) << 16\n\tsum += int64(b2) << 8\n\tsum += int64(b3)\n\treturn sum\n}\n\ntype IPRange struct {\n\tStartIP int64\n\tEndIP int64\n}\n\nfunc parseIPRange(start, end string) (*IPRange, error) {\n\tstart = strings.TrimSpace(start)\n\tend = strings.TrimSpace(end)\n\n\tif !strings.Contains(end, \".\") {\n\t\tss := strings.Split(start, \".\")\n\t\tst := strings.Join(ss[0:3], \".\")\n\t\tend = st + \".\" + end\n\t\t\/\/\t\tfmt.Printf(\"###%v \", st)\n\t\t\/\/\t\treturn nil, fmt.Errorf(\"Invalid IPRange %s-%s\", start, end)\n\t}\n\t\/\/fmt.Printf(\"##%s %s\\n\",start, end)\n\tsi := net.ParseIP(start)\n\tei := net.ParseIP(end)\n\n\tiprange := new(IPRange)\n\tiprange.StartIP = inet_aton(si)\n\tiprange.EndIP = inet_aton(ei)\n\tif iprange.StartIP > iprange.EndIP {\n\t\treturn nil, fmt.Errorf(\"Invalid IPRange %s-%s\", start, end)\n\t}\n\treturn iprange, nil\n}\n\nfunc parseIPRangeFile(file string) ([]*IPRange, error) {\n\tf, err := os.Open(file)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tipranges := make([]*IPRange, 0)\n\tscanner := bufio.NewScanner(f)\n\tlineno := 0\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\tlineno++\n\t\t\/\/comment start with '#'\n\t\tif strings.HasPrefix(line, \"#\") || len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar startIP, endIP string\n\t\t\/\/ 1.9.22.0\/24-1.9.22.0\/24\n\t\tif strings.Contains(line, \"-\") && strings.Contains(line, \"\/\") {\n\t\t\tss := strings.Split(line, \"-\")\n\t\t\tif len(ss) != 2 {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tiprange1, iprange2 := ss[0], ss[1]\n\t\t\tif strings.Contains(iprange1, \"\/\") {\n\t\t\t\tstartIP = iprange1[:strings.Index(iprange1, \"\/\")]\n\t\t\t} else {\n\t\t\t\t\/\/ 1.9.22.0-1.9.23.0\/24\n\t\t\t\tstartIP = iprange1\n\t\t\t}\n\n\t\t\tif net.ParseIP(startIP) == nil {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tip, ipnet, err := net.ParseCIDR(iprange2)\n\t\t\tif nil != err {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tones, _ := ipnet.Mask.Size()\n\t\t\tv := inet_aton(ip)\n\t\t\ttmp := uint32(0xFFFFFFFF)\n\t\t\ttmp = tmp >> uint32(ones)\n\t\t\tv = v | int64(tmp)\n\t\t\tendip := inet_ntoa(v)\n\t\t\tendIP = endip.String()\n\t\t} else if strings.Contains(line, \"\/\") {\n\t\t\tip, ipnet, err := net.ParseCIDR(line)\n\t\t\tif nil != err {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP = ip.String()\n\t\t\tones, _ := ipnet.Mask.Size()\n\t\t\tv := inet_aton(ip)\n\t\t\ttmp := uint32(0xFFFFFFFF)\n\t\t\ttmp = 0xFFFFFFFF\n\t\t\ttmp = tmp >> uint32(ones)\n\t\t\tv = v | int64(tmp)\n\t\t\tendip := inet_ntoa(v)\n\t\t\tendIP = endip.String()\n\t\t} else if strings.Contains(line, \"-\") {\n\t\t\tss := strings.Split(line, \"-\")\n\t\t\tif len(ss) != 2 {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP, endIP = ss[0], ss[1]\n\t\t} else {\n\t\t\tif net.ParseIP(line) == nil {\n\t\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstartIP, endIP = line, line\n\t\t}\n\n\t\tiprange, err := parseIPRange(startIP, endIP)\n\t\tif nil != err {\n\t\t\tlog.Printf(\"[WARNING] Invalid line:[%d] %s in IP Range file:%s\", lineno, line, file)\n\t\t\tcontinue\n\t\t}\n\t\tipranges = append(ipranges, iprange)\n\t}\n\n\t\/\/ 去重操作\n\t\/*\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.0.0\/16\"\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.22.0\/24\"\n\t\t\"1.9.22.0-255\"\n\t\t\"1.9.22.0-1.9.22.100\"\n\t\t\"1.9.22.0-1.9.22.255\"\n\t\t\"1.9.0.0\/16\"\n\t\t\"3.3.3.0\/24\"\n\t\t\"3.3.0.0\/16\"\n\t\t\"3.3.3.0-255\"\n\t\t\"1.1.1.0\/24\"\n\t\t\"1.9.0.0\/16\"\n\t\t\t +\n\t\t\t |\n\t\t\t |\n\t\t\t v\n\t\t&main.IPRange{StartIP:17367040, EndIP:17432575},\n\t\t&main.IPRange{StartIP:50528256, EndIP:50593791},\n\t\t&main.IPRange{StartIP:16843008, EndIP:16843263},\n\t*\/\n\tsort.Slice(ipranges, func(i int, j int) bool {\n\t\treturn ipranges[i].EndIP-ipranges[i].StartIP > ipranges[j].EndIP-ipranges[j].StartIP\n\t})\n\tvar newIpranges []*IPRange\n\tfor _, iprange := range ipranges {\n\t\tif !contains(newIpranges, iprange) {\n\t\t\tnewIpranges = append(newIpranges, 
iprange)\n\t\t}\n\t}\n\n\t\/\/ 打乱扫描顺序\n\tif len(newIpranges) > 0 {\n\t\trand.Seed(time.Now().Unix())\n\t\tdest := make([]*IPRange, len(newIpranges))\n\t\tperm := rand.Perm(len(newIpranges))\n\t\tfor i, v := range perm {\n\t\t\tdest[v] = newIpranges[i]\n\t\t}\n\t\tnewIpranges = dest\n\t}\n\treturn newIpranges, nil\n}\n\nfunc contains(ipranges []*IPRange, iprange *IPRange) bool {\n\tfor _, x := range ipranges {\n\t\tif x.StartIP <= iprange.StartIP && x.EndIP >= iprange.EndIP {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"package iptables\n\n\/\/ This package is originally from Docker and has been modified for use by the\n\/\/ Flynn project. See the NOTICE and LICENSE files for licensing and copyright\n\/\/ details.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Action string\n\nconst (\n\tAdd Action = \"-A\"\n\tDelete Action = \"-D\"\n)\n\nvar (\n\tErrIptablesNotFound = errors.New(\"Iptables not found\")\n\tnat = []string{\"-t\", \"nat\"}\n\tsupportsXlock = false\n)\n\ntype Chain struct {\n\tName string\n\tBridge string\n}\n\nfunc init() {\n\tsupportsXlock = exec.Command(\"iptables\", \"--wait\", \"-L\", \"-n\").Run() == nil\n}\n\nfunc NewChain(name, bridge string) (*Chain, error) {\n\tif output, err := Raw(\"-t\", \"nat\", \"-N\", name); err != nil {\n\t\treturn nil, err\n\t} else if len(output) != 0 {\n\t\treturn nil, fmt.Errorf(\"Error creating new iptables chain: %s\", output)\n\t}\n\tchain := &Chain{\n\t\tName: name,\n\t\tBridge: bridge,\n\t}\n\n\tif err := chain.Prerouting(Add, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to inject update PREROUTING chain: %s\", err)\n\t}\n\tif err := chain.Output(Add, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\", \"!\", \"--dst\", \"127.0.0.0\/8\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to inject update OUTPUT chain: %s\", err)\n\t}\n\treturn chain, nil\n}\n\nfunc RemoveExistingChain(name string) error {\n\tchain := &Chain{\n\t\tName: name,\n\t}\n\treturn chain.Remove()\n}\n\nfunc (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error {\n\tdaddr := ip.String()\n\tif ip.IsUnspecified() {\n\t\t\/\/ iptables interprets \"0.0.0.0\" as \"0.0.0.0\/32\", whereas we\n\t\t\/\/ want \"0.0.0.0\/0\". 
\"0\/0\" is correctly interpreted as \"any\n\t\t\/\/ value\" by both iptables and ip6tables.\n\t\tdaddr = \"0\/0\"\n\t}\n\tif output, err := Raw(\"-t\", \"nat\", fmt.Sprint(action), c.Name,\n\t\t\"-p\", proto,\n\t\t\"-d\", daddr,\n\t\t\"--dport\", strconv.Itoa(port),\n\t\t\"-j\", \"DNAT\",\n\t\t\"--to-destination\", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\tfAction := action\n\tif fAction == Add {\n\t\tfAction = \"-I\"\n\t}\n\tif output, err := Raw(string(fAction), \"FORWARD\",\n\t\t\"!\", \"-i\", c.Bridge,\n\t\t\"-o\", c.Bridge,\n\t\t\"-p\", proto,\n\t\t\"-d\", destAddr,\n\t\t\"--dport\", strconv.Itoa(destPort),\n\t\t\"-j\", \"ACCEPT\"); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\tif output, err := Raw(\"-t\", \"nat\", string(fAction), \"POSTROUTING\",\n\t\t\"-p\", proto,\n\t\t\"-s\", destAddr,\n\t\t\"-d\", destAddr,\n\t\t\"--dport\", strconv.Itoa(destPort),\n\t\t\"-j\", \"MASQUERADE\"); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chain) Prerouting(action Action, args ...string) error {\n\ta := append(nat, fmt.Sprint(action), \"PREROUTING\")\n\tif len(args) > 0 {\n\t\ta = append(a, args...)\n\t}\n\tif output, err := Raw(append(a, \"-j\", c.Name)...); err != nil {\n\t\treturn err\n\t} else if len(output) != 0 {\n\t\treturn fmt.Errorf(\"Error iptables prerouting: %s\", output)\n\t}\n\treturn nil\n}\n\nfunc (c *Chain) Output(action Action, args ...string) error {\n\ta := append(nat, fmt.Sprint(action), \"OUTPUT\")\n\tif len(args) > 0 {\n\t\ta = append(a, args...)\n\t}\n\tif output, err := Raw(append(a, \"-j\", c.Name)...); err != nil {\n\t\treturn err\n\t} else if len(output) != 0 {\n\t\treturn fmt.Errorf(\"Error iptables output: %s\", output)\n\t}\n\treturn nil\n}\n\nfunc (c *Chain) Remove() error {\n\t\/\/ Ignore errors - This could mean the chains were never set up\n\tc.Prerouting(Delete, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\")\n\tc.Output(Delete, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\", \"!\", \"--dst\", \"127.0.0.0\/8\")\n\tc.Output(Delete, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\") \/\/ Created in versions <= 0.1.6\n\n\tc.Prerouting(Delete)\n\tc.Output(Delete)\n\n\tRaw(\"-t\", \"nat\", \"-F\", c.Name)\n\tRaw(\"-t\", \"nat\", \"-X\", c.Name)\n\n\treturn nil\n}\n\n\/\/ Check if an existing rule exists\nfunc Exists(args ...string) bool {\n\tif _, err := Raw(append([]string{\"-C\"}, args...)...); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc Raw(args ...string) ([]byte, error) {\n\tpath, err := exec.LookPath(\"iptables\")\n\tif err != nil {\n\t\treturn nil, ErrIptablesNotFound\n\t}\n\n\tif supportsXlock {\n\t\targs = append([]string{\"--wait\"}, args...)\n\t}\n\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\tfmt.Fprintf(os.Stderr, fmt.Sprintf(\"[debug] %s, %v\\n\", path, args))\n\t}\n\n\toutput, err := exec.Command(path, args...).CombinedOutput()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"iptables failed: iptables %v: %s (%s)\", strings.Join(args, \" \"), output, err)\n\t}\n\n\t\/\/ ignore iptables' message about xtables lock\n\tif strings.Contains(string(output), \"waiting for it to 
exit\") {\n\t\toutput = []byte(\"\")\n\t}\n\n\treturn output, err\n}\nAdd support for forwarding traffic to localhostpackage iptables\n\n\/\/ This package is originally from Docker and has been modified for use by the\n\/\/ Flynn project. See the NOTICE and LICENSE files for licensing and copyright\n\/\/ details.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Action string\n\nconst (\n\tAdd Action = \"-A\"\n\tDelete Action = \"-D\"\n)\n\nvar (\n\tErrIptablesNotFound = errors.New(\"Iptables not found\")\n\tnat = []string{\"-t\", \"nat\"}\n\tsupportsXlock = false\n)\n\ntype Chain struct {\n\tName string\n\tBridge string\n}\n\nfunc init() {\n\tsupportsXlock = exec.Command(\"iptables\", \"--wait\", \"-L\", \"-n\").Run() == nil\n}\n\nfunc NewChain(name, bridge string) (*Chain, error) {\n\tif output, err := Raw(\"-t\", \"nat\", \"-N\", name); err != nil {\n\t\treturn nil, err\n\t} else if len(output) != 0 {\n\t\treturn nil, fmt.Errorf(\"Error creating new iptables chain: %s\", output)\n\t}\n\tchain := &Chain{\n\t\tName: name,\n\t\tBridge: bridge,\n\t}\n\n\tif err := chain.Prerouting(Add, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to update PREROUTING chain: %s\", err)\n\t}\n\tif err := chain.Output(Add, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to update OUTPUT chain: %s\", err)\n\t}\n\tif _, err := Raw(\"-t\", \"nat\", \"-A\", \"POSTROUTING\", \"-m\", \"addrtype\", \"--src-type\", \"LOCAL\", \"-o\", bridge, \"-j\", \"MASQUERADE\"); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to update POSTROUTING chain: %s\", err)\n\t}\n\treturn chain, nil\n}\n\nfunc RemoveExistingChain(name string) error {\n\tchain := &Chain{\n\t\tName: name,\n\t}\n\treturn chain.Remove()\n}\n\nfunc (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error {\n\tdaddr := ip.String()\n\tif ip.IsUnspecified() {\n\t\t\/\/ iptables interprets \"0.0.0.0\" as \"0.0.0.0\/32\", whereas we\n\t\t\/\/ want \"0.0.0.0\/0\". 
\"0\/0\" is correctly interpreted as \"any\n\t\t\/\/ value\" by both iptables and ip6tables.\n\t\tdaddr = \"0\/0\"\n\t}\n\tif output, err := Raw(\"-t\", \"nat\", fmt.Sprint(action), c.Name,\n\t\t\"-p\", proto,\n\t\t\"-d\", daddr,\n\t\t\"--dport\", strconv.Itoa(port),\n\t\t\"-j\", \"DNAT\",\n\t\t\"--to-destination\", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\tfAction := action\n\tif fAction == Add {\n\t\tfAction = \"-I\"\n\t}\n\tif output, err := Raw(string(fAction), \"FORWARD\",\n\t\t\"!\", \"-i\", c.Bridge,\n\t\t\"-o\", c.Bridge,\n\t\t\"-p\", proto,\n\t\t\"-d\", destAddr,\n\t\t\"--dport\", strconv.Itoa(destPort),\n\t\t\"-j\", \"ACCEPT\"); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\tif output, err := Raw(\"-t\", \"nat\", string(fAction), \"POSTROUTING\",\n\t\t\"-p\", proto,\n\t\t\"-s\", destAddr,\n\t\t\"-d\", destAddr,\n\t\t\"--dport\", strconv.Itoa(destPort),\n\t\t\"-j\", \"MASQUERADE\"); err != nil && action != Delete {\n\t\treturn err\n\t} else if len(output) != 0 && action != Delete {\n\t\treturn fmt.Errorf(\"Error iptables forward: %s\", output)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Chain) Prerouting(action Action, args ...string) error {\n\ta := append(nat, fmt.Sprint(action), \"PREROUTING\")\n\tif len(args) > 0 {\n\t\ta = append(a, args...)\n\t}\n\tif output, err := Raw(append(a, \"-j\", c.Name)...); err != nil {\n\t\treturn err\n\t} else if len(output) != 0 {\n\t\treturn fmt.Errorf(\"Error iptables prerouting: %s\", output)\n\t}\n\treturn nil\n}\n\nfunc (c *Chain) Output(action Action, args ...string) error {\n\ta := append(nat, fmt.Sprint(action), \"OUTPUT\")\n\tif len(args) > 0 {\n\t\ta = append(a, args...)\n\t}\n\tif output, err := Raw(append(a, \"-j\", c.Name)...); err != nil {\n\t\treturn err\n\t} else if len(output) != 0 {\n\t\treturn fmt.Errorf(\"Error iptables output: %s\", output)\n\t}\n\treturn nil\n}\n\nfunc (c *Chain) Remove() error {\n\t\/\/ Ignore errors - This could mean the chains were never set up\n\tc.Prerouting(Delete, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\")\n\tc.Output(Delete, \"-m\", \"addrtype\", \"--dst-type\", \"LOCAL\")\n\tRaw(\"-t\", \"nat\", \"-D\", \"POSTROUTING\", \"-m\", \"addrtype\", \"--src-type\", \"LOCAL\", \"-o\", c.Bridge, \"-j\", \"MASQUERADE\")\n\n\tc.Prerouting(Delete)\n\tc.Output(Delete)\n\n\tRaw(\"-t\", \"nat\", \"-F\", c.Name)\n\tRaw(\"-t\", \"nat\", \"-X\", c.Name)\n\n\treturn nil\n}\n\n\/\/ Check if an existing rule exists\nfunc Exists(args ...string) bool {\n\tif _, err := Raw(append([]string{\"-C\"}, args...)...); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc Raw(args ...string) ([]byte, error) {\n\tpath, err := exec.LookPath(\"iptables\")\n\tif err != nil {\n\t\treturn nil, ErrIptablesNotFound\n\t}\n\n\tif supportsXlock {\n\t\targs = append([]string{\"--wait\"}, args...)\n\t}\n\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\tfmt.Fprintf(os.Stderr, fmt.Sprintf(\"[debug] %s, %v\\n\", path, args))\n\t}\n\n\toutput, err := exec.Command(path, args...).CombinedOutput()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"iptables failed: iptables %v: %s (%s)\", strings.Join(args, \" \"), output, err)\n\t}\n\n\t\/\/ ignore iptables' message about xtables lock\n\tif strings.Contains(string(output), \"waiting for it to 
exit\") {\n\t\toutput = []byte(\"\")\n\t}\n\n\treturn output, err\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\nvar IPTablesPath = \"iptables\"\n\nfunc init() {\n\n\terr := CheckIPTables()\n\tif err != nil {\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tIPTablesPath = filepath.Join(wd, IPTablesPath)\n\t}\n\n}\n\n\/\/ NOTEs from messing with iptables proxying:\n\/\/ For external:\n\/\/ iptables -A PREROUTING -t nat -p tcp -m tcp --dport 5555 -j REDIRECT --to-ports 49278\n\/\/ For internal:\n\/\/ iptables -A OUTPUT -t nat -p tcp -m tcp --dport 5555 -j REDIRECT --to-ports 49278\n\/\/ To delete a rule, use -D rather than -A.\n\ntype Action bool\n\nconst (\n\tINSERT Action = true\n\tDELETE = false\n)\n\nfunc CheckIPTables() error {\n\treturn exec.Command(IPTablesPath, \"-L\").Run()\n}\n\n\/\/ Invoke one iptables command.\n\/\/ Expects \"iptables\" in the path to be runnable with reasonable permissions.\nfunc iptables(action Action, chain string, source, target int, ipAddress string) *exec.Cmd {\n\tvar cmd *exec.Cmd\n\n\tswitch action {\n\tcase INSERT:\n\t\tcmd = exec.Command(\n\t\t\tIPTablesPath, \"--insert\", chain, \"1\",\n\t\t\t\"--table\", \"nat\",\n\t\t\t\"--protocol\", \"tcp\",\n\t\t\t\/\/ Prevent redirection of packets already going to the container\n\t\t\t\"--match\", \"tcp\", \"!\", \"--destination\", ipAddress,\n\t\t\t\/\/ Prevent redirection of ports on remote servers\n\t\t\t\/\/ (i.e, don't make google:80 hit our container)\n\t\t\t\"--match\", \"addrtype\", \"--dst-type\", \"LOCAL\",\n\t\t\t\"--dport\", fmt.Sprint(source),\n\t\t\t\"--jump\", \"REDIRECT\",\n\t\t\t\"--to-ports\", fmt.Sprint(target))\n\tcase DELETE:\n\t\tcmd = exec.Command(\n\t\t\tIPTablesPath, \"--delete\", chain,\n\t\t\t\"--table\", \"nat\",\n\t\t\t\"--protocol\", \"tcp\",\n\t\t\t\"--match\", \"tcp\", \"!\", \"--destination\", ipAddress,\n\t\t\t\"--match\", \"addrtype\", \"--dst-type\", \"LOCAL\",\n\t\t\t\"--dport\", fmt.Sprint(source),\n\t\t\t\"--jump\", \"REDIRECT\",\n\t\t\t\"--to-ports\", fmt.Sprint(target))\n\t}\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\n\/\/ Configure one port redirect from `source` to `target` using iptables.\n\/\/ Returns an error and a function which undoes the change to the firewall.\nfunc ConfigureRedirect(source, target int, ipAddress string) (func(), error) {\n\n\terr := iptables(INSERT, \"PREROUTING\", source, target, ipAddress).Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = iptables(INSERT, \"OUTPUT\", source, target, ipAddress).Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tremove := func() {\n\t\terr := iptables(DELETE, \"PREROUTING\", source, target, ipAddress).Run()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to remove iptables rule:\", source, target)\n\t\t}\n\t\terr = iptables(DELETE, \"OUTPUT\", source, target, ipAddress).Run()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to remove iptables rule:\", source, target)\n\t\t}\n\t}\n\treturn remove, nil\n}\nEnsure that iptables waits for a lockpackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\nvar IPTablesPath = \"iptables\"\n\nfunc init() {\n\n\terr := CheckIPTables()\n\tif err != nil {\n\t\tlog.Printf(\"Unable to find iptables, using fallback\")\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tIPTablesPath = filepath.Join(wd, IPTablesPath)\n\t}\n\n}\n\n\/\/ NOTEs from messing with iptables proxying:\n\/\/ For 
external:\n\/\/ iptables -A PREROUTING -t nat -p tcp -m tcp --dport 5555 -j REDIRECT --to-ports 49278\n\/\/ For internal:\n\/\/ iptables -A OUTPUT -t nat -p tcp -m tcp --dport 5555 -j REDIRECT --to-ports 49278\n\/\/ To delete a rule, use -D rather than -A.\n\ntype Action bool\n\nconst (\n\tINSERT Action = true\n\tDELETE = false\n)\n\nfunc CheckIPTables() error {\n\tcmd := exec.Command(IPTablesPath, \"--list\", \"--wait\")\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ Invoke one iptables command.\n\/\/ Expects \"iptables\" in the path to be runnable with reasonable permissions.\nfunc iptables(action Action, chain string, source, target int, ipAddress string) *exec.Cmd {\n\tvar cmd *exec.Cmd\n\n\tswitch action {\n\tcase INSERT:\n\t\tcmd = exec.Command(\n\t\t\tIPTablesPath, \"--insert\", chain, \"1\",\n\t\t\t\"--table\", \"nat\",\n\t\t\t\"--protocol\", \"tcp\",\n\t\t\t\/\/ Prevent redirection of packets already going to the container\n\t\t\t\"--match\", \"tcp\", \"!\", \"--destination\", ipAddress,\n\t\t\t\/\/ Prevent redirection of ports on remote servers\n\t\t\t\/\/ (i.e., don't make google:80 hit our container)\n\t\t\t\"--match\", \"addrtype\", \"--dst-type\", \"LOCAL\",\n\t\t\t\"--dport\", fmt.Sprint(source),\n\t\t\t\"--jump\", \"REDIRECT\",\n\t\t\t\"--to-ports\", fmt.Sprint(target), \"--wait\")\n\tcase DELETE:\n\t\tcmd = exec.Command(\n\t\t\tIPTablesPath, \"--delete\", chain,\n\t\t\t\"--table\", \"nat\",\n\t\t\t\"--protocol\", \"tcp\",\n\t\t\t\"--match\", \"tcp\", \"!\", \"--destination\", ipAddress,\n\t\t\t\"--match\", \"addrtype\", \"--dst-type\", \"LOCAL\",\n\t\t\t\"--dport\", fmt.Sprint(source),\n\t\t\t\"--jump\", \"REDIRECT\",\n\t\t\t\"--to-ports\", fmt.Sprint(target), \"--wait\")\n\t}\n\tcmd.Stderr = os.Stderr\n\treturn cmd\n}\n\n\/\/ Configure one port redirect from `source` to `target` using iptables.\n\/\/ Returns a function which undoes the change to the firewall, and an error.\nfunc ConfigureRedirect(source, target int, ipAddress string) (func(), error) {\n\n\terr := iptables(INSERT, \"PREROUTING\", source, target, ipAddress).Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = iptables(INSERT, \"OUTPUT\", source, target, ipAddress).Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tremove := func() {\n\t\terr := iptables(DELETE, \"PREROUTING\", source, target, ipAddress).Run()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to remove iptables rule:\", source, target)\n\t\t}\n\t\terr = iptables(DELETE, \"OUTPUT\", source, target, ipAddress).Run()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to remove iptables rule:\", source, target)\n\t\t}\n\t}\n\treturn remove, nil\n}\n
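\n\/\/ Hedged usage sketch of ConfigureRedirect (the port numbers and container\n\/\/ address below are placeholders, not values used by this program):\nfunc exampleRedirect() error {\n\tundo, err := ConfigureRedirect(8080, 49278, \"172.17.0.2\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Tear the rules down again once the container goes away.\n\tdefer undo()\n\t\/\/ ... traffic to localhost:8080 is redirected while the rules are active ...\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2018 Pani Networks\n\/\/ All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 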
See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage commands\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/romana\/core\/cli\/util\"\n\t\"github.com\/romana\/core\/common\"\n\t\"github.com\/romana\/core\/common\/api\"\n\n\t\"github.com\/go-resty\/resty\"\n\tms \"github.com\/mitchellh\/mapstructure\"\n\tcli \"github.com\/spf13\/cobra\"\n\tconfig \"github.com\/spf13\/viper\"\n)\n\n\/\/ topologyCmd represents the topology commands\nvar topologyCmd = &cli.Command{\n\tUse: \"topology [update|list]\",\n\tShort: \"Update or List topology for romana services.\",\n\tLong: `Update or List topology for romana services.\n\ntopology requires a subcommand, e.g. ` + \"`romana topology list`.\" + `\n\nFor more information, please check http:\/\/docs.romana.io\n`,\n}\n\nfunc init() {\n\ttopologyCmd.AddCommand(topologyListCmd)\n\ttopologyCmd.AddCommand(topologyUpdateCmd)\n}\n\nvar topologyListCmd = &cli.Command{\n\tUse: \"list\",\n\tShort: \"List romana topology.\",\n\tLong: `List romana topology.`,\n\tRunE: topologyList,\n\tSilenceUsage: true,\n}\n\nvar topologyUpdateCmd = &cli.Command{\n\tUse: \"update [file name]\",\n\tShort: \"Update romana topology.\",\n\tLong: `Update romana topology.`,\n\tRunE: topologyUpdate,\n\tSilenceUsage: true,\n}\n\nfunc topologyList(cmd *cli.Command, args []string) error {\n\trootURL := config.GetString(\"RootURL\")\n\tresp, err := resty.R().Get(rootURL + \"\/topology\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tJSONFormat(resp.Body(), os.Stdout)\n\t} else {\n\t\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\\t', 0)\n\n\t\tif resp.StatusCode() == http.StatusOK {\n\t\t\tvar topology api.TopologyUpdateRequest\n\t\t\terr := json.Unmarshal(resp.Body(), &topology)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Networks\")\n\t\t\t\tfmt.Fprint(w, \"Name\\tCIDR\\tTenants\\n\")\n\t\t\t\tfor _, n := range topology.Networks {\n\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%v\\n\",\n\t\t\t\t\t\tn.Name,\n\t\t\t\t\t\tn.CIDR,\n\t\t\t\t\t\tn.Tenants,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\tfor _, t := range topology.Topologies {\n\t\t\t\t\tfmt.Printf(\"Topology for Network\/s: %s\\n\", t.Networks)\n\t\t\t\t\tfmt.Fprint(w, \"Name\\tCIDR\\tNodes\\n\")\n\t\t\t\t\tfor _, m := range t.Map {\n\t\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\", m.Name, m.CIDR)\n\t\t\t\t\t\tfor _, n := range m.Groups {\n\t\t\t\t\t\t\tfmt.Fprintf(w, \"%s(%s), \", n.Name, n.IP)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Error: %s \\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tvar e Error\n\t\t\tjson.Unmarshal(resp.Body(), &e)\n\n\t\t\tfmt.Println(\"Host Error\")\n\t\t\tfmt.Fprintf(w, \"Fields\\t%s\\n\", e.Fields)\n\t\t\tfmt.Fprintf(w, \"Message\\t%s\\n\", e.Message)\n\t\t\tfmt.Fprintf(w, \"Status\\t%d\\n\", resp.StatusCode())\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ topologyUpdate updates romana topology.\n\/\/ The features supported are:\n\/\/ * Topology update through file\n\/\/ * Topology update while taking input from standard\n\/\/ input (STDIN) instead of a file\nfunc topologyUpdate(cmd *cli.Command, args []string) error {\n\tvar buf []byte\n\tvar err error\n\tisFile := true\n\n\tif len(args) == 0 {\n\t\tisFile = false\n\t\tbuf, err = ioutil.ReadAll(os.Stdin)\n\t\tif err != nil 
{\n\t\t\tutil.UsageError(cmd,\n\t\t\t\t\"TOPOLOGY FILE name or piped input from 'STDIN' expected.\")\n\t\t\treturn fmt.Errorf(\"cannot read 'STDIN': %s\", err)\n\t\t}\n\t} else if len(args) != 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\t\"TOPOLOGY FILE name or piped input from 'STDIN' expected.\")\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tvar topology api.TopologyUpdateRequest\n\tif isFile {\n\t\tpBuf, err := ioutil.ReadFile(args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"file error: %s\", err)\n\t\t}\n\t\terr = json.Unmarshal(pBuf, &topology)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = json.Unmarshal(buf, &topology)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresp, err := resty.R().SetHeader(\"Content-Type\", \"application\/json\").\n\t\tSetBody(topology).Post(rootURL + \"\/topology\")\n\tif err != nil {\n\t\tlog.Printf(\"Error updating topology: %v\\n\", err)\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tif string(resp.Body()) == \"\" || string(resp.Body()) == \"null\" {\n\t\t\tvar h common.HttpError\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &h}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[\"details\"] = resp.Status()\n\t\t\tm[\"status_code\"] = resp.StatusCode()\n\t\t\terr = decoder.Decode(m)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstatus, _ := json.MarshalIndent(h, \"\", \"\\t\")\n\t\t\tfmt.Println(string(status))\n\t\t} else {\n\t\t\tJSONFormat(resp.Body(), os.Stdout)\n\t\t}\n\t} else {\n\t\tif resp.StatusCode() == http.StatusOK {\n\t\t\tfmt.Println(\"Topology updated successfully.\")\n\t\t} else {\n\t\t\tfmt.Printf(\"Error upadting topology: %s\\n\", resp.Status())\n\t\t}\n\t}\n\n\treturn nil\n}\ncli: remove extraneous error message.\/\/ Copyright (c) 2018 Pani Networks\n\/\/ All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage commands\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/romana\/core\/cli\/util\"\n\t\"github.com\/romana\/core\/common\"\n\t\"github.com\/romana\/core\/common\/api\"\n\n\t\"github.com\/go-resty\/resty\"\n\tms \"github.com\/mitchellh\/mapstructure\"\n\tcli \"github.com\/spf13\/cobra\"\n\tconfig \"github.com\/spf13\/viper\"\n)\n\n\/\/ topologyCmd represents the topology commands\nvar topologyCmd = &cli.Command{\n\tUse: \"topology [update|list]\",\n\tShort: \"Update or List topology for romana services.\",\n\tLong: `Update or List topology for romana services.\n\ntopology requires a subcommand, e.g. 
` + \"`romana topology list`.\" + `\n\nFor more information, please check http:\/\/docs.romana.io\n`,\n}\n\nfunc init() {\n\ttopologyCmd.AddCommand(topologyListCmd)\n\ttopologyCmd.AddCommand(topologyUpdateCmd)\n}\n\nvar topologyListCmd = &cli.Command{\n\tUse: \"list\",\n\tShort: \"List romana topology.\",\n\tLong: `List romana topology.`,\n\tRunE: topologyList,\n\tSilenceUsage: true,\n}\n\nvar topologyUpdateCmd = &cli.Command{\n\tUse: \"update [file name]\",\n\tShort: \"Update romana topology.\",\n\tLong: `Update romana topology.`,\n\tRunE: topologyUpdate,\n\tSilenceUsage: true,\n}\n\nfunc topologyList(cmd *cli.Command, args []string) error {\n\trootURL := config.GetString(\"RootURL\")\n\tresp, err := resty.R().Get(rootURL + \"\/topology\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tJSONFormat(resp.Body(), os.Stdout)\n\t} else {\n\t\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\\t', 0)\n\n\t\tif resp.StatusCode() == http.StatusOK {\n\t\t\tvar topology api.TopologyUpdateRequest\n\t\t\terr := json.Unmarshal(resp.Body(), &topology)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Networks\")\n\t\t\t\tfmt.Fprint(w, \"Name\\tCIDR\\tTenants\\n\")\n\t\t\t\tfor _, n := range topology.Networks {\n\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t%v\\n\",\n\t\t\t\t\t\tn.Name,\n\t\t\t\t\t\tn.CIDR,\n\t\t\t\t\t\tn.Tenants,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\tfor _, t := range topology.Topologies {\n\t\t\t\t\tfmt.Printf(\"Topology for Network\/s: %s\\n\", t.Networks)\n\t\t\t\t\tfmt.Fprint(w, \"Name\\tCIDR\\tNodes\\n\")\n\t\t\t\t\tfor _, m := range t.Map {\n\t\t\t\t\t\tfmt.Fprintf(w, \"%s\\t%s\\t\", m.Name, m.CIDR)\n\t\t\t\t\t\tfor _, n := range m.Groups {\n\t\t\t\t\t\t\tfmt.Fprintf(w, \"%s(%s), \", n.Name, n.IP)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Error: %s \\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tvar e Error\n\t\t\tjson.Unmarshal(resp.Body(), &e)\n\n\t\t\tfmt.Println(\"Host Error\")\n\t\t\tfmt.Fprintf(w, \"Fields\\t%s\\n\", e.Fields)\n\t\t\tfmt.Fprintf(w, \"Message\\t%s\\n\", e.Message)\n\t\t\tfmt.Fprintf(w, \"Status\\t%d\\n\", resp.StatusCode())\n\t\t}\n\t\tw.Flush()\n\t}\n\n\treturn nil\n}\n\n\/\/ topologyUpdate updates romana topology.\n\/\/ The features supported are:\n\/\/ * Topology update through file\n\/\/ * Topology update while taking input from standard\n\/\/ input (STDIN) instead of a file\nfunc topologyUpdate(cmd *cli.Command, args []string) error {\n\tvar buf []byte\n\tvar err error\n\tisFile := true\n\n\tif len(args) == 0 {\n\t\tisFile = false\n\t\tbuf, err = ioutil.ReadAll(os.Stdin)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot read 'STDIN': %s\", err)\n\t\t}\n\t} else if len(args) != 1 {\n\t\treturn util.UsageError(cmd,\n\t\t\t\"TOPOLOGY FILE name or piped input from 'STDIN' expected.\")\n\t}\n\n\trootURL := config.GetString(\"RootURL\")\n\n\tvar topology api.TopologyUpdateRequest\n\tif isFile {\n\t\tpBuf, err := ioutil.ReadFile(args[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"file error: %s\", err)\n\t\t}\n\t\terr = json.Unmarshal(pBuf, &topology)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = json.Unmarshal(buf, &topology)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresp, err := resty.R().SetHeader(\"Content-Type\", \"application\/json\").\n\t\tSetBody(topology).Post(rootURL + \"\/topology\")\n\tif err != nil {\n\t\tlog.Printf(\"Error updating topology: 
%v\\n\", err)\n\t\treturn err\n\t}\n\n\tif config.GetString(\"Format\") == \"json\" {\n\t\tif string(resp.Body()) == \"\" || string(resp.Body()) == \"null\" {\n\t\t\tvar h common.HttpError\n\t\t\tdc := &ms.DecoderConfig{TagName: \"json\", Result: &h}\n\t\t\tdecoder, err := ms.NewDecoder(dc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm := make(map[string]interface{})\n\t\t\tm[\"details\"] = resp.Status()\n\t\t\tm[\"status_code\"] = resp.StatusCode()\n\t\t\terr = decoder.Decode(m)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstatus, _ := json.MarshalIndent(h, \"\", \"\\t\")\n\t\t\tfmt.Println(string(status))\n\t\t} else {\n\t\t\tJSONFormat(resp.Body(), os.Stdout)\n\t\t}\n\t} else {\n\t\tif resp.StatusCode() == http.StatusOK {\n\t\t\tfmt.Println(\"Topology updated successfully.\")\n\t\t} else {\n\t\t\tfmt.Printf(\"Error upadting topology: %s\\n\", resp.Status())\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/go:generate go run gen.go\n\n\/\/ This program generates internet protocol constants and tables by\n\/\/ reading IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar registries = []struct {\n\turl string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/icmp-parameters\/icmp-parameters.xml\",\n\t\tparseICMPv4Parameters,\n\t},\n}\n\nfunc main() {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"\/\/ go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"package ipv4\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := http.Get(r.url)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tfmt.Fprintf(os.Stderr, \"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := ioutil.WriteFile(\"iana.go\", b, 0644); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseICMPv4Parameters(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar icp icmpv4Parameters\n\tif err := dec.Decode(&icp); err != nil {\n\t\treturn err\n\t}\n\tprs := icp.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"ICMPType%s ICMPType = %d\", pr.Descr, pr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", pr.OrigDescr)\n\t}\n\tfmt.Fprintf(w, \")\\n\\n\")\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"var icmpTypes = map[ICMPType]string{\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"%d: %q,\\n\", pr.Value, strings.ToLower(pr.OrigDescr))\n\t}\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\ntype icmpv4Parameters struct 
{\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tRegistries []struct {\n\t\tTitle string `xml:\"title\"`\n\t\tRecords []struct {\n\t\t\tValue string `xml:\"value\"`\n\t\t\tDescr string `xml:\"description\"`\n\t\t} `xml:\"record\"`\n\t} `xml:\"registry\"`\n}\n\ntype canonICMPv4ParamRecord struct {\n\tOrigDescr string\n\tDescr string\n\tValue int\n}\n\nfunc (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {\n\tid := -1\n\tfor i, r := range icp.Registries {\n\t\tif strings.Contains(r.Title, \"Type\") || strings.Contains(r.Title, \"type\") {\n\t\t\tid = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif id < 0 {\n\t\treturn nil\n\t}\n\tprs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))\n\tsr := strings.NewReplacer(\n\t\t\"Messages\", \"\",\n\t\t\"Message\", \"\",\n\t\t\"ICMP\", \"\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range icp.Registries[id].Records {\n\t\tif strings.Contains(pr.Descr, \"Reserved\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Unassigned\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Deprecated\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Experiment\") ||\n\t\t\tstrings.Contains(pr.Descr, \"experiment\") {\n\t\t\tcontinue\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\ts := strings.TrimSpace(prs[i].Descr)\n\t\tprs[i].OrigDescr = s\n\t\tprs[i].Descr = sr.Replace(s)\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\ngo.net\/ipv4: make use of go generate to create system adaptation files\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/go:generate go run gen.go\n\n\/\/ This program generates system adaptation constants and types,\n\/\/ internet protocol constants and tables by reading template files\n\/\/ and IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tif err := genzsys(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := geniana(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc genzsys() error {\n\tdefs := \"defs_\" + runtime.GOOS + \".go\"\n\tf, err := os.Open(defs)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tf.Close()\n\tcmd := exec.Command(\"go\", \"tool\", \"cgo\", \"-godefs\", defs)\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch runtime.GOOS {\n\tcase \"dragonfly\", \"solaris\":\n\t\t\/\/ The ipv4 package still supports go1.2, and so we\n\t\t\/\/ need to take care of additional platforms in go1.3\n\t\t\/\/ and above for working with go1.2.\n\t\tb = bytes.Replace(b, []byte(\"package ipv4\\n\"), []byte(\"\/\/ +build \"+runtime.GOOS+\"\\n\\npackage ipv4\\n\"), 1)\n\t}\n\tb, err = format.Source(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(\"zsys_\"+runtime.GOOS+\".go\", b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n
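\n\/\/ For reference, the iana.go emitted by geniana below begins roughly like\n\/\/ this (an illustrative excerpt pieced together from the Fprintf calls in\n\/\/ parseICMPv4Parameters; the exact entries depend on the live IANA registry):\n\/\/\n\/\/\tconst (\n\/\/\t\tICMPTypeEchoReply ICMPType = 0 \/\/ Echo Reply\n\/\/\t\t...\n\/\/\t)\n\/\/\n\/\/\tvar icmpTypes = map[ICMPType]string{\n\/\/\t\t0: \"echo reply\",\n\/\/\t\t...\n\/\/\t}\n\nvar registries = []struct {\n\turl string\n\tparse func(io.Writer, io.Reader) 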
error\n}{\n\t{\n\t\t\"http:\/\/www.iana.org\/assignments\/icmp-parameters\/icmp-parameters.xml\",\n\t\tparseICMPv4Parameters,\n\t},\n}\n\nfunc geniana() error {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"\/\/ go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"\/\/ GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"package ipv4\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := http.Get(r.url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn fmt.Errorf(\"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(\"iana.go\", b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc parseICMPv4Parameters(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar icp icmpv4Parameters\n\tif err := dec.Decode(&icp); err != nil {\n\t\treturn err\n\t}\n\tprs := icp.escape()\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"ICMPType%s ICMPType = %d\", pr.Descr, pr.Value)\n\t\tfmt.Fprintf(w, \"\/\/ %s\\n\", pr.OrigDescr)\n\t}\n\tfmt.Fprintf(w, \")\\n\\n\")\n\tfmt.Fprintf(w, \"\/\/ %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"var icmpTypes = map[ICMPType]string{\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"%d: %q,\\n\", pr.Value, strings.ToLower(pr.OrigDescr))\n\t}\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\ntype icmpv4Parameters struct {\n\tXMLName xml.Name `xml:\"registry\"`\n\tTitle string `xml:\"title\"`\n\tUpdated string `xml:\"updated\"`\n\tRegistries []struct {\n\t\tTitle string `xml:\"title\"`\n\t\tRecords []struct {\n\t\t\tValue string `xml:\"value\"`\n\t\t\tDescr string `xml:\"description\"`\n\t\t} `xml:\"record\"`\n\t} `xml:\"registry\"`\n}\n\ntype canonICMPv4ParamRecord struct {\n\tOrigDescr string\n\tDescr string\n\tValue int\n}\n\nfunc (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {\n\tid := -1\n\tfor i, r := range icp.Registries {\n\t\tif strings.Contains(r.Title, \"Type\") || strings.Contains(r.Title, \"type\") {\n\t\t\tid = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif id < 0 {\n\t\treturn nil\n\t}\n\tprs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))\n\tsr := strings.NewReplacer(\n\t\t\"Messages\", \"\",\n\t\t\"Message\", \"\",\n\t\t\"ICMP\", \"\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"\/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range icp.Registries[id].Records {\n\t\tif strings.Contains(pr.Descr, \"Reserved\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Unassigned\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Deprecated\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Experiment\") ||\n\t\t\tstrings.Contains(pr.Descr, \"experiment\") {\n\t\t\tcontinue\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\ts := strings.TrimSpace(prs[i].Descr)\n\t\tprs[i].OrigDescr = s\n\t\tprs[i].Descr = sr.Replace(s)\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n<|endoftext|>"} {"text":"package registry\n\nimport 
(\n\t\"context\"\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/weaveworks\/flux\"\n\t\"github.com\/weaveworks\/flux\/registry\/cache\"\n)\n\n\/\/ A client represents an entity that returns manifest and tags\n\/\/ information. It might be a cache, it might be a real registry.\ntype Client interface {\n\tTags(id flux.ImageID) ([]string, error)\n\tManifest(id flux.ImageID) (flux.Image, error)\n\tCancel()\n}\n\n\/\/ ---\n\n\/\/ An implementation of Client that represents a Remote registry.\n\/\/ E.g. docker hub.\ntype Remote struct {\n\tRegistry HerokuRegistryLibrary\n\tCancelFunc context.CancelFunc\n}\n\n\/\/ Return the tags for this repository.\nfunc (a *Remote) Tags(id flux.ImageID) ([]string, error) {\n\treturn a.Registry.Tags(id.NamespaceImage())\n}\n\n\/\/ We need to do some adapting here to convert from the return values\n\/\/ from dockerregistry to our domain types.\nfunc (a *Remote) Manifest(id flux.ImageID) (flux.Image, error) {\n\thistory, err := a.Registry.Manifest(id.NamespaceImage(), id.Tag)\n\tif err != nil || history == nil {\n\t\treturn flux.Image{}, errors.Wrap(err, \"getting remote manifest\")\n\t}\n\n\t\/\/ the manifest includes some v1-backwards-compatibility data,\n\t\/\/ oddly called \"History\", which are layer metadata as JSON\n\t\/\/ strings; these appear most-recent (i.e., topmost layer) first,\n\t\/\/ so happily we can just decode the first entry to get a created\n\t\/\/ time.\n\ttype v1image struct {\n\t\tCreated time.Time `json:\"created\"`\n\t}\n\tvar topmost v1image\n\tvar img flux.Image\n\timg.ID = id\n\tif len(history) > 0 {\n\t\tif err = json.Unmarshal([]byte(history[0].V1Compatibility), &topmost); err == nil {\n\t\t\tif !topmost.Created.IsZero() {\n\t\t\t\timg.CreatedAt = topmost.Created\n\t\t\t}\n\t\t}\n\t}\n\n\treturn img, nil\n}\n\n\/\/ Cancel the remote request\nfunc (a *Remote) Cancel() {\n\ta.CancelFunc()\n}\n\n\/\/ ---\n\n\/\/ An implementation of Client backed by Memcache\ntype Cache struct {\n\tcreds Credentials\n\texpiry time.Duration\n\tcr cache.Reader\n\tlogger log.Logger\n}\n\nfunc (*Cache) Cancel() {\n\treturn\n}\n\nfunc NewCache(creds Credentials, cr cache.Reader, expiry time.Duration, logger log.Logger) Client {\n\treturn &Cache{\n\t\tcreds: creds,\n\t\texpiry: expiry,\n\t\tcr: cr,\n\t\tlogger: logger,\n\t}\n}\n\nfunc (c *Cache) Manifest(id flux.ImageID) (flux.Image, error) {\n\tcreds := c.creds.credsFor(id.Host)\n\tkey, err := cache.NewManifestKey(creds.username, id)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\tval, err := c.cr.GetKey(key)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\tvar img flux.Image\n\terr = json.Unmarshal(val, &img)\n\tif err != nil {\n\t\tc.logger.Log(\"err\", err.Error)\n\t\treturn flux.Image{}, err\n\t}\n\treturn img, nil\n}\n\nfunc (c *Cache) Tags(id flux.ImageID) ([]string, error) {\n\tcreds := c.creds.credsFor(id.Host)\n\tkey, err := cache.NewTagKey(creds.username, id)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tval, err := c.cr.GetKey(key)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvar tags []string\n\terr = json.Unmarshal(val, &tags)\n\tif err != nil {\n\t\tc.logger.Log(\"err\", err.Error)\n\t\treturn []string{}, err\n\t}\n\treturn tags, nil\n}\nTry to get a schema2 manifest firstpackage registry\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\tdockerregistry 
\"github.com\/heroku\/docker-registry-client\/registry\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/weaveworks\/flux\"\n\t\"github.com\/weaveworks\/flux\/registry\/cache\"\n)\n\n\/\/ A client represents an entity that returns manifest and tags\n\/\/ information. It might be a cache, it might be a real registry.\ntype Client interface {\n\tTags(id flux.ImageID) ([]string, error)\n\tManifest(id flux.ImageID) (flux.Image, error)\n\tCancel()\n}\n\n\/\/ ---\n\n\/\/ An implementation of Client that represents a Remote registry.\n\/\/ E.g. docker hub.\ntype Remote struct {\n\tRegistry *herokuManifestAdaptor\n\tCancelFunc context.CancelFunc\n}\n\n\/\/ Return the tags for this repository.\nfunc (a *Remote) Tags(id flux.ImageID) ([]string, error) {\n\treturn a.Registry.Tags(id.NamespaceImage())\n}\n\n\/\/ We need to do some adapting here to convert from the return values\n\/\/ from dockerregistry to our domain types.\nfunc (a *Remote) Manifest(id flux.ImageID) (flux.Image, error) {\n\tmanifestV2, err := a.Registry.ManifestV2(id.NamespaceImage(), id.Tag)\n\tif err != nil {\n\t\tif err, ok := err.(*dockerregistry.HttpStatusError); ok {\n\t\t\tif err.Response.StatusCode == http.StatusNotFound {\n\t\t\t\treturn a.ManifestFromV1(id)\n\t\t\t}\n\t\t}\n\t\treturn flux.Image{}, err\n\t}\n\t\/\/ The above request will happily return a bogus, empty manifest\n\t\/\/ if handed something other than a schema2 manifest.\n\tif manifestV2.Config.Digest == \"\" {\n\t\treturn a.ManifestFromV1(id)\n\t}\n\n\t\/\/ schema2 manifests have a reference to a blog that contains the\n\t\/\/ image config. We have to fetch that in order to get the created\n\t\/\/ datetime.\n\tconf := manifestV2.Config\n\treader, err := a.Registry.DownloadLayer(id.NamespaceImage(), conf.Digest)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\tif reader == nil {\n\t\treturn flux.Image{}, fmt.Errorf(\"nil reader from DownloadLayer\")\n\t}\n\n\ttype config struct {\n\t\tCreated time.Time `json:created`\n\t}\n\tvar imageConf config\n\n\terr = json.NewDecoder(reader).Decode(&imageConf)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\treturn flux.Image{\n\t\tID: id,\n\t\tCreatedAt: imageConf.Created,\n\t}, nil\n}\n\nfunc (a *Remote) ManifestFromV1(id flux.ImageID) (flux.Image, error) {\n\thistory, err := a.Registry.Manifest(id.NamespaceImage(), id.Tag)\n\tif err != nil || history == nil {\n\t\treturn flux.Image{}, errors.Wrap(err, \"getting remote manifest\")\n\t}\n\n\t\/\/ the manifest includes some v1-backwards-compatibility data,\n\t\/\/ oddly called \"History\", which are layer metadata as JSON\n\t\/\/ strings; these appear most-recent (i.e., topmost layer) first,\n\t\/\/ so happily we can just decode the first entry to get a created\n\t\/\/ time.\n\ttype v1image struct {\n\t\tCreated time.Time `json:\"created\"`\n\t}\n\tvar topmost v1image\n\tvar img flux.Image\n\timg.ID = id\n\tif len(history) > 0 {\n\t\tif err = json.Unmarshal([]byte(history[0].V1Compatibility), &topmost); err == nil {\n\t\t\tif !topmost.Created.IsZero() {\n\t\t\t\timg.CreatedAt = topmost.Created\n\t\t\t}\n\t\t}\n\t}\n\n\treturn img, nil\n}\n\n\/\/ Cancel the remote request\nfunc (a *Remote) Cancel() {\n\ta.CancelFunc()\n}\n\n\/\/ ---\n\n\/\/ An implementation of Client backed by Memcache\ntype Cache struct {\n\tcreds Credentials\n\texpiry time.Duration\n\tcr cache.Reader\n\tlogger log.Logger\n}\n\nfunc (*Cache) Cancel() {\n\treturn\n}\n\nfunc NewCache(creds Credentials, cr cache.Reader, expiry time.Duration, logger log.Logger) Client {\n\treturn 
&Cache{\n\t\tcreds: creds,\n\t\texpiry: expiry,\n\t\tcr: cr,\n\t\tlogger: logger,\n\t}\n}\n\nfunc (c *Cache) Manifest(id flux.ImageID) (flux.Image, error) {\n\tcreds := c.creds.credsFor(id.Host)\n\tkey, err := cache.NewManifestKey(creds.username, id)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\tval, err := c.cr.GetKey(key)\n\tif err != nil {\n\t\treturn flux.Image{}, err\n\t}\n\tvar img flux.Image\n\terr = json.Unmarshal(val, &img)\n\tif err != nil {\n\t\tc.logger.Log(\"err\", err.Error)\n\t\treturn flux.Image{}, err\n\t}\n\treturn img, nil\n}\n\nfunc (c *Cache) Tags(id flux.ImageID) ([]string, error) {\n\tcreds := c.creds.credsFor(id.Host)\n\tkey, err := cache.NewTagKey(creds.username, id)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tval, err := c.cr.GetKey(key)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvar tags []string\n\terr = json.Unmarshal(val, &tags)\n\tif err != nil {\n\t\tc.logger.Log(\"err\", err.Error)\n\t\treturn []string{}, err\n\t}\n\treturn tags, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016-2019 The Libsacloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sacloud\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sacloud\/libsacloud\/v2\/sacloud\/accessor\"\n\t\"github.com\/sacloud\/libsacloud\/v2\/sacloud\/types\"\n)\n\n\/\/ StateWaiter リソースの状態が変わるまで待機する\ntype StateWaiter interface {\n\t\/\/ WaitForState リソースが指定の状態になるまで待つ\n\tWaitForState(context.Context) (interface{}, error)\n\t\/\/ AsyncWaitForState リソースが指定の状態になるまで待つ\n\tAsyncWaitForState(context.Context) (compCh <-chan interface{}, progressCh <-chan interface{}, errorCh <-chan error)\n}\n\nvar (\n\t\/\/ DefaultStatePollingTimeout StatePollWaiterでのデフォルトタイムアウト\n\tDefaultStatePollingTimeout = 20 * time.Minute\n\t\/\/ DefaultStatePollingInterval StatePollWaiterでのデフォルトポーリング間隔\n\tDefaultStatePollingInterval = 5 * time.Second\n)\n\n\/\/ StateReadFunc StatePollWaiterにより利用される、対象リソースの状態を取得するためのfunc\ntype StateReadFunc func() (state interface{}, err error)\n\n\/\/ StateCheckFunc StateReadFuncで得たリソースの情報を元に待ちを継続するか判定するためのfunc\n\/\/\n\/\/ StatePollWaiterのフィールドとして設定する\ntype StateCheckFunc func(target interface{}) (exit bool, err error)\n\n\/\/ UnexpectedAvailabilityError 予期しないAvailabilityとなった場合のerror\ntype UnexpectedAvailabilityError struct {\n\t\/\/ Err エラー詳細\n\tErr error\n}\n\n\/\/ Error errorインターフェース実装\nfunc (e *UnexpectedAvailabilityError) Error() string {\n\treturn fmt.Sprintf(\"resource returns unexpected availability value: %s\", e.Err.Error())\n}\n\n\/\/ UnexpectedInstanceStatusError 予期しないInstanceStatusとなった場合のerror\ntype UnexpectedInstanceStatusError struct {\n\t\/\/ Err エラー詳細\n\tErr error\n}\n\n\/\/ Error errorインターフェース実装\nfunc (e *UnexpectedInstanceStatusError) Error() string {\n\treturn fmt.Sprintf(\"resource returns unexpected instance status value: %s\", e.Err.Error())\n}\n\n\/\/ StatePollingWaiter ポーリングによりリソースの状態が変わるまで待機する\ntype StatePollingWaiter struct {\n\t\/\/ NotFoundRetry 
Readで404が返ってきた場合のリトライ回数\n\t\/\/\n\t\/\/ アプライアンスなどの一部のリソースでは作成~起動完了までの間に404を返すことがある。\n\t\/\/ これに対応するためこのフィールドにて404発生の許容回数を指定可能にする。\n\tNotFoundRetry int\n\n\t\/\/ ReadFunc 対象リソースの状態を取得するためのfunc\n\t\/\/\n\t\/\/ TargetAvailabilityを指定する場合はAvailabilityHolderを返す必要がある\n\t\/\/ もしAvailabilityHolderを実装しておらず、かつStateCheckFuncも未指定だった場合はタイムアウトまで完了しないため注意\n\tReadFunc StateReadFunc\n\n\t\/\/ TargetAvailability 対象リソースのAvailabilityがこの状態になった場合になるまで待つ\n\t\/\/\n\t\/\/ この値を指定する場合、ReadFuncにてAvailabilityHolderを返す必要がある。\n\t\/\/ AvailabilityがTargetAvailabilityとPendingAvailabilityで指定されていない状態になった場合はUnexpectedAvailabilityErrorを返す\n\t\/\/\n\t\/\/ TargetAvailability(Pending)とTargetInstanceState(Pending)の両方が指定された場合は両方を満たすまで待つ\n\t\/\/ StateCheckFuncとの併用は不可。併用した場合はpanicする。\n\tTargetAvailability []types.EAvailability\n\n\t\/\/ PendingAvailability 対象リソースのAvailabilityがこの状態になった場合は待ちを継続する。\n\t\/\/\n\t\/\/ 詳細はTargetAvailabilityのコメントを参照\n\tPendingAvailability []types.EAvailability\n\n\t\/\/ TargetInstanceStatus 対象リソースのInstanceStatusがこの状態になった場合になるまで待つ\n\t\/\/\n\t\/\/ この値を指定する場合、ReadFuncにてInstanceStatusHolderを返す必要がある。\n\t\/\/ InstanceStatusがTargetInstanceStatusとPendinngInstanceStatusで指定されていない状態になった場合はUnexpectedInstanceStatusErrorを返す\n\t\/\/\n\t\/\/ TargetAvailabilityとTargetInstanceStateの両方が指定された場合は両方を満たすまで待つ\n\t\/\/\n\t\/\/ StateCheckFuncとの併用は不可。併用した場合はpanicする。\n\tTargetInstanceStatus []types.EServerInstanceStatus\n\n\t\/\/ PendingInstanceStatus 対象リソースのInstanceStatusがこの状態になった場合は待ちを継続する。\n\t\/\/\n\t\/\/ 詳細はTargetInstanceStatusのコメントを参照\n\tPendingInstanceStatus []types.EServerInstanceStatus\n\n\t\/\/ StateCheckFunc ReadFuncで得たリソースの情報を元に待ちを継続するかの判定を行うためのfunc\n\t\/\/\n\t\/\/ TargetAvailabilityとTargetInstanceStateとの併用は不可。併用した場合panicする\n\tStateCheckFunc StateCheckFunc\n\n\t\/\/ Timeout タイムアウト\n\tTimeout time.Duration \/\/ タイムアウト\n\t\/\/ PollingInterval ポーリング間隔\n\tPollingInterval time.Duration\n}\n\nfunc (w *StatePollingWaiter) validateFields() {\n\tif w.ReadFunc == nil {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: ReadFunc is required\"))\n\t}\n\n\tif w.StateCheckFunc != nil && (len(w.TargetAvailability) > 0 || len(w.TargetInstanceStatus) > 0) {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: StateCheckFunc and TargetAvailability\/TargetInstanceStatus can not use together\"))\n\t}\n\n\tif w.StateCheckFunc == nil && len(w.TargetAvailability) == 0 && len(w.TargetInstanceStatus) == 0 {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: TargetAvailability or TargetInstanceState must have least 1 items when StateCheckFunc is not set\"))\n\t}\n}\n\nfunc (w *StatePollingWaiter) defaults() {\n\n\tif w.Timeout == time.Duration(0) {\n\t\tw.Timeout = DefaultStatePollingTimeout\n\t}\n\tif w.PollingInterval == time.Duration(0) {\n\t\tw.PollingInterval = DefaultStatePollingInterval\n\t}\n}\n\n\/\/ WaitForState リソースが指定の状態になるまで待つ\nfunc (w *StatePollingWaiter) WaitForState(ctx context.Context) (interface{}, error) {\n\tc, p, e := w.AsyncWaitForState(ctx)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tcase lastState := <-c:\n\t\t\treturn lastState, nil\n\t\tcase <-p:\n\t\t\t\/\/ noop\n\t\tcase err := <-e:\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\n\/\/ AsyncWaitForState リソースが指定の状態になるまで待つ\nfunc (w *StatePollingWaiter) AsyncWaitForState(ctx context.Context) (compCh <-chan interface{}, progressCh <-chan interface{}, errorCh <-chan error) {\n\n\tw.validateFields()\n\tw.defaults()\n\n\tcompChan := make(chan interface{})\n\tprogChan := make(chan interface{})\n\terrChan := make(chan 
error)\n\n\tticker := time.NewTicker(w.PollingInterval)\n\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(ctx, w.Timeout)\n\t\tdefer cancel()\n\n\t\tdefer ticker.Stop()\n\n\t\tdefer close(compChan)\n\t\tdefer close(progChan)\n\t\tdefer close(errChan)\n\n\t\tnotFoundCounter := w.NotFoundRetry\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\terrChan <- ctx.Err()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tstate, err := w.ReadFunc()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif IsNotFoundError(err) {\n\t\t\t\t\t\tnotFoundCounter--\n\t\t\t\t\t\tif notFoundCounter >= 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\texit, err := w.handleState(state)\n\t\t\t\tif exit {\n\t\t\t\t\tcompChan <- state\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif state != nil {\n\t\t\t\t\tprogChan <- state\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tcompCh = compChan\n\tprogressCh = progChan\n\terrorCh = errChan\n\treturn\n}\n\nfunc (w *StatePollingWaiter) handleState(state interface{}) (bool, error) {\n\tif w.StateCheckFunc != nil {\n\t\treturn w.StateCheckFunc(state)\n\t}\n\n\tavailabilityHolder, hasAvailability := state.(accessor.Availability)\n\tinstanceStateHolder, hasInstanceState := state.(accessor.InstanceStatus)\n\n\tswitch {\n\tcase hasAvailability && hasInstanceState:\n\n\t\tres1, err := w.handleAvailability(availabilityHolder)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tres2, err := w.handleInstanceState(instanceStateHolder)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn res1 && res2, nil\n\n\tcase hasAvailability:\n\t\treturn w.handleAvailability(availabilityHolder)\n\tcase hasInstanceState:\n\t\treturn w.handleInstanceState(instanceStateHolder)\n\tdefault:\n\t\t\/\/ どちらのインターフェースも実装していない場合、stateが存在するだけでtrueとする\n\t\treturn true, nil\n\t}\n}\n\nfunc (w *StatePollingWaiter) handleAvailability(state accessor.Availability) (bool, error) {\n\tif len(w.TargetAvailability) == 0 {\n\t\treturn true, nil\n\t}\n\tv := state.GetAvailability()\n\tswitch {\n\tcase w.isInAvailability(v, w.TargetAvailability):\n\t\treturn true, nil\n\tcase w.isInAvailability(v, w.PendingAvailability):\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"got unexpected value of Availability: got %q\", v)\n\t}\n}\n\nfunc (w *StatePollingWaiter) handleInstanceState(state accessor.InstanceStatus) (bool, error) {\n\tif len(w.TargetInstanceStatus) == 0 {\n\t\treturn true, nil\n\t}\n\tv := state.GetInstanceStatus()\n\tswitch {\n\tcase w.isInInstanceStatus(v, w.TargetInstanceStatus):\n\t\treturn true, nil\n\tcase w.isInInstanceStatus(v, w.PendingInstanceStatus):\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"got unexpected value of InstanceState: got %q\", v)\n\t}\n}\n\nfunc (w *StatePollingWaiter) isInAvailability(v types.EAvailability, conds []types.EAvailability) bool {\n\tfor _, cond := range conds {\n\t\tif v == cond {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (w *StatePollingWaiter) isInInstanceStatus(v types.EServerInstanceStatus, conds []types.EServerInstanceStatus) bool {\n\tfor _, cond := range conds {\n\t\tif v == cond {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\nlint: nakedret\/\/ Copyright 2016-2019 The Libsacloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You 
may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage sacloud\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sacloud\/libsacloud\/v2\/sacloud\/accessor\"\n\t\"github.com\/sacloud\/libsacloud\/v2\/sacloud\/types\"\n)\n\n\/\/ StateWaiter リソースの状態が変わるまで待機する\ntype StateWaiter interface {\n\t\/\/ WaitForState リソースが指定の状態になるまで待つ\n\tWaitForState(context.Context) (interface{}, error)\n\t\/\/ AsyncWaitForState リソースが指定の状態になるまで待つ\n\tAsyncWaitForState(context.Context) (compCh <-chan interface{}, progressCh <-chan interface{}, errorCh <-chan error)\n}\n\nvar (\n\t\/\/ DefaultStatePollingTimeout StatePollWaiterでのデフォルトタイムアウト\n\tDefaultStatePollingTimeout = 20 * time.Minute\n\t\/\/ DefaultStatePollingInterval StatePollWaiterでのデフォルトポーリング間隔\n\tDefaultStatePollingInterval = 5 * time.Second\n)\n\n\/\/ StateReadFunc StatePollWaiterにより利用される、対象リソースの状態を取得するためのfunc\ntype StateReadFunc func() (state interface{}, err error)\n\n\/\/ StateCheckFunc StateReadFuncで得たリソースの情報を元に待ちを継続するか判定するためのfunc\n\/\/\n\/\/ StatePollWaiterのフィールドとして設定する\ntype StateCheckFunc func(target interface{}) (exit bool, err error)\n\n\/\/ UnexpectedAvailabilityError 予期しないAvailabilityとなった場合のerror\ntype UnexpectedAvailabilityError struct {\n\t\/\/ Err エラー詳細\n\tErr error\n}\n\n\/\/ Error errorインターフェース実装\nfunc (e *UnexpectedAvailabilityError) Error() string {\n\treturn fmt.Sprintf(\"resource returns unexpected availability value: %s\", e.Err.Error())\n}\n\n\/\/ UnexpectedInstanceStatusError 予期しないInstanceStatusとなった場合のerror\ntype UnexpectedInstanceStatusError struct {\n\t\/\/ Err エラー詳細\n\tErr error\n}\n\n\/\/ Error errorインターフェース実装\nfunc (e *UnexpectedInstanceStatusError) Error() string {\n\treturn fmt.Sprintf(\"resource returns unexpected instance status value: %s\", e.Err.Error())\n}\n\n\/\/ StatePollingWaiter ポーリングによりリソースの状態が変わるまで待機する\ntype StatePollingWaiter struct {\n\t\/\/ NotFoundRetry Readで404が返ってきた場合のリトライ回数\n\t\/\/\n\t\/\/ アプライアンスなどの一部のリソースでは作成~起動完了までの間に404を返すことがある。\n\t\/\/ これに対応するためこのフィールドにて404発生の許容回数を指定可能にする。\n\tNotFoundRetry int\n\n\t\/\/ ReadFunc 対象リソースの状態を取得するためのfunc\n\t\/\/\n\t\/\/ TargetAvailabilityを指定する場合はAvailabilityHolderを返す必要がある\n\t\/\/ もしAvailabilityHolderを実装しておらず、かつStateCheckFuncも未指定だった場合はタイムアウトまで完了しないため注意\n\tReadFunc StateReadFunc\n\n\t\/\/ TargetAvailability 対象リソースのAvailabilityがこの状態になった場合になるまで待つ\n\t\/\/\n\t\/\/ この値を指定する場合、ReadFuncにてAvailabilityHolderを返す必要がある。\n\t\/\/ AvailabilityがTargetAvailabilityとPendingAvailabilityで指定されていない状態になった場合はUnexpectedAvailabilityErrorを返す\n\t\/\/\n\t\/\/ TargetAvailability(Pending)とTargetInstanceState(Pending)の両方が指定された場合は両方を満たすまで待つ\n\t\/\/ StateCheckFuncとの併用は不可。併用した場合はpanicする。\n\tTargetAvailability []types.EAvailability\n\n\t\/\/ PendingAvailability 対象リソースのAvailabilityがこの状態になった場合は待ちを継続する。\n\t\/\/\n\t\/\/ 詳細はTargetAvailabilityのコメントを参照\n\tPendingAvailability []types.EAvailability\n\n\t\/\/ TargetInstanceStatus 対象リソースのInstanceStatusがこの状態になった場合になるまで待つ\n\t\/\/\n\t\/\/ この値を指定する場合、ReadFuncにてInstanceStatusHolderを返す必要がある。\n\t\/\/ InstanceStatusがTargetInstanceStatusとPendinngInstanceStatusで指定されていない状態になった場合はUnexpectedInstanceStatusErrorを返す\n\t\/\/\n\t\/\/ 
TargetAvailabilityとTargetInstanceStateの両方が指定された場合は両方を満たすまで待つ\n\t\/\/\n\t\/\/ StateCheckFuncとの併用は不可。併用した場合はpanicする。\n\tTargetInstanceStatus []types.EServerInstanceStatus\n\n\t\/\/ PendingInstanceStatus 対象リソースのInstanceStatusがこの状態になった場合は待ちを継続する。\n\t\/\/\n\t\/\/ 詳細はTargetInstanceStatusのコメントを参照\n\tPendingInstanceStatus []types.EServerInstanceStatus\n\n\t\/\/ StateCheckFunc ReadFuncで得たリソースの情報を元に待ちを継続するかの判定を行うためのfunc\n\t\/\/\n\t\/\/ TargetAvailabilityとTargetInstanceStateとの併用は不可。併用した場合panicする\n\tStateCheckFunc StateCheckFunc\n\n\t\/\/ Timeout タイムアウト\n\tTimeout time.Duration \/\/ タイムアウト\n\t\/\/ PollingInterval ポーリング間隔\n\tPollingInterval time.Duration\n}\n\nfunc (w *StatePollingWaiter) validateFields() {\n\tif w.ReadFunc == nil {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: ReadFunc is required\"))\n\t}\n\n\tif w.StateCheckFunc != nil && (len(w.TargetAvailability) > 0 || len(w.TargetInstanceStatus) > 0) {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: StateCheckFunc and TargetAvailability\/TargetInstanceStatus can not use together\"))\n\t}\n\n\tif w.StateCheckFunc == nil && len(w.TargetAvailability) == 0 && len(w.TargetInstanceStatus) == 0 {\n\t\tpanic(errors.New(\"StatePollingWaiter has invalid setting: TargetAvailability or TargetInstanceState must have least 1 items when StateCheckFunc is not set\"))\n\t}\n}\n\nfunc (w *StatePollingWaiter) defaults() {\n\n\tif w.Timeout == time.Duration(0) {\n\t\tw.Timeout = DefaultStatePollingTimeout\n\t}\n\tif w.PollingInterval == time.Duration(0) {\n\t\tw.PollingInterval = DefaultStatePollingInterval\n\t}\n}\n\n\/\/ WaitForState リソースが指定の状態になるまで待つ\nfunc (w *StatePollingWaiter) WaitForState(ctx context.Context) (interface{}, error) {\n\tc, p, e := w.AsyncWaitForState(ctx)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tcase lastState := <-c:\n\t\t\treturn lastState, nil\n\t\tcase <-p:\n\t\t\t\/\/ noop\n\t\tcase err := <-e:\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\n\/\/ AsyncWaitForState リソースが指定の状態になるまで待つ\nfunc (w *StatePollingWaiter) AsyncWaitForState(ctx context.Context) (compCh <-chan interface{}, progressCh <-chan interface{}, errorCh <-chan error) {\n\n\tw.validateFields()\n\tw.defaults()\n\n\tcompChan := make(chan interface{})\n\tprogChan := make(chan interface{})\n\terrChan := make(chan error)\n\n\tticker := time.NewTicker(w.PollingInterval)\n\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(ctx, w.Timeout)\n\t\tdefer cancel()\n\n\t\tdefer ticker.Stop()\n\n\t\tdefer close(compChan)\n\t\tdefer close(progChan)\n\t\tdefer close(errChan)\n\n\t\tnotFoundCounter := w.NotFoundRetry\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\terrChan <- ctx.Err()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tstate, err := w.ReadFunc()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif IsNotFoundError(err) {\n\t\t\t\t\t\tnotFoundCounter--\n\t\t\t\t\t\tif notFoundCounter >= 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\texit, err := w.handleState(state)\n\t\t\t\tif exit {\n\t\t\t\t\tcompChan <- state\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif state != nil {\n\t\t\t\t\tprogChan <- state\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tcompCh = compChan\n\tprogressCh = progChan\n\terrorCh = errChan\n\treturn compCh, progressCh, errorCh\n}\n\nfunc (w *StatePollingWaiter) handleState(state interface{}) (bool, error) {\n\tif w.StateCheckFunc != nil 
{\n\t\treturn w.StateCheckFunc(state)\n\t}\n\n\tavailabilityHolder, hasAvailability := state.(accessor.Availability)\n\tinstanceStateHolder, hasInstanceState := state.(accessor.InstanceStatus)\n\n\tswitch {\n\tcase hasAvailability && hasInstanceState:\n\n\t\tres1, err := w.handleAvailability(availabilityHolder)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tres2, err := w.handleInstanceState(instanceStateHolder)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn res1 && res2, nil\n\n\tcase hasAvailability:\n\t\treturn w.handleAvailability(availabilityHolder)\n\tcase hasInstanceState:\n\t\treturn w.handleInstanceState(instanceStateHolder)\n\tdefault:\n\t\t\/\/ どちらのインターフェースも実装していない場合、stateが存在するだけでtrueとする\n\t\treturn true, nil\n\t}\n}\n\nfunc (w *StatePollingWaiter) handleAvailability(state accessor.Availability) (bool, error) {\n\tif len(w.TargetAvailability) == 0 {\n\t\treturn true, nil\n\t}\n\tv := state.GetAvailability()\n\tswitch {\n\tcase w.isInAvailability(v, w.TargetAvailability):\n\t\treturn true, nil\n\tcase w.isInAvailability(v, w.PendingAvailability):\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"got unexpected value of Availability: got %q\", v)\n\t}\n}\n\nfunc (w *StatePollingWaiter) handleInstanceState(state accessor.InstanceStatus) (bool, error) {\n\tif len(w.TargetInstanceStatus) == 0 {\n\t\treturn true, nil\n\t}\n\tv := state.GetInstanceStatus()\n\tswitch {\n\tcase w.isInInstanceStatus(v, w.TargetInstanceStatus):\n\t\treturn true, nil\n\tcase w.isInInstanceStatus(v, w.PendingInstanceStatus):\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"got unexpected value of InstanceState: got %q\", v)\n\t}\n}\n\nfunc (w *StatePollingWaiter) isInAvailability(v types.EAvailability, conds []types.EAvailability) bool {\n\tfor _, cond := range conds {\n\t\tif v == cond {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (w *StatePollingWaiter) isInInstanceStatus(v types.EServerInstanceStatus, conds []types.EServerInstanceStatus) bool {\n\tfor _, cond := range conds {\n\t\tif v == cond {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"package jobs\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"strconv\"\n\n\tlog \"github.com\/meifamily\/logrus\"\n\n\t\"github.com\/meifamily\/ptt-alertor\/crawler\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/article\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/pushsum\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/subscription\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/user\"\n)\n\n\/\/ change overdueHour must change cronjob replacepushsumkey in the mean time\nconst overdueHour = 48 * time.Hour\nconst pauseCheckPushSum = 5 * time.Minute\n\nvar psCker *pushSumChecker\nvar pscOnce sync.Once\n\ntype pushSumChecker struct {\n\tChecker\n\tch chan pushSumChecker\n}\n\nfunc NewPushSumChecker() *pushSumChecker {\n\tpscOnce.Do(func() {\n\t\tpsCker = &pushSumChecker{}\n\t\tpsCker.done = make(chan struct{})\n\t\tpsCker.ch = make(chan pushSumChecker)\n\t})\n\treturn psCker\n}\n\nfunc (psc pushSumChecker) String() string {\n\ttextMap := map[string]string{\n\t\t\"pushup\": \"推文數\",\n\t\t\"pushdown\": \"噓文數\",\n\t}\n\tsubType := textMap[psc.subType]\n\treturn fmt.Sprintf(\"%s@%s\\r\\n看板:%s;%s:%s%s\", psc.word, psc.board, psc.board, subType, psc.word, psc.articles.StringWithPushSum())\n}\n\ntype BoardArticles struct {\n\tboard string\n\tarticles 
article.Articles\n}\n\nfunc (psc pushSumChecker) Stop() {\n\tpsc.done <- struct{}{}\n\tlog.Info(\"Pushsum Checker Stop\")\n}\n\nfunc (psc pushSumChecker) Run() {\n\tbaCh := make(chan BoardArticles)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tboards := pushsum.List()\n\t\t\t\tfor _, board := range boards {\n\t\t\t\t\tba := BoardArticles{board: board}\n\t\t\t\t\tpsc.crawlArticles(ba, baCh)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(pauseCheckPushSum)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase ba := <-baCh:\n\t\t\tpsc.board = ba.board\n\t\t\tif len(ba.articles) > 0 {\n\t\t\t\tgo psc.checkSubscribers(ba)\n\t\t\t}\n\t\tcase pscker := <-psc.ch:\n\t\t\tckCh <- pscker\n\t\tcase <-psc.done:\n\t\t\tcancel()\n\t\t\tfor len(baCh) > 0 {\n\t\t\t\t<-baCh\n\t\t\t}\n\t\t\tfor len(psc.ch) > 0 {\n\t\t\t\t<-psc.ch\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (psc pushSumChecker) crawlArticles(ba BoardArticles, baCh chan BoardArticles) {\n\tcurrentPage, err := crawler.CurrentPage(ba.board)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"board\": ba.board,\n\t\t}).WithError(err).Error(\"Get CurrentPage Failed\")\n\t\tbaCh <- ba\n\t\treturn\n\t}\n\nPage:\n\tfor page := currentPage; page > 0; page-- {\n\t\tarticles, _ := crawler.BuildArticles(ba.board, page)\n\t\tfor i := len(articles) - 1; i > 0; i-- {\n\t\t\ta := articles[i]\n\t\t\tif a.ID == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tloc := time.FixedZone(\"CST\", 8*60*60)\n\t\t\tt, err := time.ParseInLocation(\"1\/02\", a.Date, loc)\n\t\t\tnow := time.Now()\n\t\t\tnowDate := now.Truncate(24 * time.Hour)\n\t\t\tif t.Month() > now.Month() {\n\t\t\t\tt = t.AddDate(now.Year()-1, 0, 0)\n\t\t\t} else {\n\t\t\t\tt = t.AddDate(now.Year(), 0, 0)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"board\": ba.board,\n\t\t\t\t\t\"page\": page,\n\t\t\t\t}).WithError(err).Error(\"Parse DateTime Error\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nowDate.After(t.Add(overdueHour)) {\n\t\t\t\tbreak Page\n\t\t\t}\n\t\t\tba.articles = append(ba.articles, a)\n\t\t}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"board\": ba.board,\n\t\t\"total\": len(ba.articles),\n\t}).Info(\"PushSum Crawl Finish\")\n\n\tbaCh <- ba\n}\n\nfunc (psc pushSumChecker) checkSubscribers(ba BoardArticles) {\n\tsubs := pushsum.ListSubscribers(ba.board)\n\tfor _, account := range subs {\n\t\tu := models.User.Find(account)\n\t\tpsc.Profile = u.Profile\n\t\tgo psc.checkPushSum(u, ba, checkUp)\n\t\tgo psc.checkPushSum(u, ba, checkDown)\n\t}\n}\n\ntype checkPushSumFn func(*pushSumChecker, subscription.Subscription, article.Articles) (article.Articles, []int)\n\nfunc checkUp(psc *pushSumChecker, sub subscription.Subscription, articles article.Articles) (upArticles article.Articles, ids []int) {\n\tpsc.word = strconv.Itoa(sub.Up)\n\tpsc.subType = \"pushup\"\n\tif sub.Up != 0 {\n\t\tfor _, a := range articles {\n\t\t\tif a.PushSum >= sub.Up {\n\t\t\t\tupArticles = append(upArticles, a)\n\t\t\t\tids = append(ids, a.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn upArticles, ids\n}\n\nfunc checkDown(psc *pushSumChecker, sub subscription.Subscription, articles article.Articles) (downArticles article.Articles, ids []int) {\n\tdown := sub.Down * -1\n\tpsc.word = strconv.Itoa(down)\n\tpsc.subType = \"pushdown\"\n\tif sub.Down != 0 {\n\t\tfor _, a := range articles {\n\t\t\tif a.PushSum <= down {\n\t\t\t\tdownArticles = append(downArticles, a)\n\t\t\t\tids 
= append(ids, a.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn downArticles, ids\n}\n\nfunc (psc pushSumChecker) checkPushSum(u user.User, ba BoardArticles, checkFn checkPushSumFn) {\n\tvar articles article.Articles\n\tvar ids []int\n\tfor _, sub := range u.Subscribes {\n\t\tif strings.EqualFold(sub.Board, ba.board) {\n\t\t\tarticles, ids = checkFn(&psc, sub, ba.articles)\n\t\t}\n\t}\n\tif len(articles) > 0 {\n\t\tpsc.articles = psc.toSendArticles(ids, articles)\n\t\tif len(psc.articles) > 0 {\n\t\t\tpsc.ch <- psc\n\t\t}\n\t}\n}\n\nfunc (psc pushSumChecker) toSendArticles(ids []int, articles article.Articles) article.Articles {\n\tkindMap := map[string]string{\n\t\t\"pushup\": \"up\",\n\t\t\"pushdown\": \"down\",\n\t}\n\tids = pushsum.DiffList(psc.Profile.Account, psc.board, kindMap[psc.subType], ids...)\n\tdiffIds := make(map[int]bool)\n\tfor _, id := range ids {\n\t\tdiffIds[id] = true\n\t}\n\tsendArticles := make(article.Articles, 0)\n\tfor _, a := range articles {\n\t\tif diffIds[a.ID] {\n\t\t\tsendArticles = append(sendArticles, a)\n\t\t}\n\t}\n\treturn sendArticles\n}\n:zap: crawl pushsum page parallelpackage jobs\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"strconv\"\n\n\tlog \"github.com\/meifamily\/logrus\"\n\n\t\"github.com\/meifamily\/ptt-alertor\/crawler\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/article\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/pushsum\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/subscription\"\n\t\"github.com\/meifamily\/ptt-alertor\/models\/user\"\n)\n\n\/\/ NewPushSumKeyReplacer Job schedule must be longer than overdueHour\nconst overdueHour = 48 * time.Hour\n\nvar psCker *pushSumChecker\nvar pscOnce sync.Once\n\ntype pushSumChecker struct {\n\tChecker\n\tch chan pushSumChecker\n\tduration time.Duration\n}\n\nfunc NewPushSumChecker() *pushSumChecker {\n\tpscOnce.Do(func() {\n\t\tpsCker = &pushSumChecker{\n\t\t\tduration: 3 * time.Second,\n\t\t}\n\t\tpsCker.done = make(chan struct{})\n\t\tpsCker.ch = make(chan pushSumChecker)\n\t})\n\treturn psCker\n}\n\nfunc (psc pushSumChecker) String() string {\n\ttextMap := map[string]string{\n\t\t\"pushup\": \"推文數\",\n\t\t\"pushdown\": \"噓文數\",\n\t}\n\tsubType := textMap[psc.subType]\n\treturn fmt.Sprintf(\"%s@%s\\r\\n看板:%s;%s:%s%s\", psc.word, psc.board, psc.board, subType, psc.word, psc.articles.StringWithPushSum())\n}\n\ntype BoardArticles struct {\n\tboard string\n\tarticles article.Articles\n}\n\nfunc (psc pushSumChecker) Stop() {\n\tpsc.done <- struct{}{}\n\tlog.Info(\"Pushsum Checker Stop\")\n}\n\nfunc (psc pushSumChecker) Run() {\n\tbaCh := make(chan BoardArticles)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tboards := pushsum.List()\n\t\t\t\tfor _, board := range boards {\n\t\t\t\t\tba := BoardArticles{board: board}\n\t\t\t\t\ttime.Sleep(psc.duration)\n\t\t\t\t\tgo psc.crawlArticles(ba, baCh)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase ba := <-baCh:\n\t\t\tpsc.board = ba.board\n\t\t\tif len(ba.articles) > 0 {\n\t\t\t\tgo psc.checkSubscribers(ba)\n\t\t\t}\n\t\tcase pscker := <-psc.ch:\n\t\t\tckCh <- pscker\n\t\tcase <-psc.done:\n\t\t\tcancel()\n\t\t\tfor len(baCh) > 0 {\n\t\t\t\t<-baCh\n\t\t\t}\n\t\t\tfor len(psc.ch) > 0 {\n\t\t\t\t<-psc.ch\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (psc pushSumChecker) crawlArticles(ba BoardArticles, baCh chan 
BoardArticles) {\n\tcurrentPage, err := crawler.CurrentPage(ba.board)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"board\": ba.board,\n\t\t}).WithError(err).Error(\"Get CurrentPage Failed\")\n\t\tbaCh <- ba\n\t\treturn\n\t}\n\nPage:\n\tfor page := currentPage; page > 0; page-- {\n\t\tarticles, _ := crawler.BuildArticles(ba.board, page)\n\t\tfor i := len(articles) - 1; i > 0; i-- {\n\t\t\ta := articles[i]\n\t\t\tif a.ID == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tloc := time.FixedZone(\"CST\", 8*60*60)\n\t\t\tt, err := time.ParseInLocation(\"1\/02\", a.Date, loc)\n\t\t\tnow := time.Now()\n\t\t\tnowDate := now.Truncate(24 * time.Hour)\n\t\t\tif t.Month() > now.Month() {\n\t\t\t\tt = t.AddDate(now.Year()-1, 0, 0)\n\t\t\t} else {\n\t\t\t\tt = t.AddDate(now.Year(), 0, 0)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"board\": ba.board,\n\t\t\t\t\t\"page\": page,\n\t\t\t\t}).WithError(err).Error(\"Parse DateTime Error\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nowDate.After(t.Add(overdueHour)) {\n\t\t\t\tbreak Page\n\t\t\t}\n\t\t\tba.articles = append(ba.articles, a)\n\t\t}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"board\": ba.board,\n\t\t\"total\": len(ba.articles),\n\t}).Info(\"PushSum Crawl Finish\")\n\n\tbaCh <- ba\n}\n\nfunc (psc pushSumChecker) checkSubscribers(ba BoardArticles) {\n\tsubs := pushsum.ListSubscribers(ba.board)\n\tfor _, account := range subs {\n\t\tu := models.User.Find(account)\n\t\tpsc.Profile = u.Profile\n\t\tgo psc.checkPushSum(u, ba, checkUp)\n\t\tgo psc.checkPushSum(u, ba, checkDown)\n\t}\n}\n\ntype checkPushSumFn func(*pushSumChecker, subscription.Subscription, article.Articles) (article.Articles, []int)\n\nfunc checkUp(psc *pushSumChecker, sub subscription.Subscription, articles article.Articles) (upArticles article.Articles, ids []int) {\n\tpsc.word = strconv.Itoa(sub.Up)\n\tpsc.subType = \"pushup\"\n\tif sub.Up != 0 {\n\t\tfor _, a := range articles {\n\t\t\tif a.PushSum >= sub.Up {\n\t\t\t\tupArticles = append(upArticles, a)\n\t\t\t\tids = append(ids, a.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn upArticles, ids\n}\n\nfunc checkDown(psc *pushSumChecker, sub subscription.Subscription, articles article.Articles) (downArticles article.Articles, ids []int) {\n\tdown := sub.Down * -1\n\tpsc.word = strconv.Itoa(down)\n\tpsc.subType = \"pushdown\"\n\tif sub.Down != 0 {\n\t\tfor _, a := range articles {\n\t\t\tif a.PushSum <= down {\n\t\t\t\tdownArticles = append(downArticles, a)\n\t\t\t\tids = append(ids, a.ID)\n\t\t\t}\n\t\t}\n\t}\n\treturn downArticles, ids\n}\n\nfunc (psc pushSumChecker) checkPushSum(u user.User, ba BoardArticles, checkFn checkPushSumFn) {\n\tvar articles article.Articles\n\tvar ids []int\n\tfor _, sub := range u.Subscribes {\n\t\tif strings.EqualFold(sub.Board, ba.board) {\n\t\t\tarticles, ids = checkFn(&psc, sub, ba.articles)\n\t\t}\n\t}\n\tif len(articles) > 0 {\n\t\tpsc.articles = psc.toSendArticles(ids, articles)\n\t\tif len(psc.articles) > 0 {\n\t\t\tpsc.ch <- psc\n\t\t}\n\t}\n}\n\nfunc (psc pushSumChecker) toSendArticles(ids []int, articles article.Articles) article.Articles {\n\tkindMap := map[string]string{\n\t\t\"pushup\": \"up\",\n\t\t\"pushdown\": \"down\",\n\t}\n\tids = pushsum.DiffList(psc.Profile.Account, psc.board, kindMap[psc.subType], ids...)\n\tdiffIds := make(map[int]bool)\n\tfor _, id := range ids {\n\t\tdiffIds[id] = true\n\t}\n\tsendArticles := make(article.Articles, 0)\n\tfor _, a := range articles {\n\t\tif diffIds[a.ID] {\n\t\t\tsendArticles = append(sendArticles, a)\n\t\t}\n\t}\n\treturn 
sendArticles\n}\n<|endoftext|>"} {"text":"package iris_test\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\n\t\"gopkg.in\/kataras\/iris.v6\"\n\t\"gopkg.in\/kataras\/iris.v6\/adaptors\/gorillamux\"\n\t\"gopkg.in\/kataras\/iris.v6\/httptest\"\n)\n\nfunc newGorillaMuxAPP() *iris.Framework {\n\tapp := iris.New()\n\tapp.Adapt(gorillamux.New())\n\n\treturn app\n}\n\nfunc TestGorillaMuxSimple(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\ttestRoutes := []testRoute{\n\t\t\/\/ FOUND - registered\n\t\t{\"GET\", \"\/test_get\", \"\/test_get\", \"\", \"hello, get!\", 200, true, nil, nil},\n\t\t{\"POST\", \"\/test_post\", \"\/test_post\", \"\", \"hello, post!\", 200, true, nil, nil},\n\t\t{\"PUT\", \"\/test_put\", \"\/test_put\", \"\", \"hello, put!\", 200, true, nil, nil},\n\t\t{\"DELETE\", \"\/test_delete\", \"\/test_delete\", \"\", \"hello, delete!\", 200, true, nil, nil},\n\t\t{\"HEAD\", \"\/test_head\", \"\/test_head\", \"\", \"hello, head!\", 200, true, nil, nil},\n\t\t{\"OPTIONS\", \"\/test_options\", \"\/test_options\", \"\", \"hello, options!\", 200, true, nil, nil},\n\t\t{\"CONNECT\", \"\/test_connect\", \"\/test_connect\", \"\", \"hello, connect!\", 200, true, nil, nil},\n\t\t{\"PATCH\", \"\/test_patch\", \"\/test_patch\", \"\", \"hello, patch!\", 200, true, nil, nil},\n\t\t{\"TRACE\", \"\/test_trace\", \"\/test_trace\", \"\", \"hello, trace!\", 200, true, nil, nil},\n\t\t\/\/ NOT FOUND - not registered\n\t\t{\"GET\", \"\/test_get_nofound\", \"\/test_get_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"POST\", \"\/test_post_nofound\", \"\/test_post_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"PUT\", \"\/test_put_nofound\", \"\/test_put_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"DELETE\", \"\/test_delete_nofound\", \"\/test_delete_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"HEAD\", \"\/test_head_nofound\", \"\/test_head_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"OPTIONS\", \"\/test_options_nofound\", \"\/test_options_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"CONNECT\", \"\/test_connect_nofound\", \"\/test_connect_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"PATCH\", \"\/test_patch_nofound\", \"\/test_patch_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"TRACE\", \"\/test_trace_nofound\", \"\/test_trace_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t\/\/ Parameters\n\t\t{\"GET\", \"\/test_get_parameter1\/{name}\", \"\/test_get_parameter1\/iris\", \"\", \"name=iris\", 200, true, []param{{\"name\", \"iris\"}}, nil},\n\t\t{\"GET\", \"\/test_get_parameter2\/{name}\/details\/{something}\", \"\/test_get_parameter2\/iris\/details\/anything\", \"\", \"name=iris,something=anything\", 200, true, []param{{\"name\", \"iris\"}, {\"something\", \"anything\"}}, nil},\n\t\t{\"GET\", \"\/test_get_parameter2\/{name}\/details\/{something}\/{else:.*}\", \"\/test_get_parameter2\/iris\/details\/anything\/elsehere\", \"\", \"name=iris,something=anything,else=elsehere\", 200, true, []param{{\"name\", \"iris\"}, {\"something\", \"anything\"}, {\"else\", \"elsehere\"}}, nil},\n\t\t\/\/ URL Parameters\n\t\t{\"GET\", \"\/test_get_urlparameter1\/first\", \"\/test_get_urlparameter1\/first\", \"name=irisurl\", \"name=irisurl\", 200, true, nil, []param{{\"name\", \"irisurl\"}}},\n\t\t{\"GET\", \"\/test_get_urlparameter2\/second\", \"\/test_get_urlparameter2\/second\", \"name=irisurl&something=anything\", \"name=irisurl,something=anything\", 200, true, nil, 
[]param{{\"name\", \"irisurl\"}, {\"something\", \"anything\"}}},\n\t\t{\"GET\", \"\/test_get_urlparameter2\/first\/second\/third\", \"\/test_get_urlparameter2\/first\/second\/third\", \"name=irisurl&something=anything&else=elsehere\", \"name=irisurl,something=anything,else=elsehere\", 200, true, nil, []param{{\"name\", \"irisurl\"}, {\"something\", \"anything\"}, {\"else\", \"elsehere\"}}},\n\t}\n\n\tfor idx := range testRoutes {\n\t\tr := testRoutes[idx]\n\t\tif r.Register {\n\t\t\tapp.HandleFunc(r.Method, r.Path, func(ctx *iris.Context) {\n\t\t\t\tctx.SetStatusCode(r.Status)\n\t\t\t\tif r.Params != nil && len(r.Params) > 0 {\n\t\t\t\t\tctx.Writef(ctx.ParamsSentence())\n\t\t\t\t} else if r.URLParams != nil && len(r.URLParams) > 0 {\n\t\t\t\t\tif len(r.URLParams) != len(ctx.URLParams()) {\n\t\t\t\t\t\tt.Fatalf(\"Error when comparing length of url parameters %d != %d\", len(r.URLParams), len(ctx.URLParams()))\n\t\t\t\t\t}\n\t\t\t\t\tparamsKeyVal := \"\"\n\t\t\t\t\t\/\/\/TODO:\n\t\t\t\t\t\/\/ Gorilla mux saves and gets its vars by map, so no specific order\n\t\t\t\t\t\/\/\n\t\t\t\t\t\/\/ I should change this test below:\n\t\t\t\t\tfor idxp, p := range r.URLParams {\n\t\t\t\t\t\tval := ctx.URLParam(p.Key)\n\t\t\t\t\t\tparamsKeyVal += p.Key + \"=\" + val + \",\"\n\t\t\t\t\t\tif idxp == len(r.URLParams)-1 {\n\t\t\t\t\t\t\tparamsKeyVal = paramsKeyVal[0 : len(paramsKeyVal)-1]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tctx.Writef(paramsKeyVal)\n\t\t\t\t} else {\n\t\t\t\t\tctx.Writef(r.Body)\n\t\t\t\t}\n\n\t\t\t})\n\t\t}\n\t}\n\n\te := httptest.New(app, t)\n\n\t\/\/ run the tests (1)\n\tfor idx := range testRoutes {\n\t\tr := testRoutes[idx]\n\t\te.Request(r.Method, r.RequestPath).WithQueryString(r.RequestQuery).\n\t\t\tExpect().\n\t\t\tStatus(r.Status).Body().Equal(r.Body)\n\t}\n\n}\n\nfunc TestGorillaMuxSimpleParty(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\th := func(ctx *iris.Context) { ctx.WriteString(ctx.Host() + ctx.Path()) }\n\n\tif testEnableSubdomain {\n\t\tsubdomainParty := app.Party(testSubdomain + \".\")\n\t\t{\n\t\t\tsubdomainParty.Get(\"\/\", h)\n\t\t\tsubdomainParty.Get(\"\/path1\", h)\n\t\t\tsubdomainParty.Get(\"\/path2\", h)\n\t\t\tsubdomainParty.Get(\"\/namedpath\/{param1}\/something\/{param2}\", h)\n\t\t\tsubdomainParty.Get(\"\/namedpath\/{param1}\/something\/{param2}\/else\", h)\n\t\t}\n\t}\n\n\t\/\/ simple\n\tp := app.Party(\"\/party1\")\n\t{\n\t\tp.Get(\"\/\", h)\n\t\tp.Get(\"\/path1\", h)\n\t\tp.Get(\"\/path2\", h)\n\t\tp.Get(\"\/namedpath\/{param1}\/something\/{param2}\", h)\n\t\tp.Get(\"\/namedpath\/{param1}\/something\/{param2}\/else\", h)\n\t}\n\n\tapp.Config.VHost = \"0.0.0.0:\" + strconv.Itoa(getRandomNumber(2222, 2399))\n\t\/\/ app.Config.Tester.Debug = true\n\t\/\/ app.Config.Tester.ExplicitURL = true\n\te := httptest.New(app, t)\n\n\trequest := func(reqPath string) {\n\n\t\te.Request(\"GET\", reqPath).\n\t\t\tExpect().\n\t\t\tStatus(iris.StatusOK).Body().Equal(app.Config.VHost + reqPath)\n\t}\n\n\t\/\/ run the tests\n\trequest(\"\/party1\/\")\n\trequest(\"\/party1\/path1\")\n\trequest(\"\/party1\/path2\")\n\trequest(\"\/party1\/namedpath\/theparam1\/something\/theparam2\")\n\trequest(\"\/party1\/namedpath\/theparam1\/something\/theparam2\/else\")\n\n\tif testEnableSubdomain {\n\t\tes := subdomainTester(e, app)\n\t\tsubdomainRequest := func(reqPath string) {\n\t\t\tes.Request(\"GET\", reqPath).\n\t\t\t\tExpect().\n\t\t\t\tStatus(iris.StatusOK).Body().Equal(testSubdomainHost(app.Config.VHost) + 
reqPath)\n\t\t}\n\n\t\tsubdomainRequest(\"\/\")\n\t\tsubdomainRequest(\"\/path1\")\n\t\tsubdomainRequest(\"\/path2\")\n\t\tsubdomainRequest(\"\/namedpath\/theparam1\/something\/theparam2\")\n\t\tsubdomainRequest(\"\/namedpath\/theparam1\/something\/theparam2\/else\")\n\t}\n}\n\nfunc TestGorillaMuxPathEscape(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\tapp.Get(\"\/details\/{name}\", func(ctx *iris.Context) {\n\t\tname := ctx.Param(\"name\")\n\t\thighlight := ctx.URLParam(\"highlight\")\n\t\tctx.Writef(\"name=%s,highlight=%s\", name, highlight)\n\t})\n\n\te := httptest.New(app, t)\n\n\te.GET(\"\/details\/Sakamoto desu ga\").\n\t\tWithQuery(\"highlight\", \"text\").\n\t\tExpect().Status(iris.StatusOK).Body().Equal(\"name=Sakamoto desu ga,highlight=text\")\n}\n\nfunc TestGorillaMuxParamDecodedDecodeURL(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\tapp.Get(\"\/encoding\/{url}\", func(ctx *iris.Context) {\n\t\turl := iris.DecodeURL(ctx.ParamDecoded(\"url\"))\n\t\tctx.SetStatusCode(iris.StatusOK)\n\t\tctx.WriteString(url)\n\t})\n\n\te := httptest.New(app, t)\n\n\te.GET(\"\/encoding\/http%3A%2F%2Fsome-url.com\").Expect().Status(iris.StatusOK).Body().Equal(\"http:\/\/some-url.com\")\n}\n\nfunc TestGorillaMuxRouteURLPath(t *testing.T) {\n\tapp := iris.New()\n\tapp.Adapt(gorillamux.New())\n\n\tapp.None(\"\/profile\/{user_id}\/{ref}\/{anything:.*}\", nil).ChangeName(\"profile\")\n\tapp.Boot()\n\n\texpected := \"\/profile\/42\/iris-go\/something\"\n\n\tif got := app.Path(\"profile\", \"user_id\", 42, \"ref\", \"iris-go\", \"anything\", \"something\"); got != expected {\n\t\tt.Fatalf(\"gorillamux' reverse routing 'URLPath' error: expected %s but got %s\", expected, got)\n\t}\n}\ntest gorillamux params- order doesn't matters, todo done.package iris_test\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\n\t\"gopkg.in\/kataras\/iris.v6\"\n\t\"gopkg.in\/kataras\/iris.v6\/adaptors\/gorillamux\"\n\t\"gopkg.in\/kataras\/iris.v6\/httptest\"\n)\n\nfunc newGorillaMuxAPP() *iris.Framework {\n\tapp := iris.New()\n\tapp.Adapt(gorillamux.New())\n\n\treturn app\n}\n\nfunc TestGorillaMuxSimple(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\ttestRoutes := []testRoute{\n\t\t\/\/ FOUND - registered\n\t\t{\"GET\", \"\/test_get\", \"\/test_get\", \"\", \"hello, get!\", 200, true, nil, nil},\n\t\t{\"POST\", \"\/test_post\", \"\/test_post\", \"\", \"hello, post!\", 200, true, nil, nil},\n\t\t{\"PUT\", \"\/test_put\", \"\/test_put\", \"\", \"hello, put!\", 200, true, nil, nil},\n\t\t{\"DELETE\", \"\/test_delete\", \"\/test_delete\", \"\", \"hello, delete!\", 200, true, nil, nil},\n\t\t{\"HEAD\", \"\/test_head\", \"\/test_head\", \"\", \"hello, head!\", 200, true, nil, nil},\n\t\t{\"OPTIONS\", \"\/test_options\", \"\/test_options\", \"\", \"hello, options!\", 200, true, nil, nil},\n\t\t{\"CONNECT\", \"\/test_connect\", \"\/test_connect\", \"\", \"hello, connect!\", 200, true, nil, nil},\n\t\t{\"PATCH\", \"\/test_patch\", \"\/test_patch\", \"\", \"hello, patch!\", 200, true, nil, nil},\n\t\t{\"TRACE\", \"\/test_trace\", \"\/test_trace\", \"\", \"hello, trace!\", 200, true, nil, nil},\n\t\t\/\/ NOT FOUND - not registered\n\t\t{\"GET\", \"\/test_get_nofound\", \"\/test_get_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"POST\", \"\/test_post_nofound\", \"\/test_post_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"PUT\", \"\/test_put_nofound\", \"\/test_put_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"DELETE\", \"\/test_delete_nofound\", \"\/test_delete_nofound\", \"\", \"Not 
Found\", 404, false, nil, nil},\n\t\t{\"HEAD\", \"\/test_head_nofound\", \"\/test_head_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"OPTIONS\", \"\/test_options_nofound\", \"\/test_options_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"CONNECT\", \"\/test_connect_nofound\", \"\/test_connect_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"PATCH\", \"\/test_patch_nofound\", \"\/test_patch_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t{\"TRACE\", \"\/test_trace_nofound\", \"\/test_trace_nofound\", \"\", \"Not Found\", 404, false, nil, nil},\n\t\t\/\/ Parameters\n\t\t{\"GET\", \"\/test_get_parameter1\/{name}\", \"\/test_get_parameter1\/iris\", \"\", \"name=iris\", 200, true, []param{{\"name\", \"iris\"}}, nil},\n\t\t{\"GET\", \"\/test_get_parameter2\/{name}\/details\/{something}\", \"\/test_get_parameter2\/iris\/details\/anything\", \"\", \"name=iris,something=anything\", 200, true, []param{{\"name\", \"iris\"}, {\"something\", \"anything\"}}, nil},\n\t\t{\"GET\", \"\/test_get_parameter2\/{name}\/details\/{something}\/{else:.*}\", \"\/test_get_parameter2\/iris\/details\/anything\/elsehere\", \"\", \"name=iris,something=anything,else=elsehere\", 200, true, []param{{\"name\", \"iris\"}, {\"something\", \"anything\"}, {\"else\", \"elsehere\"}}, nil},\n\t\t\/\/ URL Parameters\n\t\t{\"GET\", \"\/test_get_urlparameter1\/first\", \"\/test_get_urlparameter1\/first\", \"name=irisurl\", \"name=irisurl\", 200, true, nil, []param{{\"name\", \"irisurl\"}}},\n\t\t{\"GET\", \"\/test_get_urlparameter2\/second\", \"\/test_get_urlparameter2\/second\", \"name=irisurl&something=anything\", \"name=irisurl,something=anything\", 200, true, nil, []param{{\"name\", \"irisurl\"}, {\"something\", \"anything\"}}},\n\t\t{\"GET\", \"\/test_get_urlparameter2\/first\/second\/third\", \"\/test_get_urlparameter2\/first\/second\/third\", \"name=irisurl&something=anything&else=elsehere\", \"name=irisurl,something=anything,else=elsehere\", 200, true, nil, []param{{\"name\", \"irisurl\"}, {\"something\", \"anything\"}, {\"else\", \"elsehere\"}}},\n\t}\n\n\tfor idx := range testRoutes {\n\t\tr := testRoutes[idx]\n\t\tif r.Register {\n\t\t\tapp.HandleFunc(r.Method, r.Path, func(ctx *iris.Context) {\n\t\t\t\tctx.SetStatusCode(r.Status)\n\t\t\t\tif r.Params != nil && len(r.Params) > 0 {\n\t\t\t\t\tctx.Writef(ctx.ParamsSentence())\n\t\t\t\t} else if r.URLParams != nil && len(r.URLParams) > 0 {\n\t\t\t\t\tif len(r.URLParams) != len(ctx.URLParams()) {\n\t\t\t\t\t\tt.Fatalf(\"Error when comparing length of url parameters %d != %d\", len(r.URLParams), len(ctx.URLParams()))\n\t\t\t\t\t}\n\t\t\t\t\tparamsKeyVal := \"\"\n\n\t\t\t\t\tfor idxp, p := range r.URLParams {\n\t\t\t\t\t\tval := ctx.URLParam(p.Key)\n\t\t\t\t\t\tparamsKeyVal += p.Key + \"=\" + val + \",\"\n\t\t\t\t\t\tif idxp == len(r.URLParams)-1 {\n\t\t\t\t\t\t\tparamsKeyVal = paramsKeyVal[0 : len(paramsKeyVal)-1]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tctx.Writef(paramsKeyVal)\n\t\t\t\t} else {\n\t\t\t\t\tctx.Writef(r.Body)\n\t\t\t\t}\n\n\t\t\t})\n\t\t}\n\t}\n\n\te := httptest.New(app, t)\n\n\t\/\/ run the tests (1)\n\tfor idx := range testRoutes {\n\t\tr := testRoutes[idx]\n\t\te.Request(r.Method, r.RequestPath).WithQueryString(r.RequestQuery).\n\t\t\tExpect().\n\t\t\t\/\/ compare just the Len because gorillamux gets and sets the vars as map, so the values are unorderded.\n\t\t\tStatus(r.Status).Body().Length().Equal(len(r.Body))\n\t}\n\n}\n\nfunc TestGorillaMuxSimpleParty(t *testing.T) {\n\tapp := 
newGorillaMuxAPP()\n\n\th := func(ctx *iris.Context) { ctx.WriteString(ctx.Host() + ctx.Path()) }\n\n\tif testEnableSubdomain {\n\t\tsubdomainParty := app.Party(testSubdomain + \".\")\n\t\t{\n\t\t\tsubdomainParty.Get(\"\/\", h)\n\t\t\tsubdomainParty.Get(\"\/path1\", h)\n\t\t\tsubdomainParty.Get(\"\/path2\", h)\n\t\t\tsubdomainParty.Get(\"\/namedpath\/{param1}\/something\/{param2}\", h)\n\t\t\tsubdomainParty.Get(\"\/namedpath\/{param1}\/something\/{param2}\/else\", h)\n\t\t}\n\t}\n\n\t\/\/ simple\n\tp := app.Party(\"\/party1\")\n\t{\n\t\tp.Get(\"\/\", h)\n\t\tp.Get(\"\/path1\", h)\n\t\tp.Get(\"\/path2\", h)\n\t\tp.Get(\"\/namedpath\/{param1}\/something\/{param2}\", h)\n\t\tp.Get(\"\/namedpath\/{param1}\/something\/{param2}\/else\", h)\n\t}\n\n\tapp.Config.VHost = \"0.0.0.0:\" + strconv.Itoa(getRandomNumber(2222, 2399))\n\t\/\/ app.Config.Tester.Debug = true\n\t\/\/ app.Config.Tester.ExplicitURL = true\n\te := httptest.New(app, t)\n\n\trequest := func(reqPath string) {\n\n\t\te.Request(\"GET\", reqPath).\n\t\t\tExpect().\n\t\t\tStatus(iris.StatusOK).Body().Equal(app.Config.VHost + reqPath)\n\t}\n\n\t\/\/ run the tests\n\trequest(\"\/party1\/\")\n\trequest(\"\/party1\/path1\")\n\trequest(\"\/party1\/path2\")\n\trequest(\"\/party1\/namedpath\/theparam1\/something\/theparam2\")\n\trequest(\"\/party1\/namedpath\/theparam1\/something\/theparam2\/else\")\n\n\tif testEnableSubdomain {\n\t\tes := subdomainTester(e, app)\n\t\tsubdomainRequest := func(reqPath string) {\n\t\t\tes.Request(\"GET\", reqPath).\n\t\t\t\tExpect().\n\t\t\t\tStatus(iris.StatusOK).Body().Equal(testSubdomainHost(app.Config.VHost) + reqPath)\n\t\t}\n\n\t\tsubdomainRequest(\"\/\")\n\t\tsubdomainRequest(\"\/path1\")\n\t\tsubdomainRequest(\"\/path2\")\n\t\tsubdomainRequest(\"\/namedpath\/theparam1\/something\/theparam2\")\n\t\tsubdomainRequest(\"\/namedpath\/theparam1\/something\/theparam2\/else\")\n\t}\n}\n\nfunc TestGorillaMuxPathEscape(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\tapp.Get(\"\/details\/{name}\", func(ctx *iris.Context) {\n\t\tname := ctx.Param(\"name\")\n\t\thighlight := ctx.URLParam(\"highlight\")\n\t\tctx.Writef(\"name=%s,highlight=%s\", name, highlight)\n\t})\n\n\te := httptest.New(app, t)\n\n\te.GET(\"\/details\/Sakamoto desu ga\").\n\t\tWithQuery(\"highlight\", \"text\").\n\t\tExpect().Status(iris.StatusOK).Body().Equal(\"name=Sakamoto desu ga,highlight=text\")\n}\n\nfunc TestGorillaMuxParamDecodedDecodeURL(t *testing.T) {\n\tapp := newGorillaMuxAPP()\n\n\tapp.Get(\"\/encoding\/{url}\", func(ctx *iris.Context) {\n\t\turl := iris.DecodeURL(ctx.ParamDecoded(\"url\"))\n\t\tctx.SetStatusCode(iris.StatusOK)\n\t\tctx.WriteString(url)\n\t})\n\n\te := httptest.New(app, t)\n\n\te.GET(\"\/encoding\/http%3A%2F%2Fsome-url.com\").Expect().Status(iris.StatusOK).Body().Equal(\"http:\/\/some-url.com\")\n}\n\nfunc TestGorillaMuxRouteURLPath(t *testing.T) {\n\tapp := iris.New()\n\tapp.Adapt(gorillamux.New())\n\n\tapp.None(\"\/profile\/{user_id}\/{ref}\/{anything:.*}\", nil).ChangeName(\"profile\")\n\tapp.Boot()\n\n\texpected := \"\/profile\/42\/iris-go\/something\"\n\n\tif got := app.Path(\"profile\", \"user_id\", 42, \"ref\", \"iris-go\", \"anything\", \"something\"); got != expected {\n\t\tt.Fatalf(\"gorillamux' reverse routing 'URLPath' error: expected %s but got %s\", expected, got)\n\t}\n}\n<|endoftext|>"} {"text":"package veneur\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCounterEmpty(t *testing.T) {\n\n\tc := 
NewCounter(\"a.b.c\", []string{\"a:b\"})\n\tc.Sample(1, 1.0)\n\n\tassert.Equal(t, \"a.b.c\", c.name, \"Name\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Len(t, metrics, 1, \"Flushes 1 metric\")\n\n\tm1 := metrics[0]\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, 0.1, m1.Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 1.0)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, 0.5, metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterSampleRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 0.5)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, float64(1), metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestGauge(t *testing.T) {\n\n\tg := NewGauge(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", g.name, \"Name\")\n\tassert.Len(t, g.tags, 1, \"Tag length\")\n\tassert.Equal(t, g.tags[0], \"a:b\", \"Tag contents\")\n\n\tg.Sample(5, 1.0)\n\n\tmetrics := g.Flush()\n\tassert.Len(t, metrics, 1, \"Flushed metric count\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\ttags := m1.Tags\n\tassert.Len(t, tags, 1, \"Tag length\")\n\tassert.Equal(t, tags[0], \"a:b\", \"Tag contents\")\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSet(t *testing.T) {\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", s.name, \"Name\")\n\tassert.Len(t, s.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", s.tags[0], \"First tag\")\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"123\", 1.0)\n\n\ts.Sample(\"2147483647\", 1.0)\n\ts.Sample(\"-2147483648\", 1.0)\n\n\tmetrics := s.Flush()\n\tassert.Len(t, metrics, 1, \"Flush\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\tassert.Equal(t, float64(4), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSetMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\ts.Sample(strconv.Itoa(rand.Int()), 1.0)\n\t}\n\tassert.Equal(t, uint64(100), s.hll.Count(), \"counts did not match\")\n\n\tjm, err := s.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\ts2 := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, s2.Combine(jm.Value), \"should have combined successfully\")\n\tassert.Equal(t, s.hll.Count(), s2.hll.Count(), \"counts did not match after merging\")\n}\n\nfunc TestHisto(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, 
\"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", h.tags[0], \"First tag\")\n\n\th.Sample(5, 1.0)\n\th.Sample(10, 1.0)\n\th.Sample(15, 1.0)\n\th.Sample(20, 1.0)\n\th.Sample(25, 1.0)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\t\/\/ We get lots of metrics back for histograms!\n\tassert.Len(t, metrics, 4, \"Flushed metrics length\")\n\n\t\/\/ the max\n\tm2 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m2.Name, \"Name\")\n\tassert.Equal(t, int32(0), m2.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m2.MetricType, \"Type\")\n\tassert.Len(t, m2.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m2.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(25), m2.Value[0][1], \"Value\")\n\n\t\/\/ the min\n\tm3 := metrics[1]\n\tassert.Equal(t, \"a.b.c.min\", m3.Name, \"Name\")\n\tassert.Equal(t, int32(0), m3.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m3.MetricType, \"Type\")\n\tassert.Len(t, m3.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m3.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m3.Value[0][1], \"Value\")\n\n\t\/\/ the count\n\tm1 := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", m1.Name, \"Name\")\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(0.5), m1.Value[0][1], \"Value\")\n\n\t\/\/ And the percentile\n\tm4 := metrics[3]\n\tassert.Equal(t, \"a.b.c.50percentile\", m4.Name, \"Name\")\n\tassert.Equal(t, int32(0), m4.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m4.MetricType, \"Type\")\n\tassert.Len(t, m4.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m4.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(15), m4.Value[0][1], \"Value\")\n}\n\nfunc TestHistoSampleRate(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag length\")\n\tassert.Equal(t, h.tags[0], \"a:b\", \"Tag contents\")\n\n\th.Sample(5, 0.5)\n\th.Sample(10, 0.5)\n\th.Sample(15, 0.5)\n\th.Sample(20, 0.5)\n\th.Sample(25, 0.5)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\tassert.Len(t, metrics, 4, \"Metrics flush length\")\n\n\t\/\/ First the max\n\tm1 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m1.Name, \"Max name\")\n\tassert.Equal(t, float64(25), m1.Value[0][1], \"Sampled max as rate\")\n\n\tcount := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", count.Name, \"count name\")\n\tassert.Equal(t, float64(1), count.Value[0][1], \"count value\")\n}\n\nfunc TestHistoMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\th.Sample(rand.NormFloat64(), 1.0)\n\t}\n\n\tjm, err := h.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\th2 := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, h2.Combine(jm.Value), \"should have combined successfully\")\n\tassert.InEpsilon(t, h.value.Quantile(0.5), h2.value.Quantile(0.5), 0.02, \"50th percentiles did not match after merging\")\n\tassert.InDelta(t, 0, h2.localWeight, 0.02, \"merged histogram 
should have count of zero\")\n\tassert.True(t, math.IsInf(h2.localMin, +1), \"merged histogram should have local minimum of +inf\")\n\tassert.True(t, math.IsInf(h2.localMax, -1), \"merged histogram should have local minimum of -inf\")\n\n\th2.Sample(1.0, 1.0)\n\tassert.InDelta(t, 1.0, h2.localWeight, 0.02, \"merged histogram should have count of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMin, 0.02, \"merged histogram should have min of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMax, 0.02, \"merged histogram should have max of 1 after adding a value\")\n}\nReduce flakiness of hyperloglog testpackage veneur\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCounterEmpty(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\tc.Sample(1, 1.0)\n\n\tassert.Equal(t, \"a.b.c\", c.name, \"Name\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Len(t, metrics, 1, \"Flushes 1 metric\")\n\n\tm1 := metrics[0]\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, 0.1, m1.Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 1.0)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, 0.5, metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterSampleRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 0.5)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, float64(1), metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestGauge(t *testing.T) {\n\n\tg := NewGauge(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", g.name, \"Name\")\n\tassert.Len(t, g.tags, 1, \"Tag length\")\n\tassert.Equal(t, g.tags[0], \"a:b\", \"Tag contents\")\n\n\tg.Sample(5, 1.0)\n\n\tmetrics := g.Flush()\n\tassert.Len(t, metrics, 1, \"Flushed metric count\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\ttags := m1.Tags\n\tassert.Len(t, tags, 1, \"Tag length\")\n\tassert.Equal(t, tags[0], \"a:b\", \"Tag contents\")\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSet(t *testing.T) {\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", s.name, \"Name\")\n\tassert.Len(t, s.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", s.tags[0], \"First tag\")\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"123\", 1.0)\n\n\ts.Sample(\"2147483647\", 1.0)\n\ts.Sample(\"-2147483648\", 1.0)\n\n\tmetrics := s.Flush()\n\tassert.Len(t, metrics, 1, \"Flush\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag 
count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\tassert.Equal(t, float64(4), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSetMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\ts.Sample(strconv.Itoa(rand.Int()), 1.0)\n\t}\n\tassert.Equal(t, uint64(100), s.hll.Count(), \"counts did not match\")\n\n\tjm, err := s.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\ts2 := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, s2.Combine(jm.Value), \"should have combined successfully\")\n\t\/\/ HLLs are approximate, and we've seen error of +-1 here in the past, so\n\t\/\/ we're giving the test some room for error to reduce flakes\n\tcountDifference := int(s.hll.Count()) - int(s2.hll.Count())\n\tassert.True(t, -1 < countDifference && countDifference < 1, \"counts did not match after merging\")\n}\n\nfunc TestHisto(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", h.tags[0], \"First tag\")\n\n\th.Sample(5, 1.0)\n\th.Sample(10, 1.0)\n\th.Sample(15, 1.0)\n\th.Sample(20, 1.0)\n\th.Sample(25, 1.0)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\t\/\/ We get lots of metrics back for histograms!\n\tassert.Len(t, metrics, 4, \"Flushed metrics length\")\n\n\t\/\/ the max\n\tm2 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m2.Name, \"Name\")\n\tassert.Equal(t, int32(0), m2.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m2.MetricType, \"Type\")\n\tassert.Len(t, m2.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m2.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(25), m2.Value[0][1], \"Value\")\n\n\t\/\/ the min\n\tm3 := metrics[1]\n\tassert.Equal(t, \"a.b.c.min\", m3.Name, \"Name\")\n\tassert.Equal(t, int32(0), m3.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m3.MetricType, \"Type\")\n\tassert.Len(t, m3.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m3.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m3.Value[0][1], \"Value\")\n\n\t\/\/ the count\n\tm1 := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", m1.Name, \"Name\")\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(0.5), m1.Value[0][1], \"Value\")\n\n\t\/\/ And the percentile\n\tm4 := metrics[3]\n\tassert.Equal(t, \"a.b.c.50percentile\", m4.Name, \"Name\")\n\tassert.Equal(t, int32(0), m4.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m4.MetricType, \"Type\")\n\tassert.Len(t, m4.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m4.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(15), m4.Value[0][1], \"Value\")\n}\n\nfunc TestHistoSampleRate(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag length\")\n\tassert.Equal(t, h.tags[0], \"a:b\", \"Tag contents\")\n\n\th.Sample(5, 0.5)\n\th.Sample(10, 0.5)\n\th.Sample(15, 0.5)\n\th.Sample(20, 0.5)\n\th.Sample(25, 
0.5)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\tassert.Len(t, metrics, 4, \"Metrics flush length\")\n\n\t\/\/ First the max\n\tm1 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m1.Name, \"Max name\")\n\tassert.Equal(t, float64(25), m1.Value[0][1], \"Sampled max as rate\")\n\n\tcount := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", count.Name, \"count name\")\n\tassert.Equal(t, float64(1), count.Value[0][1], \"count value\")\n}\n\nfunc TestHistoMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\th.Sample(rand.NormFloat64(), 1.0)\n\t}\n\n\tjm, err := h.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\th2 := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, h2.Combine(jm.Value), \"should have combined successfully\")\n\tassert.InEpsilon(t, h.value.Quantile(0.5), h2.value.Quantile(0.5), 0.02, \"50th percentiles did not match after merging\")\n\tassert.InDelta(t, 0, h2.localWeight, 0.02, \"merged histogram should have count of zero\")\n\tassert.True(t, math.IsInf(h2.localMin, +1), \"merged histogram should have local minimum of +inf\")\n\tassert.True(t, math.IsInf(h2.localMax, -1), \"merged histogram should have local minimum of -inf\")\n\n\th2.Sample(1.0, 1.0)\n\tassert.InDelta(t, 1.0, h2.localWeight, 0.02, \"merged histogram should have count of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMin, 0.02, \"merged histogram should have min of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMax, 0.02, \"merged histogram should have max of 1 after adding a value\")\n}\n<|endoftext|>"} {"text":"package veneur\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCounterEmpty(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\tc.Sample(1, 1.0)\n\n\tassert.Equal(t, \"a.b.c\", c.name, \"Name\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Len(t, metrics, 1, \"Flushes 1 metric\")\n\n\tm1 := metrics[0]\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, 0.1, m1.Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 1.0)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, 0.5, metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterSampleRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 0.5)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, float64(1), metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestGauge(t *testing.T) {\n\n\tg := NewGauge(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", g.name, \"Name\")\n\tassert.Len(t, g.tags, 1, \"Tag length\")\n\tassert.Equal(t, g.tags[0], \"a:b\", \"Tag contents\")\n\n\tg.Sample(5, 1.0)\n\n\tmetrics := g.Flush()\n\tassert.Len(t, metrics, 1, \"Flushed metric count\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not 
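meaningful here; a quick sketch of the counter arithmetic asserted above\n\t\/\/ (hypothetical names, not veneur's actual internals): Sample(v, rate) adds\n\t\/\/ v\/rate to a running total, and Flush divides by the interval in seconds:\n\t\/\/\n\t\/\/ \ttotal := 0.0\n\t\/\/ \tsample := func(v, rate float64) { total += v \/ rate }\n\t\/\/ \tsample(5, 0.5)                         \/\/ stands for 10 raw events\n\t\/\/ \tperSecond := total \/ (10 * time.Second).Seconds()\n\t\/\/ \tfmt.Println(perSecond)                 \/\/ 1, as TestCounterSampleRate expects\n\t\/\/\n\t\/\/ Interval is not 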
meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\ttags := m1.Tags\n\tassert.Len(t, tags, 1, \"Tag length\")\n\tassert.Equal(t, tags[0], \"a:b\", \"Tag contents\")\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSet(t *testing.T) {\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", s.name, \"Name\")\n\tassert.Len(t, s.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", s.tags[0], \"First tag\")\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"123\", 1.0)\n\n\ts.Sample(\"2147483647\", 1.0)\n\ts.Sample(\"-2147483648\", 1.0)\n\n\tmetrics := s.Flush()\n\tassert.Len(t, metrics, 1, \"Flush\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\tassert.Equal(t, float64(4), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSetMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\ts.Sample(strconv.Itoa(rand.Int()), 1.0)\n\t}\n\tassert.Equal(t, uint64(100), s.hll.Count(), \"counts did not match\")\n\n\tjm, err := s.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\ts2 := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, s2.Combine(jm.Value), \"should have combined successfully\")\n\t\/\/ HLLs are approximate, and we've seen error of +-1 here in the past, so\n\t\/\/ we're giving the test some room for error to reduce flakes\n\tcountDifference := int(s.hll.Count()) - int(s2.hll.Count())\n\tassert.True(t, -1 < countDifference && countDifference < 1, \"counts did not match after merging\")\n}\n\nfunc TestHisto(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", h.tags[0], \"First tag\")\n\n\th.Sample(5, 1.0)\n\th.Sample(10, 1.0)\n\th.Sample(15, 1.0)\n\th.Sample(20, 1.0)\n\th.Sample(25, 1.0)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\t\/\/ We get lots of metrics back for histograms!\n\tassert.Len(t, metrics, 4, \"Flushed metrics length\")\n\n\t\/\/ the max\n\tm2 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m2.Name, \"Name\")\n\tassert.Equal(t, int32(0), m2.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m2.MetricType, \"Type\")\n\tassert.Len(t, m2.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m2.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(25), m2.Value[0][1], \"Value\")\n\n\t\/\/ the min\n\tm3 := metrics[1]\n\tassert.Equal(t, \"a.b.c.min\", m3.Name, \"Name\")\n\tassert.Equal(t, int32(0), m3.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m3.MetricType, \"Type\")\n\tassert.Len(t, m3.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m3.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m3.Value[0][1], \"Value\")\n\n\t\/\/ the count\n\tm1 := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", m1.Name, \"Name\")\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, 
\"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(0.5), m1.Value[0][1], \"Value\")\n\n\t\/\/ And the percentile\n\tm4 := metrics[3]\n\tassert.Equal(t, \"a.b.c.50percentile\", m4.Name, \"Name\")\n\tassert.Equal(t, int32(0), m4.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m4.MetricType, \"Type\")\n\tassert.Len(t, m4.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m4.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(15), m4.Value[0][1], \"Value\")\n}\n\nfunc TestHistoSampleRate(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag length\")\n\tassert.Equal(t, h.tags[0], \"a:b\", \"Tag contents\")\n\n\th.Sample(5, 0.5)\n\th.Sample(10, 0.5)\n\th.Sample(15, 0.5)\n\th.Sample(20, 0.5)\n\th.Sample(25, 0.5)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\tassert.Len(t, metrics, 4, \"Metrics flush length\")\n\n\t\/\/ First the max\n\tm1 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m1.Name, \"Max name\")\n\tassert.Equal(t, float64(25), m1.Value[0][1], \"Sampled max as rate\")\n\n\tcount := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", count.Name, \"count name\")\n\tassert.Equal(t, float64(1), count.Value[0][1], \"count value\")\n}\n\nfunc TestHistoMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\th.Sample(rand.NormFloat64(), 1.0)\n\t}\n\n\tjm, err := h.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\th2 := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, h2.Combine(jm.Value), \"should have combined successfully\")\n\tassert.InEpsilon(t, h.value.Quantile(0.5), h2.value.Quantile(0.5), 0.02, \"50th percentiles did not match after merging\")\n\tassert.InDelta(t, 0, h2.localWeight, 0.02, \"merged histogram should have count of zero\")\n\tassert.True(t, math.IsInf(h2.localMin, +1), \"merged histogram should have local minimum of +inf\")\n\tassert.True(t, math.IsInf(h2.localMax, -1), \"merged histogram should have local minimum of -inf\")\n\n\th2.Sample(1.0, 1.0)\n\tassert.InDelta(t, 1.0, h2.localWeight, 0.02, \"merged histogram should have count of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMin, 0.02, \"merged histogram should have min of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMax, 0.02, \"merged histogram should have max of 1 after adding a value\")\n}\nAdd difference to error message in TestSetMergepackage veneur\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestCounterEmpty(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\tc.Sample(1, 1.0)\n\n\tassert.Equal(t, \"a.b.c\", c.name, \"Name\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Len(t, metrics, 1, \"Flushes 1 metric\")\n\n\tm1 := metrics[0]\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, c.tags, 1, \"Tag length\")\n\tassert.Equal(t, c.tags[0], \"a:b\", \"Tag contents\")\n\t\/\/ The counter returns an array with a single tuple of 
timestamp,value\n\tassert.Equal(t, 0.1, m1.Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 1.0)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, 0.5, metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestCounterSampleRate(t *testing.T) {\n\n\tc := NewCounter(\"a.b.c\", []string{\"a:b\"})\n\n\tc.Sample(5, 0.5)\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tmetrics := c.Flush(10 * time.Second)\n\tassert.Equal(t, float64(1), metrics[0].Value[0][1], \"Metric value\")\n}\n\nfunc TestGauge(t *testing.T) {\n\n\tg := NewGauge(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", g.name, \"Name\")\n\tassert.Len(t, g.tags, 1, \"Tag length\")\n\tassert.Equal(t, g.tags[0], \"a:b\", \"Tag contents\")\n\n\tg.Sample(5, 1.0)\n\n\tmetrics := g.Flush()\n\tassert.Len(t, metrics, 1, \"Flushed metric count\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\ttags := m1.Tags\n\tassert.Len(t, tags, 1, \"Tag length\")\n\tassert.Equal(t, tags[0], \"a:b\", \"Tag contents\")\n\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSet(t *testing.T) {\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", s.name, \"Name\")\n\tassert.Len(t, s.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", s.tags[0], \"First tag\")\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"5\", 1.0)\n\n\ts.Sample(\"123\", 1.0)\n\n\ts.Sample(\"2147483647\", 1.0)\n\ts.Sample(\"-2147483648\", 1.0)\n\n\tmetrics := s.Flush()\n\tassert.Len(t, metrics, 1, \"Flush\")\n\n\tm1 := metrics[0]\n\t\/\/ Interval is not meaningful for this\n\tassert.Equal(t, int32(0), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\tassert.Equal(t, float64(4), m1.Value[0][1], \"Value\")\n}\n\nfunc TestSetMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\ts := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\ts.Sample(strconv.Itoa(rand.Int()), 1.0)\n\t}\n\tassert.Equal(t, uint64(100), s.hll.Count(), \"counts did not match\")\n\n\tjm, err := s.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\ts2 := NewSet(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, s2.Combine(jm.Value), \"should have combined successfully\")\n\t\/\/ HLLs are approximate, and we've seen error of +-1 here in the past, so\n\t\/\/ we're giving the test some room for error to reduce flakes\n\tcount1 := int(s.hll.Count())\n\tcount2 := int(s2.hll.Count())\n\tcountDifference := count1 - count2\n\tassert.True(t, -1 <= countDifference && countDifference <= 1, \"counts did not match after merging (%d and %d)\", count1, count2)\n}\n\nfunc TestHisto(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", h.tags[0], \"First tag\")\n\n\th.Sample(5, 1.0)\n\th.Sample(10, 1.0)\n\th.Sample(15, 1.0)\n\th.Sample(20, 1.0)\n\th.Sample(25, 1.0)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\t\/\/ We get lots of metrics back for 
histograms!\n\tassert.Len(t, metrics, 4, \"Flushed metrics length\")\n\n\t\/\/ the max\n\tm2 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m2.Name, \"Name\")\n\tassert.Equal(t, int32(0), m2.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m2.MetricType, \"Type\")\n\tassert.Len(t, m2.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m2.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(25), m2.Value[0][1], \"Value\")\n\n\t\/\/ the min\n\tm3 := metrics[1]\n\tassert.Equal(t, \"a.b.c.min\", m3.Name, \"Name\")\n\tassert.Equal(t, int32(0), m3.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m3.MetricType, \"Type\")\n\tassert.Len(t, m3.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m3.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(5), m3.Value[0][1], \"Value\")\n\n\t\/\/ the count\n\tm1 := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", m1.Name, \"Name\")\n\tassert.Equal(t, int32(10), m1.Interval, \"Interval\")\n\tassert.Equal(t, \"rate\", m1.MetricType, \"Type\")\n\tassert.Len(t, m1.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m1.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(0.5), m1.Value[0][1], \"Value\")\n\n\t\/\/ And the percentile\n\tm4 := metrics[3]\n\tassert.Equal(t, \"a.b.c.50percentile\", m4.Name, \"Name\")\n\tassert.Equal(t, int32(0), m4.Interval, \"Interval\")\n\tassert.Equal(t, \"gauge\", m4.MetricType, \"Type\")\n\tassert.Len(t, m4.Tags, 1, \"Tag count\")\n\tassert.Equal(t, \"a:b\", m4.Tags[0], \"First tag\")\n\t\/\/ The counter returns an array with a single tuple of timestamp,value\n\tassert.Equal(t, float64(15), m4.Value[0][1], \"Value\")\n}\n\nfunc TestHistoSampleRate(t *testing.T) {\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\n\tassert.Equal(t, \"a.b.c\", h.name, \"Name\")\n\tassert.Len(t, h.tags, 1, \"Tag length\")\n\tassert.Equal(t, h.tags[0], \"a:b\", \"Tag contents\")\n\n\th.Sample(5, 0.5)\n\th.Sample(10, 0.5)\n\th.Sample(15, 0.5)\n\th.Sample(20, 0.5)\n\th.Sample(25, 0.5)\n\n\tmetrics := h.Flush(10*time.Second, []float64{0.50})\n\tassert.Len(t, metrics, 4, \"Metrics flush length\")\n\n\t\/\/ First the max\n\tm1 := metrics[0]\n\tassert.Equal(t, \"a.b.c.max\", m1.Name, \"Max name\")\n\tassert.Equal(t, float64(25), m1.Value[0][1], \"Sampled max as rate\")\n\n\tcount := metrics[2]\n\tassert.Equal(t, \"a.b.c.count\", count.Name, \"count name\")\n\tassert.Equal(t, float64(1), count.Value[0][1], \"count value\")\n}\n\nfunc TestHistoMerge(t *testing.T) {\n\trand.Seed(time.Now().Unix())\n\n\th := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tfor i := 0; i < 100; i++ {\n\t\th.Sample(rand.NormFloat64(), 1.0)\n\t}\n\n\tjm, err := h.Export()\n\tassert.NoError(t, err, \"should have exported successfully\")\n\n\th2 := NewHist(\"a.b.c\", []string{\"a:b\"})\n\tassert.NoError(t, h2.Combine(jm.Value), \"should have combined successfully\")\n\tassert.InEpsilon(t, h.value.Quantile(0.5), h2.value.Quantile(0.5), 0.02, \"50th percentiles did not match after merging\")\n\tassert.InDelta(t, 0, h2.localWeight, 0.02, \"merged histogram should have count of zero\")\n\tassert.True(t, math.IsInf(h2.localMin, +1), \"merged histogram should have local minimum of +inf\")\n\tassert.True(t, math.IsInf(h2.localMax, -1), \"merged histogram should have local minimum of -inf\")\n\n\th2.Sample(1.0, 1.0)\n\tassert.InDelta(t, 1.0, h2.localWeight, 0.02, 
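\/\/ The two IsInf checks above rely on the standard sentinel initialisation\n\t\t\/\/ for a running min\/max, i.e. (sketch):\n\t\t\/\/\n\t\t\/\/ \tlocalMin, localMax := math.Inf(+1), math.Inf(-1)\n\t\t\/\/ \tfor _, v := range samples {\n\t\t\/\/ \t\tlocalMin = math.Min(localMin, v)\n\t\t\/\/ \t\tlocalMax = math.Max(localMax, v)\n\t\t\/\/ \t}\n\t\t\/\/\n\t\t\/\/ so the first Sample after a merge always replaces both extremes.\n\t\t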
\"merged histogram should have count of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMin, 0.02, \"merged histogram should have min of 1 after adding a value\")\n\tassert.InDelta(t, 1.0, h2.localMax, 0.02, \"merged histogram should have max of 1 after adding a value\")\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"example-apps\/proxy\/handlers\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n)\n\nfunc launchHandler(port int, downloadHandler, digHandler, timedDigHandler, pingHandler, proxyHandler, statsHandler, uploadHandler, echoSourceIPHandler http.Handler) {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/download\/\", downloadHandler)\n\tmux.Handle(\"\/dig\/\", digHandler)\n\tmux.Handle(\"\/timed_dig\/\", timedDigHandler)\n\tmux.Handle(\"\/ping\/\", pingHandler)\n\tmux.Handle(\"\/proxy\/\", proxyHandler)\n\tmux.Handle(\"\/stats\", statsHandler)\n\tmux.Handle(\"\/upload\", uploadHandler)\n\tmux.Handle(\"\/echosourceip\", echoSourceIPHandler)\n\tmux.Handle(\"\/\", &handlers.InfoHandler{\n\t\tPort: port,\n\t})\n\thttp.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", port), mux)\n}\n\nfunc main() {\n\tsystemPortString := os.Getenv(\"PORT\")\n\tsystemPort, err := strconv.Atoi(systemPortString)\n\tif err != nil {\n\t\tlog.Fatal(\"invalid required env var PORT\")\n\t}\n\n\tstats := &handlers.Stats{\n\t\tLatency: []float64{},\n\t}\n\tdownloadHandler := &handlers.DownloadHandler{}\n\tpingHandler := &handlers.PingHandler{}\n\tdigHandler := &handlers.DigHandler{}\n\ttimedDigHandler := &handlers.TimedDigHandler{}\n\tproxyHandler := &handlers.ProxyHandler{\n\t\tStats: stats,\n\t}\n\tstatsHandler := &handlers.StatsHandler{\n\t\tStats: stats,\n\t}\n\tuploadHandler := &handlers.UploadHandler{}\n\n\techoSourceIPHandler := &handlers.EchoSourceIPHandler{}\n\n\tlaunchHandler(systemPort, downloadHandler, digHandler, timedDigHandler, pingHandler, proxyHandler, statsHandler, uploadHandler, echoSourceIPHandler)\n}\nAdd udp only dig handler to proxy mainpackage main\n\nimport (\n\t\"example-apps\/proxy\/handlers\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n)\n\nfunc launchHandler(port int, downloadHandler, digHandler, digUDPHandler, timedDigHandler, pingHandler, proxyHandler, statsHandler, uploadHandler, echoSourceIPHandler http.Handler) {\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/download\/\", downloadHandler)\n\tmux.Handle(\"\/dig\/\", digHandler)\n\tmux.Handle(\"\/digudp\/\", digUDPHandler)\n\tmux.Handle(\"\/timed_dig\/\", timedDigHandler)\n\tmux.Handle(\"\/ping\/\", pingHandler)\n\tmux.Handle(\"\/proxy\/\", proxyHandler)\n\tmux.Handle(\"\/stats\", statsHandler)\n\tmux.Handle(\"\/upload\", uploadHandler)\n\tmux.Handle(\"\/echosourceip\", echoSourceIPHandler)\n\tmux.Handle(\"\/\", &handlers.InfoHandler{\n\t\tPort: port,\n\t})\n\thttp.ListenAndServe(fmt.Sprintf(\"0.0.0.0:%d\", port), mux)\n}\n\nfunc main() {\n\tsystemPortString := os.Getenv(\"PORT\")\n\tsystemPort, err := strconv.Atoi(systemPortString)\n\tif err != nil {\n\t\tlog.Fatal(\"invalid required env var PORT\")\n\t}\n\n\tstats := &handlers.Stats{\n\t\tLatency: []float64{},\n\t}\n\tdownloadHandler := &handlers.DownloadHandler{}\n\tpingHandler := &handlers.PingHandler{}\n\tdigHandler := &handlers.DigHandler{}\n\tdigUDPHandler := &handlers.DigUDPHandler{}\n\ttimedDigHandler := &handlers.TimedDigHandler{}\n\tproxyHandler := &handlers.ProxyHandler{\n\t\tStats: stats,\n\t}\n\tstatsHandler := &handlers.StatsHandler{\n\t\tStats: stats,\n\t}\n\tuploadHandler := 
&handlers.UploadHandler{}\n\n\techoSourceIPHandler := &handlers.EchoSourceIPHandler{}\n\n\tlaunchHandler(systemPort, downloadHandler, digHandler, digUDPHandler, timedDigHandler, pingHandler, proxyHandler, statsHandler, uploadHandler, echoSourceIPHandler)\n}\n<|endoftext|>"} {"text":"package terraform_vix\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/c4milo\/govix\"\n\t\"github.com\/c4milo\/terraform_vix\/helper\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/hashicorp\/terraform\/flatmap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc resource_vix_vm_validation() *config.Validator {\n\treturn &config.Validator{\n\t\tRequired: []string{\n\t\t\t\"image.*\",\n\t\t\t\"image.*.url\",\n\t\t\t\"image.*.checksum\",\n\t\t\t\"image.*.checksum_type\",\n\t\t},\n\t\tOptional: []string{\n\t\t\t\"description\",\n\t\t\t\"image.*.password\",\n\t\t\t\"cpus\",\n\t\t\t\"memory\",\n\t\t\t\"hardware_version\",\n\t\t\t\"network_driver\",\n\t\t\t\"networks.*\",\n\t\t\t\"sharedfolders\",\n\t\t},\n\t}\n}\n\nfunc resource_vix_vm_create(\n\ts *terraform.ResourceState,\n\td *terraform.ResourceDiff,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\t\/\/ Merge the diff into the state so that we have all the attributes\n\t\/\/ properly.\n\trs := s.MergeDiff(d)\n\n\tname := \"coreos\"\n\tdescription := rs.Attributes[\"description\"]\n\tcpus, err := strconv.ParseUint(rs.Attributes[\"cpus\"], 0, 8)\n\tmemory := rs.Attributes[\"memory\"]\n\thwversion, err := strconv.ParseUint(rs.Attributes[\"hardware_version\"], 0, 8)\n\tnetdrv := rs.Attributes[\"network_driver\"]\n\tsharedfolders, err := strconv.ParseBool(rs.Attributes[\"sharedfolders\"])\n\tvar networks []string\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif raw := flatmap.Expand(rs.Attributes, \"networks\"); raw != nil {\n\t\tif nets, ok := raw.([]interface{}); ok {\n\t\t\tfor _, net := range nets {\n\t\t\t\tstr, ok := net.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnetworks = append(networks, str)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ This is nasty but there doesn't seem to be a cleaner way to extract stuff\n\t\/\/ from the TF configuration\n\timage := flatmap.Expand(rs.Attributes, \"image\").([]interface{})[0].(map[string]interface{})\n\n\tlog.Printf(\"[DEBUG] networks => %v\", networks)\n\n\tif len(networks) == 0 {\n\t\tnetworks = append(networks, \"bridged\")\n\t}\n\n\tlog.Printf(\"[DEBUG] name => %s\", name)\n\tlog.Printf(\"[DEBUG] description => %s\", description)\n\tlog.Printf(\"[DEBUG] image => %v\", image)\n\tlog.Printf(\"[DEBUG] cpus => %d\", cpus)\n\tlog.Printf(\"[DEBUG] memory => %s\", memory)\n\tlog.Printf(\"[DEBUG] hwversion => %d\", hwversion)\n\tlog.Printf(\"[DEBUG] netdrv => %s\", netdrv)\n\tlog.Printf(\"[DEBUG] sharedfolders => %t\", sharedfolders)\n\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ FIXME(c4milo): There is an issue here whenever count is greater than 1\n\t\/\/ please see: https:\/\/github.com\/hashicorp\/terraform\/issues\/141\n\tvmPath := filepath.Join(usr.HomeDir, fmt.Sprintf(\".terraform\/vix\/vms\/%s\", name))\n\timagePath := filepath.Join(usr.HomeDir, fmt.Sprintf(\".terraform\/vix\/images\"))\n\n\timageConfig := helper.Image{\n\t\tURL: image[\"url\"].(string),\n\t\tChecksum: image[\"checksum\"].(string),\n\t\tChecksumType: 
image[\"checksum_type\"].(string),\n\t\tDownloadPath: imagePath,\n\t}\n\n\tfile, err := helper.FetchImage(imageConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\terr = helper.UnpackImage(file, vmPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Gets VIX instance\n\tp := meta.(*ResourceProvider)\n\tclient := p.client\n\n\t\/\/ TODO(c4milo): Lookup VMX file in imagePath\n\tlog.Printf(\"[INFO] Opening virtual machine from %s\", imagePath)\n\n\tvm, err := client.OpenVm(imagePath, image[\"password\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.Disconnect()\n\n\tmemoryInMb, err := humanize.ParseBytes(memory)\n\tif err != nil {\n\t\tlog.Printf(\"[WARN] Unable to set memory size, defaulting to 1g: %s\", err)\n\t\tmemoryInMb = 1024\n\t} else {\n\t\tmemoryInMb \/= 1024\n\t}\n\n\tlog.Printf(\"[DEBUG] Setting memory size to %d megabytes\", memoryInMb)\n\tvm.SetMemorySize(uint(memoryInMb))\n\n\tlog.Printf(\"[DEBUG] Setting vcpus to %d\", cpus)\n\tvm.SetNumberVcpus(uint8(cpus))\n\n\tfor _, netType := range networks {\n\t\tadapter := &vix.NetworkAdapter{\n\t\t\tVSwitch: vix.VSwitch{},\n\t\t\tStartConnected: true,\n\t\t}\n\n\t\tswitch netdrv {\n\t\tcase \"e1000\":\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_E1000\n\t\tcase \"vmxnet3\":\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_VMXNET3\n\t\tdefault:\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_E1000\n\t\t}\n\n\t\tswitch netType {\n\t\tcase \"hostonly\":\n\t\t\tadapter.ConnType = vix.NETWORK_HOSTONLY\n\t\tcase \"bridged\":\n\t\t\tadapter.ConnType = vix.NETWORK_BRIDGED\n\t\tcase \"nat\":\n\t\t\tadapter.ConnType = vix.NETWORK_NAT\n\t\tdefault:\n\t\t\tadapter.ConnType = vix.NETWORK_CUSTOM\n\n\t\t}\n\n\t\terr = vm.AddNetworkAdapter(adapter)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ TODO(c4milo): Set hardware version\n\n\tlog.Println(\"[INFO] Powering virtual machine on...\")\n\terr = vm.PowerOn(vix.VMPOWEROP_NORMAL)\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\t\/\/ rs.ConnInfo[\"type\"] = \"ssh\"\n\t\/\/ rs.ConnInfo[\"host\"] = ?\n\n\treturn rs, nil\n}\n\nfunc resource_vix_vm_update(\n\ts *terraform.ResourceState,\n\td *terraform.ResourceDiff,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\t\/\/p := meta.(*ResourceProvider)\n\n\treturn nil, nil\n}\n\nfunc resource_vix_vm_destroy(\n\ts *terraform.ResourceState,\n\tmeta interface{}) error {\n\t\/\/ p := meta.(*ResourceProvider)\n\t\/\/ client := p.client\n\n\treturn nil\n}\n\nfunc resource_vix_vm_diff(\n\ts *terraform.ResourceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.ResourceDiff, error) {\n\n\tb := &diff.ResourceBuilder{\n\t\t\/\/ We have to choose whether a change in an attribute triggers a new\n\t\t\/\/ resource creation or updates the existing resource.\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"description\": diff.AttrTypeUpdate,\n\t\t\t\"image\": diff.AttrTypeCreate,\n\t\t\t\"cpus\": diff.AttrTypeUpdate,\n\t\t\t\"memory\": diff.AttrTypeUpdate,\n\t\t\t\"networks\": diff.AttrTypeUpdate,\n\t\t\t\"hardware_version\": diff.AttrTypeUpdate,\n\t\t\t\"network_driver\": diff.AttrTypeUpdate,\n\t\t\t\"sharedfolders\": diff.AttrTypeUpdate,\n\t\t},\n\n\t\tComputedAttrs: []string{\n\t\t\t\"ip_address\",\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_vix_vm_refresh(\n\ts *terraform.ResourceState,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\n\treturn nil, nil\n}\n\nfunc resource_vix_vm_update_state(\n\ts *terraform.ResourceState,\n\tvm *vix.VM) 
(*terraform.ResourceState, error) {\n\n\treturn nil, nil\n}\nUses new API for fetching imagespackage terraform_vix\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/c4milo\/govix\"\n\t\"github.com\/c4milo\/terraform_vix\/helper\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/hashicorp\/terraform\/flatmap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc resource_vix_vm_validation() *config.Validator {\n\treturn &config.Validator{\n\t\tRequired: []string{\n\t\t\t\"image.*\",\n\t\t\t\"image.*.url\",\n\t\t\t\"image.*.checksum\",\n\t\t\t\"image.*.checksum_type\",\n\t\t},\n\t\tOptional: []string{\n\t\t\t\"description\",\n\t\t\t\"image.*.password\",\n\t\t\t\"cpus\",\n\t\t\t\"memory\",\n\t\t\t\"hardware_version\",\n\t\t\t\"network_driver\",\n\t\t\t\"networks.*\",\n\t\t\t\"sharedfolders\",\n\t\t},\n\t}\n}\n\nfunc resource_vix_vm_create(\n\ts *terraform.ResourceState,\n\td *terraform.ResourceDiff,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\t\/\/ Merge the diff into the state so that we have all the attributes\n\t\/\/ properly.\n\trs := s.MergeDiff(d)\n\n\tname := \"coreos\"\n\tdescription := rs.Attributes[\"description\"]\n\tcpus, err := strconv.ParseUint(rs.Attributes[\"cpus\"], 0, 8)\n\tmemory := rs.Attributes[\"memory\"]\n\thwversion, err := strconv.ParseUint(rs.Attributes[\"hardware_version\"], 0, 8)\n\tnetdrv := rs.Attributes[\"network_driver\"]\n\tsharedfolders, err := strconv.ParseBool(rs.Attributes[\"sharedfolders\"])\n\tvar networks []string\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif raw := flatmap.Expand(rs.Attributes, \"networks\"); raw != nil {\n\t\tif nets, ok := raw.([]interface{}); ok {\n\t\t\tfor _, net := range nets {\n\t\t\t\tstr, ok := net.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnetworks = append(networks, str)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ This is nasty but there doesn't seem to be a cleaner way to extract stuff\n\t\/\/ from the TF configuration\n\timage := flatmap.Expand(rs.Attributes, \"image\").([]interface{})[0].(map[string]interface{})\n\n\tlog.Printf(\"[DEBUG] networks => %v\", networks)\n\n\tif len(networks) == 0 {\n\t\tnetworks = append(networks, \"bridged\")\n\t}\n\n\tlog.Printf(\"[DEBUG] name => %s\", name)\n\tlog.Printf(\"[DEBUG] description => %s\", description)\n\tlog.Printf(\"[DEBUG] image => %v\", image)\n\tlog.Printf(\"[DEBUG] cpus => %d\", cpus)\n\tlog.Printf(\"[DEBUG] memory => %s\", memory)\n\tlog.Printf(\"[DEBUG] hwversion => %d\", hwversion)\n\tlog.Printf(\"[DEBUG] netdrv => %s\", netdrv)\n\tlog.Printf(\"[DEBUG] sharedfolders => %t\", sharedfolders)\n\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ FIXME(c4milo): There is an issue here whenever count is greater than 1\n\t\/\/ please see: https:\/\/github.com\/hashicorp\/terraform\/issues\/141\n\tvmPath := filepath.Join(usr.HomeDir, fmt.Sprintf(\".terraform\/vix\/vms\/%s\", name))\n\timagePath := filepath.Join(usr.HomeDir, fmt.Sprintf(\".terraform\/vix\/images\"))\n\n\timageConfig := helper.FetchConfig{\n\t\tURL: image[\"url\"].(string),\n\t\tChecksum: image[\"checksum\"].(string),\n\t\tChecksumType: image[\"checksum_type\"].(string),\n\t\tDownloadPath: imagePath,\n\t}\n\n\tfile, err := helper.FetchFile(imageConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\terr = helper.UnpackFile(file, vmPath)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Gets VIX instance\n\tp := meta.(*ResourceProvider)\n\tclient := p.client\n\n\t\/\/ TODO(c4milo): Lookup VMX file in imagePath\n\tlog.Printf(\"[INFO] Opening virtual machine from %s\", imagePath)\n\n\tvm, err := client.OpenVm(imagePath, image[\"password\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.Disconnect()\n\n\tmemoryInMb, err := humanize.ParseBytes(memory)\n\tif err != nil {\n\t\tlog.Printf(\"[WARN] Unable to set memory size, defaulting to 1g: %s\", err)\n\t\tmemoryInMb = 1024\n\t} else {\n\t\t\/\/ ParseBytes returns a raw byte count; convert it to megabytes\n\t\tmemoryInMb \/= 1024 * 1024\n\t}\n\n\tlog.Printf(\"[DEBUG] Setting memory size to %d megabytes\", memoryInMb)\n\tvm.SetMemorySize(uint(memoryInMb))\n\n\tlog.Printf(\"[DEBUG] Setting vcpus to %d\", cpus)\n\tvm.SetNumberVcpus(uint8(cpus))\n\n\tfor _, netType := range networks {\n\t\tadapter := &vix.NetworkAdapter{\n\t\t\tVSwitch: vix.VSwitch{},\n\t\t\tStartConnected: true,\n\t\t}\n\n\t\tswitch netdrv {\n\t\tcase \"e1000\":\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_E1000\n\t\tcase \"vmxnet3\":\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_VMXNET3\n\t\tdefault:\n\t\t\tadapter.Vdevice = vix.NETWORK_DEVICE_E1000\n\t\t}\n\n\t\tswitch netType {\n\t\tcase \"hostonly\":\n\t\t\tadapter.ConnType = vix.NETWORK_HOSTONLY\n\t\tcase \"bridged\":\n\t\t\tadapter.ConnType = vix.NETWORK_BRIDGED\n\t\tcase \"nat\":\n\t\t\tadapter.ConnType = vix.NETWORK_NAT\n\t\tdefault:\n\t\t\tadapter.ConnType = vix.NETWORK_CUSTOM\n\n\t\t}\n\n\t\terr = vm.AddNetworkAdapter(adapter)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ TODO(c4milo): Set hardware version\n\n\tlog.Println(\"[INFO] Powering virtual machine on...\")\n\terr = vm.PowerOn(vix.VMPOWEROP_NORMAL)\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\t\/\/ rs.ConnInfo[\"type\"] = \"ssh\"\n\t\/\/ rs.ConnInfo[\"host\"] = ?\n\n\treturn rs, nil\n}\n\nfunc resource_vix_vm_update(\n\ts *terraform.ResourceState,\n\td *terraform.ResourceDiff,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\t\/\/p := meta.(*ResourceProvider)\n\n\treturn nil, nil\n}\n\nfunc resource_vix_vm_destroy(\n\ts *terraform.ResourceState,\n\tmeta interface{}) error {\n\t\/\/ p := meta.(*ResourceProvider)\n\t\/\/ client := p.client\n\n\treturn nil\n}\n\nfunc resource_vix_vm_diff(\n\ts *terraform.ResourceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.ResourceDiff, error) {\n\n\tb := &diff.ResourceBuilder{\n\t\t\/\/ We have to choose whether a change in an attribute triggers a new\n\t\t\/\/ resource creation or updates the existing resource.\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"description\": diff.AttrTypeUpdate,\n\t\t\t\"image\": diff.AttrTypeCreate,\n\t\t\t\"cpus\": diff.AttrTypeUpdate,\n\t\t\t\"memory\": diff.AttrTypeUpdate,\n\t\t\t\"networks\": diff.AttrTypeUpdate,\n\t\t\t\"hardware_version\": diff.AttrTypeUpdate,\n\t\t\t\"network_driver\": diff.AttrTypeUpdate,\n\t\t\t\"sharedfolders\": diff.AttrTypeUpdate,\n\t\t},\n\n\t\tComputedAttrs: []string{\n\t\t\t\"ip_address\",\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_vix_vm_refresh(\n\ts *terraform.ResourceState,\n\tmeta interface{}) (*terraform.ResourceState, error) {\n\n\treturn nil, nil\n}\n\nfunc resource_vix_vm_update_state(\n\ts *terraform.ResourceState,\n\tvm *vix.VM) (*terraform.ResourceState, error) {\n\n\treturn nil, nil\n}
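\n\n\/\/ megabytesFromHumanSize is a hypothetical helper (a sketch, not part of the\n\/\/ original resource): humanize.ParseBytes returns a raw byte count, so a\n\/\/ megabyte figure needs a divide by 1024*1024 (a bare \/1024 would only get as\n\/\/ far as kilobytes).\nfunc megabytesFromHumanSize(s string) (uint64, error) {\n\tb, err := humanize.ParseBytes(s) \/\/ e.g. \"1g\"\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn b \/ (1024 * 1024), nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport 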
(\n\t\"errors\"\n\t\"strconv\"\n\n\t\"github.com\/MJKWoolnough\/gopherjs\/overlay\"\n\t\"github.com\/MJKWoolnough\/gopherjs\/tabs\"\n\t\"github.com\/MJKWoolnough\/gopherjs\/xjs\"\n\t\"honnef.co\/go\/js\/dom\"\n)\n\nfunc maps(c dom.Element) {\n\txjs.RemoveChildren(c)\n\tmapsDiv := xjs.CreateElement(\"div\")\n\tdefer c.AppendChild(mapsDiv)\n\tlist, err := MapList()\n\tif err != nil {\n\t\txjs.SetInnerText(mapsDiv, err.Error())\n\t\treturn\n\t}\n\n\tnewButton := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tnewButton.Type = \"button\"\n\tnewButton.Value = \"New Map\"\n\tnewButton.AddEventListener(\"click\", false, newMap(c))\n\n\tmapsDiv.AppendChild(newButton)\n\n\tfor _, m := range list {\n\t\tsd := xjs.CreateElement(\"div\")\n\t\txjs.SetInnerText(sd, m.Name)\n\t\tsd.AddEventListener(\"click\", false, viewMap(m))\n\t\tmapsDiv.AppendChild(sd)\n\t}\n\tc.AppendChild(mapsDiv)\n}\n\nfunc newMap(c dom.Element) func(dom.Event) {\n\treturn func(dom.Event) {\n\t\tf := xjs.CreateElement(\"div\")\n\t\to := overlay.New(f)\n\t\tf.AppendChild(xjs.SetInnerText(xjs.CreateElement(\"h1\"), \"New Map\"))\n\t\tf.AppendChild(tabs.MakeTabs([]tabs.Tab{\n\t\t\t{\"Create\", createMap(o)},\n\t\t\t{\"Upload\/Download\", uploadMap(o)},\n\t\t\t{\"Generate\", generate},\n\t\t}))\n\t\to.OnClose(func() {\n\t\t\tmaps(c)\n\t\t})\n\t\tc.AppendChild(o)\n\t}\n}\n\nvar gameModes = [...]string{\"Survival\", \"Creative\", \"Adventure\", \"Hardcore\", \"Spectator\"}\n\nfunc createMap(o overlay.Overlay) func(dom.Element) {\n\tc := xjs.CreateElement(\"div\")\n\tnameLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tnameLabel.For = \"name\"\n\txjs.SetInnerText(nameLabel, \"Level Name\")\n\n\tname := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tname.Type = \"text\"\n\tname.SetID(\"name\")\n\n\tgameModeLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tgameModeLabel.For = \"gameMode\"\n\txjs.SetInnerText(gameModeLabel, \"Game Mode\")\n\n\tgameMode := xjs.CreateElement(\"select\").(*dom.HTMLSelectElement)\n\tfor k, v := range gameModes {\n\t\to := xjs.CreateElement(\"option\").(*dom.HTMLOptionElement)\n\t\to.Value = strconv.Itoa(k)\n\t\txjs.SetInnerText(o, v)\n\t\tgameMode.AppendChild(o)\n\t}\n\n\tseedLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tseedLabel.For = \"seed\"\n\txjs.SetInnerText(seedLabel, \"Level Seed\")\n\n\tseed := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tseed.Type = \"text\"\n\tseed.SetID(\"seed\")\n\tseed.Value = \"\"\n\n\tstructuresLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tstructuresLabel.For = \"structures\"\n\txjs.SetInnerText(structuresLabel, \"Generate Structures\")\n\n\tstructures := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tstructures.Type = \"checkbox\"\n\tstructures.Checked = true\n\tstructures.SetID(\"structures\")\n\n\tcheatsLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tcheatsLabel.For = \"cheats\"\n\txjs.SetInnerText(cheatsLabel, \"Allow Cheats\")\n\n\tcheats := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tcheats.Type = \"checkbox\"\n\tcheats.Checked = 
false\n\tcheats.SetID(\"cheats\")\n\n\tc.AppendChild(nameLabel)\n\tc.AppendChild(name)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(gameModeLabel)\n\tc.AppendChild(gameMode)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(seedLabel)\n\tc.AppendChild(seed)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(structuresLabel)\n\tc.AppendChild(structures)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(cheatsLabel)\n\tc.AppendChild(cheats)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\n\tdataParser := func(mode int) func() (DefaultMap, error) {\n\t\treturn func() (DefaultMap, error) {\n\t\t\tdata := DefaultMap{\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tvar err error\n\t\t\tdata.Name = name.Value\n\t\t\tsi := gameMode.SelectedIndex\n\t\t\tif si < 0 || si >= len(gameModes) {\n\t\t\t\treturn data, ErrInvalidGameMode\n\t\t\t}\n\t\t\tif seed.Value == \"\" {\n\t\t\t\tseed.Value = \"0\"\n\t\t\t}\n\t\t\tdata.Seed, err = strconv.ParseInt(seed.Value, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn data, err\n\t\t\t}\n\t\t\tdata.Structures = structures.Checked\n\t\t\tdata.Cheats = cheats.Checked\n\t\t\treturn data, nil\n\t\t}\n\t}\n\n\tc.AppendChild(tabs.MakeTabs([]tabs.Tab{\n\t\t{\"Default\", createMapMode(0, o, dataParser(0))},\n\t\t{\"Super Flat\", createSuperFlatMap(o, dataParser(1))},\n\t\t{\"Large Biomes\", createMapMode(2, o, dataParser(2))},\n\t\t{\"Amplified\", createMapMode(3, o, dataParser(3))},\n\t\t{\"Customised\", createCustomisedMap(o, dataParser(4))},\n\t}))\n\treturn func(d dom.Element) {\n\t\td.AppendChild(c)\n\t}\n}\n\nvar worldTypes = [...]string{\n\t\"The standard minecraft map generation.\",\n\t\"A simple generator allowing customised levels of blocks.\",\n\t\"The standard minecraft map generation, but tweaked to allow for much larger biomes.\",\n\t\"The standard minecraft map generation, but tweaked to stretch the land upwards.\",\n\t\"A completely customiseable generator.\",\n}\n\nfunc createMapMode(mode int, o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\tsubmit := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tsubmit.Type = \"button\"\n\tsubmit.Value = \"Create Map\"\n\tsubmit.AddEventListener(\"click\", false, func(dom.Event) {\n\t\tdata, err := dataParser()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\terr = CreateDefaultMap(data)\n\t\t\tif err != nil {\n\t\t\t\tdom.GetWindow().Alert(err.Error())\n\t\t\t}\n\t\t\to.Close()\n\t\t}()\n\t})\n\treturn func(c dom.Element) {\n\t\td := xjs.CreateElement(\"div\")\n\t\txjs.SetPreText(d, worldTypes[mode])\n\t\tc.AppendChild(d)\n\t\tc.AppendChild(xjs.CreateElement(\"br\"))\n\t\tc.AppendChild(submit)\n\t}\n}\n\nfunc createSuperFlatMap(o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\td := xjs.CreateElement(\"div\")\n\treturn func(c dom.Element) {\n\t\tc.AppendChild(d)\n\t}\n}\n\nfunc createCustomisedMap(o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\td := xjs.CreateElement(\"div\")\n\treturn func(c dom.Element) {\n\t\tc.AppendChild(d)\n\t}\n\t\/\/ Sea Level - 0-255\n\t\/\/ Caves, Strongholds, Villages, Mineshafts, Temples, Ocean Monuments, Ravines\n\t\/\/ Dungeons + Count 1-100\n\t\/\/ Water Lakes + Rarity 1-100\n\t\/\/ Lava Lakes + Rarity 1-100\n\t\/\/ Lava Oceans\n\t\/\/ Biome - All\/Choose\n\t\/\/ Biome Size 1-8\n\t\/\/ River Size 1-5\n\t\/\/ Ores -> Dirt\/Gravel\/Granite\/Diorite\/Andesite\/Coal Ore\/Iron Ore\/Gold 
Ore\/Redstone Ore\/Diamond Ore\/Lapis Lazuli Ore ->\n\t\/\/ Spawn Size - 1-50\n\t\/\/ Spawn Tries - 0-40\n\t\/\/ Min-Height - 0-255\n\t\/\/ Max-Height - 0-255\n\t\/\/ Advanced ->\n\t\/\/ Main Noise Scale X - 1-5000\n\t\/\/ Main Noise Scale Y - 1-5000\n\t\/\/ Main Noise Scale Z - 1-5000\n\t\/\/ Depth Noise Scale X - 1-2000\n\t\/\/ Depth Noise Scale Y - 1-2000\n\t\/\/ Depth Noise Scale Z - 1-2000\n\t\/\/ Depth Base Size - 1-25\n\t\/\/ Coordinate Scale - 1-6000\n\t\/\/ Height Scale - 1-6000\n\t\/\/ Height Stretch - 0.01-50\n\t\/\/ Upper Limit Scale - 1-5000\n\t\/\/ Lower Limit Scale - 1-5000\n\t\/\/ Biome Depth Weight - 1-20\n\t\/\/ Biome Depth Offset - 1-20\n\t\/\/ Biome Scale Weight - 1-20\n\t\/\/ Biome Scale Offset - 1-20\n\n}\n\nfunc uploadMap(o overlay.Overlay) func(dom.Element) {\n\treturn func(c dom.Element) {\n\t}\n}\n\nfunc viewMap(m Map) func(dom.Event) {\n\treturn func(dom.Event) {\n\t\tservers, err := ServerList()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\td := xjs.CreateElement(\"div\")\n\t\tod := overlay.New(d)\n\t\td.AppendChild(xjs.SetInnerText(xjs.CreateElement(\"h1\"), \"Map Details\"))\n\n\t\tnameLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\t\tnameLabel.For = \"name\"\n\t\txjs.SetInnerText(nameLabel, \"Name\")\n\t\tname := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\t\txjs.SetInnerText(nameLabel, \"Name\")\n\t\tname.SetID(\"name\")\n\t\tname.Value = m.Name\n\t\tname.Type = \"text\"\n\n\t\tserverLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\t\tserverLabel.For = \"server\"\n\t\txjs.SetInnerText(serverLabel, \"Server\")\n\t\tserverEditable := true\n\t\tvar (\n\t\t\tselServer Server\n\t\t\tserver dom.Element\n\t\t)\n\t\tif m.Server != -1 {\n\t\t\tfor _, s := range servers {\n\t\t\t\tif s.ID == m.Server {\n\t\t\t\t\tselServer = s\n\t\t\t\t\tserverEditable = !s.IsRunning()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif serverEditable {\n\t\t\tsel := xjs.CreateElement(\"select\").(*dom.HTMLSelectElement)\n\t\t\tsel.SetID(\"server\")\n\t\t\tfor _, s := range servers {\n\t\t\t\tif s.Map != -1 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\to := xjs.CreateElement(\"option\").(*dom.HTMLOptionElement)\n\t\t\t\to.Value = strconv.Itoa(s.ID)\n\t\t\t\txjs.SetInnerText(o, s.Name)\n\t\t\t\tif s.ID == m.Server {\n\t\t\t\t\to.Selected = true\n\t\t\t\t}\n\t\t\t\tsel.AppendChild(o)\n\t\t\t}\n\t\t\tserver = sel\n\t\t} else {\n\t\t\tserver.AppendChild(xjs.SetInnerText(xjs.CreateElement(\"div\"), selServer.Name))\n\t\t}\n\n\t\td.AppendChild(nameLabel)\n\t\td.AppendChild(name)\n\t\td.AppendChild(xjs.CreateElement(\"br\"))\n\t\td.AppendChild(serverLabel)\n\t\td.AppendChild(server)\n\n\t\tdom.GetWindow().Document().DocumentElement().AppendChild(od)\n\t}\n}\n\n\/\/ Errors\nvar ErrInvalidGameMode = errors.New(\"invalid game mode\")\nAdded server set buttonpackage main\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\n\t\"github.com\/MJKWoolnough\/gopherjs\/overlay\"\n\t\"github.com\/MJKWoolnough\/gopherjs\/tabs\"\n\t\"github.com\/MJKWoolnough\/gopherjs\/xjs\"\n\t\"honnef.co\/go\/js\/dom\"\n)\n\nfunc maps(c dom.Element) {\n\txjs.RemoveChildren(c)\n\tmapsDiv := xjs.CreateElement(\"div\")\n\tdefer c.AppendChild(mapsDiv)\n\tlist, err := MapList()\n\tif err != nil {\n\t\txjs.SetInnerText(mapsDiv, err.Error())\n\t\treturn\n\t}\n\n\tnewButton := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tnewButton.Type = \"button\"\n\tnewButton.Value = \"New Map\"\n\tnewButton.AddEventListener(\"click\", false, newMap(c))\n\n\tmapsDiv.AppendChild(newButton)\n\n\tfor _, 
m := range list {\n\t\tsd := xjs.CreateElement(\"div\")\n\t\txjs.SetInnerText(sd, m.Name)\n\t\tsd.AddEventListener(\"click\", false, viewMap(m))\n\t\tmapsDiv.AppendChild(sd)\n\t}\n\tc.AppendChild(mapsDiv)\n}\n\nfunc newMap(c dom.Element) func(dom.Event) {\n\treturn func(dom.Event) {\n\t\tf := xjs.CreateElement(\"div\")\n\t\to := overlay.New(f)\n\t\tf.AppendChild(xjs.SetInnerText(xjs.CreateElement(\"h1\"), \"New Map\"))\n\t\tf.AppendChild(tabs.MakeTabs([]tabs.Tab{\n\t\t\t{\"Create\", createMap(o)},\n\t\t\t{\"Upload\/Download\", uploadMap(o)},\n\t\t\t{\"Generate\", generate},\n\t\t}))\n\t\to.OnClose(func() {\n\t\t\tmaps(c)\n\t\t})\n\t\tc.AppendChild(o)\n\t}\n}\n\nvar gameModes = [...]string{\"Survival\", \"Creative\", \"Adventure\", \"Hardcore\", \"Spectator\"}\n\nfunc createMap(o overlay.Overlay) func(dom.Element) {\n\tc := xjs.CreateElement(\"div\")\n\tnameLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tnameLabel.For = \"name\"\n\txjs.SetInnerText(nameLabel, \"Level Name\")\n\n\tname := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tname.Type = \"text\"\n\tname.SetID(\"name\")\n\n\tgameModeLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tgameModeLabel.For = \"gameMode\"\n\txjs.SetInnerText(gameModeLabel, \"Game Mode\")\n\n\tgameMode := xjs.CreateElement(\"select\").(*dom.HTMLSelectElement)\n\tfor k, v := range gameModes {\n\t\to := xjs.CreateElement(\"option\").(*dom.HTMLOptionElement)\n\t\to.Value = strconv.Itoa(k)\n\t\txjs.SetInnerText(o, v)\n\t\tgameMode.AppendChild(o)\n\t}\n\n\tseedLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tseedLabel.For = \"seed\"\n\txjs.SetInnerText(seedLabel, \"Level Seed\")\n\n\tseed := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tseed.Type = \"text\"\n\tseed.SetID(\"seed\")\n\tseed.Value = \"\"\n\n\tstructuresLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tstructuresLabel.For = \"structures\"\n\txjs.SetInnerText(structuresLabel, \"Generate Structures\")\n\n\tstructures := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tstructures.Type = \"checkbox\"\n\tstructures.Checked = true\n\tstructures.SetID(\"structures\")\n\n\tcheatsLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\tcheatsLabel.For = \"cheats\"\n\txjs.SetInnerText(cheatsLabel, \"Allow Cheats\")\n\n\tcheats := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tcheats.Type = \"checkbox\"\n\tcheats.Checked = false\n\tcheats.SetID(\"cheats\")\n\n\tc.AppendChild(nameLabel)\n\tc.AppendChild(name)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(gameModeLabel)\n\tc.AppendChild(gameMode)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(seedLabel)\n\tc.AppendChild(seed)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(structuresLabel)\n\tc.AppendChild(structures)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(cheatsLabel)\n\tc.AppendChild(cheats)\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\tc.AppendChild(xjs.CreateElement(\"br\"))\n\n\tdataParser := func(mode int) func() (DefaultMap, error) {\n\t\treturn func() (DefaultMap, error) {\n\t\t\tdata := DefaultMap{\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tvar err error\n\t\t\tdata.Name = name.Value\n\t\t\tsi := gameMode.SelectedIndex\n\t\t\tif si < 0 || si >= len(gameModes) {\n\t\t\t\treturn data, ErrInvalidGameMode\n\t\t\t}\n\t\t\tif seed.Value == \"\" {\n\t\t\t\tseed.Value = \"0\"\n\t\t\t}\n\t\t\tdata.Seed, err = strconv.ParseInt(seed.Value, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn data, 
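\/\/ The label\/input pairs assembled in createMap above repeat the same\n\t\t\t\/\/ few steps; a helper along these lines (sketch, using only calls\n\t\t\t\/\/ already imported in this file) would shrink that boilerplate:\n\t\t\t\/\/\n\t\t\t\/\/ \tfunc labelledInput(id, label, typ string) (*dom.HTMLLabelElement, *dom.HTMLInputElement) {\n\t\t\t\/\/ \t\tl := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\t\t\t\/\/ \t\tl.For = id\n\t\t\t\/\/ \t\txjs.SetInnerText(l, label)\n\t\t\t\/\/ \t\ti := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\t\t\t\/\/ \t\ti.Type = typ\n\t\t\t\/\/ \t\ti.SetID(id)\n\t\t\t\/\/ \t\treturn l, i\n\t\t\t\/\/ \t}\n\t\t\t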
err\n\t\t\t}\n\t\t\tdata.Structures = structures.Checked\n\t\t\tdata.Cheats = cheats.Checked\n\t\t\treturn data, nil\n\t\t}\n\t}\n\n\tc.AppendChild(tabs.MakeTabs([]tabs.Tab{\n\t\t{\"Default\", createMapMode(0, o, dataParser(0))},\n\t\t{\"Super Flat\", createSuperFlatMap(o, dataParser(1))},\n\t\t{\"Large Biomes\", createMapMode(2, o, dataParser(2))},\n\t\t{\"Amplified\", createMapMode(3, o, dataParser(3))},\n\t\t{\"Customised\", createCustomisedMap(o, dataParser(4))},\n\t}))\n\treturn func(d dom.Element) {\n\t\td.AppendChild(c)\n\t}\n}\n\nvar worldTypes = [...]string{\n\t\"The standard minecraft map generation.\",\n\t\"A simple generator allowing customised levels of blocks.\",\n\t\"The standard minecraft map generation, but tweaked to allow for much larger biomes.\",\n\t\"The standard minecraft map generation, but tweaked to stretch the land upwards.\",\n\t\"A completely customiseable generator.\",\n}\n\nfunc createMapMode(mode int, o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\tsubmit := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\tsubmit.Type = \"button\"\n\tsubmit.Value = \"Create Map\"\n\tsubmit.AddEventListener(\"click\", false, func(dom.Event) {\n\t\tdata, err := dataParser()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\terr = CreateDefaultMap(data)\n\t\t\tif err != nil {\n\t\t\t\tdom.GetWindow().Alert(err.Error())\n\t\t\t}\n\t\t\to.Close()\n\t\t}()\n\t})\n\treturn func(c dom.Element) {\n\t\td := xjs.CreateElement(\"div\")\n\t\txjs.SetPreText(d, worldTypes[mode])\n\t\tc.AppendChild(d)\n\t\tc.AppendChild(xjs.CreateElement(\"br\"))\n\t\tc.AppendChild(submit)\n\t}\n}\n\nfunc createSuperFlatMap(o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\td := xjs.CreateElement(\"div\")\n\treturn func(c dom.Element) {\n\t\tc.AppendChild(d)\n\t}\n}\n\nfunc createCustomisedMap(o overlay.Overlay, dataParser func() (DefaultMap, error)) func(dom.Element) {\n\td := xjs.CreateElement(\"div\")\n\treturn func(c dom.Element) {\n\t\tc.AppendChild(d)\n\t}\n\t\/\/ Sea Level - 0-255\n\t\/\/ Caves, Strongholds, Villages, Mineshafts, Temples, Ocean Monuments, Ravines\n\t\/\/ Dungeons + Count 1-100\n\t\/\/ Water Lakes + Rarity 1-100\n\t\/\/ Lava Lakes + Rarity 1-100\n\t\/\/ Lava Oceans\n\t\/\/ Biome - All\/Choose\n\t\/\/ Biome Size 1-8\n\t\/\/ River Size 1-5\n\t\/\/ Ores -> Dirt\/Gravel\/Granite\/Diorite\/Andesite\/Coal Ore\/Iron Ore\/Gold Ore\/Redstone Ore\/Diamond Ore\/Lapis Lazuli Ore ->\n\t\/\/ Spawn Size - 1-50\n\t\/\/ Spawn Tries - 0-40\n\t\/\/ Min-Height - 0-255\n\t\/\/ Max-Height - 0-255\n\t\/\/ Advanced ->\n\t\/\/ Main Noise Scale X - 1-5000\n\t\/\/ Main Noise Scale Y - 1-5000\n\t\/\/ Main Noise Scale Z - 1-5000\n\t\/\/ Depth Noise Scale X - 1-2000\n\t\/\/ Depth Noise Scale Y - 1-2000\n\t\/\/ Depth Noise Scale Z - 1-2000\n\t\/\/ Depth Base Size - 1-25\n\t\/\/ Coordinate Scale - 1-6000\n\t\/\/ Height Scale - 1-6000\n\t\/\/ Height Stretch - 0.01-50\n\t\/\/ Upper Limit Scale - 1-5000\n\t\/\/ Lower Limit Scale - 1-5000\n\t\/\/ Biome Depth Weight - 1-20\n\t\/\/ Biome Depth Offset - 1-20\n\t\/\/ Biome Scale Weight - 1-20\n\t\/\/ Biome Scale Offset - 1-20\n\n}\n\nfunc uploadMap(o overlay.Overlay) func(dom.Element) {\n\treturn func(c dom.Element) {\n\t}\n}\n\nfunc viewMap(m Map) func(dom.Event) {\n\treturn func(dom.Event) {\n\t\tservers, err := ServerList()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\td := xjs.CreateElement(\"div\")\n\t\tod := 
overlay.New(d)\n\t\td.AppendChild(xjs.SetInnerText(xjs.CreateElement(\"h1\"), \"Map Details\"))\n\n\t\tnameLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\t\tnameLabel.For = \"name\"\n\t\txjs.SetInnerText(nameLabel, \"Name\")\n\t\tname := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\t\tname.SetID(\"name\")\n\t\tname.Value = m.Name\n\t\tname.Type = \"text\"\n\n\t\tserverLabel := xjs.CreateElement(\"label\").(*dom.HTMLLabelElement)\n\t\tserverLabel.For = \"server\"\n\t\txjs.SetInnerText(serverLabel, \"Server\")\n\t\tserverEditable := true\n\t\tserverSet := xjs.DocumentFragment()\n\t\tvar (\n\t\t\tselServer Server\n\t\t\tserver dom.Element\n\t\t)\n\t\tif m.Server != -1 {\n\t\t\tfor _, s := range servers {\n\t\t\t\tif s.ID == m.Server {\n\t\t\t\t\tselServer = s\n\t\t\t\t\tserverEditable = !s.IsRunning()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif serverEditable {\n\t\t\tsel := xjs.CreateElement(\"select\").(*dom.HTMLSelectElement)\n\t\t\tsel.SetID(\"server\")\n\t\t\tfor _, s := range servers {\n\t\t\t\tif s.Map != -1 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\to := xjs.CreateElement(\"option\").(*dom.HTMLOptionElement)\n\t\t\t\to.Value = strconv.Itoa(s.ID)\n\t\t\t\txjs.SetInnerText(o, s.Name)\n\t\t\t\tif s.ID == m.Server {\n\t\t\t\t\to.Selected = true\n\t\t\t\t}\n\t\t\t\tsel.AppendChild(o)\n\t\t\t}\n\t\t\tif len(servers) > 0 {\n\t\t\t\tc := xjs.CreateElement(\"input\").(*dom.HTMLInputElement)\n\t\t\t\tc.Type = \"button\"\n\t\t\t\tc.Value = \"Set Server\"\n\t\t\t\tserverSet.AppendChild(c)\n\t\t\t\tc.AddEventListener(\"click\", false, func(dom.Event) {\n\n\t\t\t\t})\n\t\t\t}\n\t\t\tserver = sel\n\t\t} else {\n\t\t\t\/\/ the assigned server is running and cannot be changed, so show a\n\t\t\t\/\/ static label rather than calling AppendChild on a nil element\n\t\t\tserver = xjs.CreateElement(\"div\")\n\t\t\txjs.SetInnerText(server, selServer.Name)\n\t\t}\n\n\t\td.AppendChild(nameLabel)\n\t\td.AppendChild(name)\n\t\td.AppendChild(xjs.CreateElement(\"br\"))\n\t\td.AppendChild(serverLabel)\n\t\td.AppendChild(server)\n\t\td.AppendChild(serverSet)\n\n\t\tdom.GetWindow().Document().DocumentElement().AppendChild(od)\n\t}\n}\n\n\/\/ Errors\nvar ErrInvalidGameMode = errors.New(\"invalid game mode\")\n<|endoftext|>"} {"text":"package rel\n\nimport (\n\t\"testing\"\n)\n\nfunc TestAttributeNotEqSql(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" != 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqSql sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEqAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" != 1 OR \\\"users\\\".\\\"id\\\" != 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqNil(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEq(nil))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" IS NOT NULL\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqNil sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqAll(t *testing.T) {\n\tusers := 
NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" != 1 AND \\\"users\\\".\\\"id\\\" != 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGt(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Gt(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" > 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" >= 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEq sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEqAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" >= 1 OR \\\"users\\\".\\\"id\\\" >= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEqAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEqAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" >= 1 AND \\\"users\\\".\\\"id\\\" >= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEqAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" > 1 AND \\\"users\\\".\\\"id\\\" > 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" > 1 OR \\\"users\\\".\\\"id\\\" > 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLt(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Lt(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" < 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := 
users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" <= 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEqAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" <= 1 OR \\\"users\\\".\\\"id\\\" <= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEqAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" <= 1 AND \\\"users\\\".\\\"id\\\" <= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" < 1 OR \\\"users\\\".\\\"id\\\" < 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" < 1 AND \\\"users\\\".\\\"id\\\" < 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeCount(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\").Count())\n\tsql := mgr.ToSql()\n\texpected := \"SELECT COUNT(\\\"users\\\".\\\"id\\\") FROM \\\"users\\\"\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeCount sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Eq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" = 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeEq sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\nCreated test for AttributeNode#Eq to nilpackage rel\n\nimport (\n\t\"testing\"\n)\n\nfunc TestAttributeNotEqSql(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" != 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqSql sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEqAny(Sql(1), 
Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" != 1 OR \\\"users\\\".\\\"id\\\" != 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqNil(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEq(nil))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" IS NOT NULL\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqNil sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeNotEqAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").NotEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" != 1 AND \\\"users\\\".\\\"id\\\" != 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeNotEqAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGt(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Gt(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" > 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" >= 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEq sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEqAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" >= 1 OR \\\"users\\\".\\\"id\\\" >= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEqAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtEqAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" >= 1 AND \\\"users\\\".\\\"id\\\" >= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtEqAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" > 1 AND \\\"users\\\".\\\"id\\\" > 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeGtAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").GtAny(Sql(1), Sql(2)))\n\tsql := 
mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" > 1 OR \\\"users\\\".\\\"id\\\" > 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeGtAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLt(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Lt(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" < 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLt sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" <= 10\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtEq sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEqAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEqAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" <= 1 OR \\\"users\\\".\\\"id\\\" <= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtEqAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtEqAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtEqAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" <= 1 AND \\\"users\\\".\\\"id\\\" <= 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtEqAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtAny(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtAny(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" < 1 OR \\\"users\\\".\\\"id\\\" < 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtAny sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeLtAll(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").LtAll(Sql(1), Sql(2)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE (\\\"users\\\".\\\"id\\\" < 1 AND \\\"users\\\".\\\"id\\\" < 2)\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeLtAll sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeCount(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\").Count())\n\tsql := mgr.ToSql()\n\texpected := \"SELECT COUNT(\\\"users\\\".\\\"id\\\") FROM \\\"users\\\"\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeCount sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeEq(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Eq(Sql(10)))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" = 10\"\n\tif sql != expected 
{\n\t\tt.Logf(\"TestAttributeEq sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n\nfunc TestAttributeEqNil(t *testing.T) {\n\tusers := NewTable(\"users\")\n\tmgr := users.Select(users.Attr(\"id\"))\n\tmgr.Where(users.Attr(\"id\").Eq(nil))\n\tsql := mgr.ToSql()\n\texpected := \"SELECT \\\"users\\\".\\\"id\\\" FROM \\\"users\\\" WHERE \\\"users\\\".\\\"id\\\" IS NULL\"\n\tif sql != expected {\n\t\tt.Logf(\"TestAttributeEqNil sql: \\n%s != \\n%s\", sql, expected)\n\t\tt.Fail()\n\t}\n}\n<|endoftext|>"} {"text":"package sockjs\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n)\nimport \"testing\"\n\nfunc TestInfoGet(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", \"\", nil)\n\tDefaultOptions.info(recorder, request)\n\n\tif recorder.Code != http.StatusOK {\n\t\tt.Errorf(\"Wrong status code, got '%d' expected '%d'\", recorder.Code, http.StatusOK)\n\t}\n\n\tdecoder := json.NewDecoder(recorder.Body)\n\tvar a info\n\tdecoder.Decode(&a)\n\tif !a.Websocket {\n\t\tt.Errorf(\"Websocket field should be set true\")\n\t}\n\tif a.CookieNeeded {\n\t\tt.Errorf(\"CookieNeede should be set to false\")\n\t}\n}\n\nfunc TestInfoOptions(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"OPTIONS\", \"\", nil)\n\tDefaultOptions.info(recorder, request)\n\tif recorder.Code != http.StatusNoContent {\n\t\tt.Errorf(\"Incorrect status code received, got '%d' expected '%d'\", recorder.Code, http.StatusNoContent)\n\t}\n}\n\nfunc TestInfoUnknown(t *testing.T) {\n\treq, _ := http.NewRequest(\"PUT\", \"\", nil)\n\trec := httptest.NewRecorder()\n\tDefaultOptions.info(rec, req)\n\tif rec.Code != http.StatusNotFound {\n\t\tt.Errorf(\"Incorrec response status, got '%d' expected '%d'\", rec.Code, http.StatusNotFound)\n\t}\n}\n\nfunc TestCookies(t *testing.T) {\n\trec := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"\", nil)\n\toptionsWithCookies := DefaultOptions\n\toptionsWithCookies.JSessionID = DefaultJSessionID\n\toptionsWithCookies.cookie(rec, req)\n\tif rec.Header().Get(\"set-cookie\") != \"JSESSIONID=dummy; Path=\/\" {\n\t\tt.Errorf(\"Cookie not properly set in response\")\n\t}\n\t\/\/ cookie value set in request\n\treq.AddCookie(&http.Cookie{Name: \"JSESSIONID\", Value: \"some_jsession_id\", Path: \"\/\"})\n\trec = httptest.NewRecorder()\n\toptionsWithCookies.cookie(rec, req)\n\tif rec.Header().Get(\"set-cookie\") != \"JSESSIONID=some_jsession_id; Path=\/\" {\n\t\tt.Errorf(\"Cookie not properly set in response\")\n\t}\n}\nFix typo in options_test.gopackage sockjs\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n)\nimport \"testing\"\n\nfunc TestInfoGet(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", \"\", nil)\n\tDefaultOptions.info(recorder, request)\n\n\tif recorder.Code != http.StatusOK {\n\t\tt.Errorf(\"Wrong status code, got '%d' expected '%d'\", recorder.Code, http.StatusOK)\n\t}\n\n\tdecoder := json.NewDecoder(recorder.Body)\n\tvar a info\n\tdecoder.Decode(&a)\n\tif !a.Websocket {\n\t\tt.Errorf(\"Websocket field should be set true\")\n\t}\n\tif a.CookieNeeded {\n\t\tt.Errorf(\"CookieNeeded should be set to false\")\n\t}\n}\n\nfunc TestInfoOptions(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"OPTIONS\", \"\", nil)\n\tDefaultOptions.info(recorder, request)\n\tif recorder.Code != http.StatusNoContent {\n\t\tt.Errorf(\"Incorrect status code received, got '%d' 
expected '%d'\", recorder.Code, http.StatusNoContent)\n\t}\n}\n\nfunc TestInfoUnknown(t *testing.T) {\n\treq, _ := http.NewRequest(\"PUT\", \"\", nil)\n\trec := httptest.NewRecorder()\n\tDefaultOptions.info(rec, req)\n\tif rec.Code != http.StatusNotFound {\n\t\tt.Errorf(\"Incorrec response status, got '%d' expected '%d'\", rec.Code, http.StatusNotFound)\n\t}\n}\n\nfunc TestCookies(t *testing.T) {\n\trec := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"\", nil)\n\toptionsWithCookies := DefaultOptions\n\toptionsWithCookies.JSessionID = DefaultJSessionID\n\toptionsWithCookies.cookie(rec, req)\n\tif rec.Header().Get(\"set-cookie\") != \"JSESSIONID=dummy; Path=\/\" {\n\t\tt.Errorf(\"Cookie not properly set in response\")\n\t}\n\t\/\/ cookie value set in request\n\treq.AddCookie(&http.Cookie{Name: \"JSESSIONID\", Value: \"some_jsession_id\", Path: \"\/\"})\n\trec = httptest.NewRecorder()\n\toptionsWithCookies.cookie(rec, req)\n\tif rec.Header().Get(\"set-cookie\") != \"JSESSIONID=some_jsession_id; Path=\/\" {\n\t\tt.Errorf(\"Cookie not properly set in response\")\n\t}\n}\n<|endoftext|>"} {"text":"package smtpapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"unicode\/utf16\"\n)\n\nconst Version = \"0.4.0\"\n\n\/\/ SMTPAPIHeader will be used to set up X-SMTPAPI params\ntype SMTPAPIHeader struct {\n\tTo []string `json:\"to,omitempty\"`\n\tSub map[string][]string `json:\"sub,omitempty\"`\n\tSection map[string]string `json:\"section,omitempty\"`\n\tCategory []string `json:\"category,omitempty\"`\n\tUniqueArgs map[string]string `json:\"unique_args,omitempty\"`\n\tFilters map[string]Filter `json:\"filters,omitempty\"`\n\tASMGroupID int `json:\"asm_group_id,omitempty\"`\n\tSendAt int64 `json:\"send_at,omitempty\"`\n\tSendEachAt []int64 `json:\"send_each_at,omitempty\"`\n\tIpPool string `json:\"ip_pool,omitempty\"`\n}\n\n\/\/ Filter represents an App\/Filter and its settings\ntype Filter struct {\n\tSettings map[string]string `json:\"settings,omitempty\"`\n}\n\n\/\/ NewSMTPAPIHeader creates a new header struct\nfunc NewSMTPAPIHeader() *SMTPAPIHeader {\n\treturn &SMTPAPIHeader{}\n}\n\n\/\/ AddTo appends a single email to the To header\nfunc (h *SMTPAPIHeader) AddTo(email string) {\n\th.To = append(h.To, email)\n}\n\n\/\/ AddTos appends multiple emails to the To header\nfunc (h *SMTPAPIHeader) AddTos(emails []string) {\n\tfor i := 0; i < len(emails); i++ {\n\t\th.AddTo(emails[i])\n\t}\n}\n\n\/\/ SetTos sets the value of the To header\nfunc (h *SMTPAPIHeader) SetTos(emails []string) {\n\th.To = emails\n}\n\n\/\/ AddSubstitution adds a new substitution to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitution(key, sub string) {\n\tif h.Sub == nil {\n\t\th.Sub = make(map[string][]string)\n\t}\n\th.Sub[key] = append(h.Sub[key], sub)\n}\n\n\/\/ AddSubstitutions adds a multiple substitutions to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitutions(key string, subs []string) {\n\tfor i := 0; i < len(subs); i++ {\n\t\th.AddSubstitution(key, subs[i])\n\t}\n}\n\n\/\/ SetSubstitutions sets the value of the substitutions on the Sub header\nfunc (h *SMTPAPIHeader) SetSubstitutions(sub map[string][]string) {\n\th.Sub = sub\n}\n\n\/\/ AddSection sets the value for a specific section\nfunc (h *SMTPAPIHeader) AddSection(section, value string) {\n\tif h.Section == nil {\n\t\th.Section = make(map[string]string)\n\t}\n\th.Section[section] = value\n}\n\n\/\/ SetSections sets the value for the Section header\nfunc (h *SMTPAPIHeader) SetSections(sections map[string]string) {\n\th.Section = 
sections\n}\n\n\/\/ AddCategory adds a new category to the Category header\nfunc (h *SMTPAPIHeader) AddCategory(category string) {\n\th.Category = append(h.Category, category)\n}\n\n\/\/ AddCategories adds multiple categories to the Category header\nfunc (h *SMTPAPIHeader) AddCategories(categories []string) {\n\tfor i := 0; i < len(categories); i++ {\n\t\th.AddCategory(categories[i])\n\t}\n}\n\n\/\/ SetCategories will set the value of the Categories field\nfunc (h *SMTPAPIHeader) SetCategories(categories []string) {\n\th.Category = categories\n}\n\n\/\/ SetASMGroupID will set the value of the ASMGroupID field\nfunc (h *SMTPAPIHeader) SetASMGroupID(groupID int) {\n\th.ASMGroupID = groupID\n}\n\n\/\/ AddUniqueArg will set the value of a specific argument\nfunc (h *SMTPAPIHeader) AddUniqueArg(arg, value string) {\n\tif h.UniqueArgs == nil {\n\t\th.UniqueArgs = make(map[string]string)\n\t}\n\th.UniqueArgs[arg] = value\n}\n\n\/\/ SetUniqueArgs will set the value of the Unique_args header\nfunc (h *SMTPAPIHeader) SetUniqueArgs(args map[string]string) {\n\th.UniqueArgs = args\n}\n\n\/\/ AddFilter will set the specific setting for a filter\nfunc (h *SMTPAPIHeader) AddFilter(filter, setting, value string) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\tif _, ok := h.Filters[filter]; !ok {\n\t\th.Filters[filter] = Filter{\n\t\t\tSettings: make(map[string]string),\n\t\t}\n\t}\n\th.Filters[filter].Settings[setting] = value\n}\n\n\/\/ SetFilter takes in a Filter struct with predetermined settings and sets it for such Filter key\nfunc (h *SMTPAPIHeader) SetFilter(filter string, value *Filter) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\th.Filters[filter] = *value\n}\n\n\/\/ SetSendAt takes in a timestamp which determines when the email will be sent\nfunc (h *SMTPAPIHeader) SetSendAt(sendAt int64) {\n\th.SendAt = sendAt\n}\n\n\/\/ AddSendEachAt takes in a timestamp and pushes it into a list Must match length of To emails\nfunc (h *SMTPAPIHeader) AddSendEachAt(sendEachAt int64) {\n\th.SendEachAt = append(h.SendEachAt, sendEachAt)\n}\n\n\/\/ SetSendEachAt takes an array of timestamps. 
Must match length of To emails\nfunc (h *SMTPAPIHeader) SetSendEachAt(sendEachAt []int64) {\n\th.SendEachAt = sendEachAt\n}\n\n\/\/ SetIpPool takes a strings and sets the IpPool field\nfunc (h *SMTPAPIHeader) SetIpPool(ipPool string) {\n\th.IpPool = ipPool\n}\n\n\/\/ Unicode escape\nfunc escapeUnicode(input string) string {\n\t\/\/var buffer bytes.Buffer\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, r := range input {\n\t\tif r > 65535 {\n\t\t\t\/\/ surrogate pair\n\t\t\tvar r1, r2 = utf16.EncodeRune(r)\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%x\\\\u%x\", r1, r2)\n\t\t\tbuffer.WriteString(s)\n\t\t} else if r > 127 {\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t} else {\n\t\t\tvar s = fmt.Sprintf(\"%c\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t}\n\t}\n\treturn buffer.String()\n}\n\n\/\/ JSONString returns the representation of the Header\nfunc (h *SMTPAPIHeader) JSONString() (string, error) {\n\theaders, e := json.Marshal(h)\n\treturn escapeUnicode(string(headers)), e\n}\nadd loaderpackage smtpapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"unicode\/utf16\"\n)\n\nconst Version = \"0.4.0\"\n\n\/\/ SMTPAPIHeader will be used to set up X-SMTPAPI params\ntype SMTPAPIHeader struct {\n\tTo []string `json:\"to,omitempty\"`\n\tSub map[string][]string `json:\"sub,omitempty\"`\n\tSection map[string]string `json:\"section,omitempty\"`\n\tCategory []string `json:\"category,omitempty\"`\n\tUniqueArgs map[string]string `json:\"unique_args,omitempty\"`\n\tFilters map[string]Filter `json:\"filters,omitempty\"`\n\tASMGroupID int `json:\"asm_group_id,omitempty\"`\n\tSendAt int64 `json:\"send_at,omitempty\"`\n\tSendEachAt []int64 `json:\"send_each_at,omitempty\"`\n\tIpPool string `json:\"ip_pool,omitempty\"`\n}\n\n\/\/ Filter represents an App\/Filter and its settings\ntype Filter struct {\n\tSettings map[string]string `json:\"settings,omitempty\"`\n}\n\n\/\/ NewSMTPAPIHeader creates a new header struct\nfunc NewSMTPAPIHeader() *SMTPAPIHeader {\n\treturn &SMTPAPIHeader{}\n}\n\n\/\/ AddTo appends a single email to the To header\nfunc (h *SMTPAPIHeader) AddTo(email string) {\n\th.To = append(h.To, email)\n}\n\n\/\/ AddTos appends multiple emails to the To header\nfunc (h *SMTPAPIHeader) AddTos(emails []string) {\n\tfor i := 0; i < len(emails); i++ {\n\t\th.AddTo(emails[i])\n\t}\n}\n\n\/\/ SetTos sets the value of the To header\nfunc (h *SMTPAPIHeader) SetTos(emails []string) {\n\th.To = emails\n}\n\n\/\/ AddSubstitution adds a new substitution to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitution(key, sub string) {\n\tif h.Sub == nil {\n\t\th.Sub = make(map[string][]string)\n\t}\n\th.Sub[key] = append(h.Sub[key], sub)\n}\n\n\/\/ AddSubstitutions adds a multiple substitutions to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitutions(key string, subs []string) {\n\tfor i := 0; i < len(subs); i++ {\n\t\th.AddSubstitution(key, subs[i])\n\t}\n}\n\n\/\/ SetSubstitutions sets the value of the substitutions on the Sub header\nfunc (h *SMTPAPIHeader) SetSubstitutions(sub map[string][]string) {\n\th.Sub = sub\n}\n\n\/\/ AddSection sets the value for a specific section\nfunc (h *SMTPAPIHeader) AddSection(section, value string) {\n\tif h.Section == nil {\n\t\th.Section = make(map[string]string)\n\t}\n\th.Section[section] = value\n}\n\n\/\/ SetSections sets the value for the Section header\nfunc (h *SMTPAPIHeader) SetSections(sections map[string]string) {\n\th.Section = sections\n}\n\n\/\/ AddCategory adds a new category to the Category header\nfunc (h *SMTPAPIHeader) 
AddCategory(category string) {\n\th.Category = append(h.Category, category)\n}\n\n\/\/ AddCategories adds multiple categories to the Category header\nfunc (h *SMTPAPIHeader) AddCategories(categories []string) {\n\tfor i := 0; i < len(categories); i++ {\n\t\th.AddCategory(categories[i])\n\t}\n}\n\n\/\/ SetCategories will set the value of the Categories field\nfunc (h *SMTPAPIHeader) SetCategories(categories []string) {\n\th.Category = categories\n}\n\n\/\/ SetASMGroupID will set the value of the ASMGroupID field\nfunc (h *SMTPAPIHeader) SetASMGroupID(groupID int) {\n\th.ASMGroupID = groupID\n}\n\n\/\/ AddUniqueArg will set the value of a specific argument\nfunc (h *SMTPAPIHeader) AddUniqueArg(arg, value string) {\n\tif h.UniqueArgs == nil {\n\t\th.UniqueArgs = make(map[string]string)\n\t}\n\th.UniqueArgs[arg] = value\n}\n\n\/\/ SetUniqueArgs will set the value of the Unique_args header\nfunc (h *SMTPAPIHeader) SetUniqueArgs(args map[string]string) {\n\th.UniqueArgs = args\n}\n\n\/\/ AddFilter will set the specific setting for a filter\nfunc (h *SMTPAPIHeader) AddFilter(filter, setting, value string) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\tif _, ok := h.Filters[filter]; !ok {\n\t\th.Filters[filter] = Filter{\n\t\t\tSettings: make(map[string]string),\n\t\t}\n\t}\n\th.Filters[filter].Settings[setting] = value\n}\n\n\/\/ SetFilter takes in a Filter struct with predetermined settings and sets it for such Filter key\nfunc (h *SMTPAPIHeader) SetFilter(filter string, value *Filter) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\th.Filters[filter] = *value\n}\n\n\/\/ SetSendAt takes in a timestamp which determines when the email will be sent\nfunc (h *SMTPAPIHeader) SetSendAt(sendAt int64) {\n\th.SendAt = sendAt\n}\n\n\/\/ AddSendEachAt takes in a timestamp and pushes it into a list Must match length of To emails\nfunc (h *SMTPAPIHeader) AddSendEachAt(sendEachAt int64) {\n\th.SendEachAt = append(h.SendEachAt, sendEachAt)\n}\n\n\/\/ SetSendEachAt takes an array of timestamps. 
Must match length of To emails\nfunc (h *SMTPAPIHeader) SetSendEachAt(sendEachAt []int64) {\n\th.SendEachAt = sendEachAt\n}\n\n\/\/ SetIpPool takes a string and sets the IpPool field\nfunc (h *SMTPAPIHeader) SetIpPool(ipPool string) {\n\th.IpPool = ipPool\n}\n\n\/\/ Unicode escape\nfunc escapeUnicode(input string) string {\n\t\/\/var buffer bytes.Buffer\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, r := range input {\n\t\tif r > 65535 {\n\t\t\t\/\/ surrogate pair\n\t\t\tvar r1, r2 = utf16.EncodeRune(r)\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%x\\\\u%x\", r1, r2)\n\t\t\tbuffer.WriteString(s)\n\t\t} else if r > 127 {\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t} else {\n\t\t\tvar s = fmt.Sprintf(\"%c\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t}\n\t}\n\treturn buffer.String()\n}\n\n\/\/ JSONString returns the escaped JSON representation of the Header\nfunc (h *SMTPAPIHeader) JSONString() (string, error) {\n\theaders, e := json.Marshal(h)\n\treturn escapeUnicode(string(headers)), e\n}\n\n\/\/ Load allows you to load a pre-formed x-smtpapi header\nfunc (h *SMTPAPIHeader) Load(b []byte) error {\n\treturn json.Unmarshal(b, h)\n}\n
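\n\/\/ A usage sketch (addresses and values are made up, not part of the library):\n\/\/ build a header with the setters above and print the JSON that would be placed\n\/\/ in the X-SMTPAPI mail header.\nfunc exampleHeaderUsage() {\n\th := NewSMTPAPIHeader()\n\th.AddTo(\"alice@example.com\")\n\th.AddSubstitution(\"-name-\", \"Alice\")\n\th.AddCategory(\"welcome\")\n\th.SetSendAt(1428611024)\n\tif s, err := h.JSONString(); err == nil {\n\t\tfmt.Println(s)\n\t}\n}\n<|endoftext|>"} {"text":"package jose\n\nimport (\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"math\/big\"\n\t\"strings\"\n)\n\n\/\/ JWS\n\ntype JwsHeader struct {\n\tAlgorithm JoseAlgorithm `json:\"alg,omitempty\"`\n\tNonce string `json:\"nonce,omitempty\"`\n\tKey JsonWebKey `json:\"jwk,omitempty\"`\n}\n\n\/\/ rawJsonWebSignature and JsonWebSignature are the same.\n\/\/ We just use rawJsonWebSignature for the basic parse,\n\/\/ and JsonWebSignature for the full parse\ntype rawJsonWebSignature struct {\n\tsigned bool\n\tHeader JwsHeader `json:\"header,omitempty\"`\n\tProtected JsonBuffer `json:\"protected,omitempty\"`\n\tPayload JsonBuffer `json:\"payload,omitempty\"`\n\tSignature JsonBuffer `json:\"signature,omitempty\"`\n}\n\ntype JsonWebSignature rawJsonWebSignature\n\n\/\/ No need for special MarshalJSON handling; it's OK for\n\/\/ elements to remain in the unprotected header, since they'll\n\/\/ just be overwritten.\n\/\/ func (jwk JsonWebKey) MarshalJSON() ([]byte, error) {}\n\n\/\/ On unmarshal, copy protected header fields to protected\nfunc (jws *JsonWebSignature) UnmarshalJSON(data []byte) error {\n\tvar raw rawJsonWebSignature\n\terr := json.Unmarshal(data, &raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Copy over simple fields\n\tjws.Header = raw.Header\n\tjws.Protected = raw.Protected\n\tjws.Payload = raw.Payload\n\tjws.Signature = raw.Signature\n\n\tif len(jws.Protected) > 0 {\n\t\t\/\/ This overwrites fields in jwk.Header if there is a conflict\n\t\terr = json.Unmarshal(jws.Protected, &jws.Header)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Check that required fields are present\n\tif len(jws.Signature) == 0 || len(jws.Payload) == 0 {\n\t\treturn errors.New(\"JWS missing required fields\")\n\t}\n\n\treturn nil\n}\n\nfunc (jws JsonWebSignature) MarshalCompact() ([]byte, error) {\n\tif !jws.signed {\n\t\treturn []byte{}, errors.New(\"Cannot marshal unsigned JWS\")\n\t}\n\n\treturn []byte(B64enc(jws.Protected) + \".\" + B64enc(jws.Payload) + \".\" + B64enc(jws.Signature)), nil\n}\n\nfunc UnmarshalCompact(data []byte) (JsonWebSignature, error) {\n\tjws := JsonWebSignature{}\n\tparts := strings.Split(string(data), \".\")\n\tif len(parts) != 3 {\n\t\treturn jws, 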
errors.New(\"Mal-formed compact JWS\")\n\t}\n\n\t\/\/ Decode simple fields\n\tvar err error\n\tjws.Protected, err = B64dec(parts[0])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\tjws.Payload, err = B64dec(parts[1])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\tjws.Signature, err = B64dec(parts[2])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\n\t\/\/ Populate header from protected\n\terr = json.Unmarshal(jws.Protected, &jws.Header)\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\n\tjws.signed = true\n\treturn jws, nil\n}\n\nfunc prepareInput(jws JsonWebSignature) (crypto.Hash, []byte, error) {\n\tinput := []byte(B64enc(jws.Protected) + \".\" + B64enc(jws.Payload))\n\tzeroh := crypto.Hash(0)\n\tzerob := []byte{}\n\n\t\/\/ TODO: Check for valid algorithm\n\n\t\/\/ Hash the payload\n\thashAlg := string(jws.Header.Algorithm[2:])\n\tvar hashID crypto.Hash\n\tvar hash hash.Hash\n\tswitch hashAlg {\n\tcase \"256\":\n\t\thashID = crypto.SHA256\n\t\thash = sha256.New()\n\tcase \"384\":\n\t\thashID = crypto.SHA384\n\t\thash = sha512.New384()\n\tcase \"512\":\n\t\thashID = crypto.SHA512\n\t\thash = sha512.New()\n\tdefault:\n\t\treturn zeroh, zerob, errors.New(\"Invalid hash length \" + hashAlg)\n\t}\n\thash.Write(input)\n\tinputHash := hash.Sum(nil)\n\n\treturn hashID, inputHash, nil\n}\n\nfunc Sign(alg JoseAlgorithm, privateKey interface{}, payload []byte) (JsonWebSignature, error) {\n\tzero := JsonWebSignature{}\n\n\t\/\/ Create a working JWS\n\tjws := JsonWebSignature{Payload: payload}\n\tjws.Header.Algorithm = alg\n\n\t\/\/ Cast the private key to the appropriate type, and\n\t\/\/ add the corresponding public key to the header\n\tvar rsaPriv *rsa.PrivateKey\n\tvar ecPriv *ecdsa.PrivateKey\n\tswitch privateKey := privateKey.(type) {\n\tcase rsa.PrivateKey:\n\t\trsaPriv = &privateKey\n\t\tjws.Header.Key = JsonWebKey{KeyType: KeyTypeRSA, Rsa: &rsaPriv.PublicKey}\n\tcase ecdsa.PrivateKey:\n\t\tecPriv = &privateKey\n\t\tjws.Header.Key = JsonWebKey{KeyType: KeyTypeEC, Ec: &ecPriv.PublicKey}\n\tdefault:\n\t\treturn zero, errors.New(fmt.Sprintf(\"Unsupported key type for %+v\\n\", privateKey))\n\t}\n\n\t\/\/ Base64-encode the header -> protected\n\t\/\/ NOTE: This implies that unprotected headers are not supported\n\tprotected, err := json.Marshal(jws.Header)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\tjws.Protected = protected\n\n\t\/\/ Compute the signature input\n\thashID, inputHash, err := prepareInput(jws)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\n\t\/\/ Sign\n\t\/\/ TODO: Check that key type is compatible\n\tvar sig []byte\n\tswitch jws.Header.Algorithm[:1] {\n\tcase \"R\":\n\t\tif rsaPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requres RSA private key\", jws.Header.Algorithm))\n\t\t}\n\t\tsig, err = rsa.SignPKCS1v15(rand.Reader, rsaPriv, hashID, inputHash)\n\tcase \"P\":\n\t\tif rsaPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requres RSA private key\", jws.Header.Algorithm))\n\t\t}\n\t\tsig, err = rsa.SignPSS(rand.Reader, rsaPriv, hashID, inputHash, &rsa.PSSOptions{})\n\tcase \"E\":\n\t\tif ecPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requres EC private key\", jws.Header.Algorithm))\n\t\t}\n\t\tr, s, err := ecdsa.Sign(rand.Reader, ecPriv, inputHash)\n\t\tif err == nil {\n\t\t\tsig = concatRS(r, s)\n\t\t}\n\tdefault:\n\t\treturn zero, errors.New(\"Invalid signature algorithm \" + string(jws.Header.Algorithm[:1]))\n\t}\n\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\tjws.Signature = sig\n\tjws.signed 
= true\n\n\treturn jws, nil\n}\n\nfunc concatRS(r, s *big.Int) []byte {\n\trb, sb := r.Bytes(), s.Bytes()\n\n\tif padSize := len(rb) - len(sb); padSize > 0 {\n\t\tsb = append(make([]byte, padSize), sb...)\n\t} else if padSize < 0 {\n\t\trb = append(make([]byte, -padSize), rb...)\n\t}\n\n\treturn append(rb, sb...)\n}\n\nfunc (jws *JsonWebSignature) Verify() error {\n\thashID, inputHash, err := prepareInput(*jws)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig := jws.Signature\n\n\t\/\/ Check the signature, branching from the first character in the alg value\n\t\/\/ For example: \"RS256\" => \"R\" => PKCS1v15\n\tswitch jws.Header.Algorithm[:1] {\n\tcase \"R\":\n\t\tif jws.Header.Key.Rsa == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires RSA key\", jws.Header.Algorithm))\n\t\t}\n\t\treturn rsa.VerifyPKCS1v15(jws.Header.Key.Rsa, hashID, inputHash, sig)\n\tcase \"P\":\n\t\tif jws.Header.Key.Rsa == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires RSA key\", jws.Header.Algorithm))\n\t\t}\n\t\treturn rsa.VerifyPSS(jws.Header.Key.Rsa, hashID, inputHash, sig, nil)\n\tcase \"E\":\n\t\tif jws.Header.Key.Ec == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires EC key\", jws.Header.Algorithm))\n\t\t}\n\t\tintlen := len(sig) \/ 2\n\t\trBytes, sBytes := sig[:intlen], sig[intlen:]\n\t\tr, s := big.NewInt(0), big.NewInt(0)\n\t\tr.SetBytes(rBytes)\n\t\ts.SetBytes(sBytes)\n\t\tif ecdsa.Verify(jws.Header.Key.Ec, inputHash, r, s) {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errors.New(\"ECDSA signature validation failed\")\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"Invalid signature algorithm \" + string(jws.Header.Algorithm[:1]))\n\t}\n}\nAdd comment about PSSOptions to jws.go.package jose\n\nimport (\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"math\/big\"\n\t\"strings\"\n)\n\n\/\/ JWS\n\ntype JwsHeader struct {\n\tAlgorithm JoseAlgorithm `json:\"alg,omitempty\"`\n\tNonce string `json:\"nonce,omitempty\"`\n\tKey JsonWebKey `json:\"jwk,omitempty\"`\n}\n\n\/\/ rawJsonWebSignature and JsonWebSignature are the same.\n\/\/ We just use rawJsonWebSignature for the basic parse,\n\/\/ and JsonWebSignature for the full parse\ntype rawJsonWebSignature struct {\n\tsigned bool\n\tHeader JwsHeader `json:\"header,omitempty\"`\n\tProtected JsonBuffer `json:\"protected,omitempty\"`\n\tPayload JsonBuffer `json:\"payload,omitempty\"`\n\tSignature JsonBuffer `json:\"signature,omitempty\"`\n}\n\ntype JsonWebSignature rawJsonWebSignature\n\n\/\/ No need for special MarshalJSON handling; it's OK for\n\/\/ elements to remain in the unprotected header, since they'll\n\/\/ just be overwritten.\n\/\/ func (jwk JsonWebKey) MarshalJSON() ([]byte, error) {}\n\n\/\/ On unmarshal, copy protected header fields to protected\nfunc (jws *JsonWebSignature) UnmarshalJSON(data []byte) error {\n\tvar raw rawJsonWebSignature\n\terr := json.Unmarshal(data, &raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Copy over simple fields\n\tjws.Header = raw.Header\n\tjws.Protected = raw.Protected\n\tjws.Payload = raw.Payload\n\tjws.Signature = raw.Signature\n\n\tif len(jws.Protected) > 0 {\n\t\t\/\/ This overwrites fields in jwk.Header if there is a conflict\n\t\terr = json.Unmarshal(jws.Protected, &jws.Header)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Check that required fields are present\n\tif len(jws.Signature) == 0 || len(jws.Payload) == 0 
{\n\t\treturn errors.New(\"JWS missing required fields\")\n\t}\n\n\treturn nil\n}\n\nfunc (jws JsonWebSignature) MarshalCompact() ([]byte, error) {\n\tif !jws.signed {\n\t\treturn []byte{}, errors.New(\"Cannot marshal unsigned JWS\")\n\t}\n\n\treturn []byte(B64enc(jws.Protected) + \".\" + B64enc(jws.Payload) + \".\" + B64enc(jws.Signature)), nil\n}\n\nfunc UnmarshalCompact(data []byte) (JsonWebSignature, error) {\n\tjws := JsonWebSignature{}\n\tparts := strings.Split(string(data), \".\")\n\tif len(parts) != 3 {\n\t\treturn jws, errors.New(\"Mal-formed compact JWS\")\n\t}\n\n\t\/\/ Decode simple fields\n\tvar err error\n\tjws.Protected, err = B64dec(parts[0])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\tjws.Payload, err = B64dec(parts[1])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\tjws.Signature, err = B64dec(parts[2])\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\n\t\/\/ Populate header from protected\n\terr = json.Unmarshal(jws.Protected, &jws.Header)\n\tif err != nil {\n\t\treturn jws, err\n\t}\n\n\tjws.signed = true\n\treturn jws, nil\n}\n\nfunc prepareInput(jws JsonWebSignature) (crypto.Hash, []byte, error) {\n\tinput := []byte(B64enc(jws.Protected) + \".\" + B64enc(jws.Payload))\n\tzeroh := crypto.Hash(0)\n\tzerob := []byte{}\n\n\t\/\/ TODO: Check for valid algorithm\n\n\t\/\/ Hash the payload\n\thashAlg := string(jws.Header.Algorithm[2:])\n\tvar hashID crypto.Hash\n\tvar hash hash.Hash\n\tswitch hashAlg {\n\tcase \"256\":\n\t\thashID = crypto.SHA256\n\t\thash = sha256.New()\n\tcase \"384\":\n\t\thashID = crypto.SHA384\n\t\thash = sha512.New384()\n\tcase \"512\":\n\t\thashID = crypto.SHA512\n\t\thash = sha512.New()\n\tdefault:\n\t\treturn zeroh, zerob, errors.New(\"Invalid hash length \" + hashAlg)\n\t}\n\thash.Write(input)\n\tinputHash := hash.Sum(nil)\n\n\treturn hashID, inputHash, nil\n}\n\nfunc Sign(alg JoseAlgorithm, privateKey interface{}, payload []byte) (JsonWebSignature, error) {\n\tzero := JsonWebSignature{}\n\n\t\/\/ Create a working JWS\n\tjws := JsonWebSignature{Payload: payload}\n\tjws.Header.Algorithm = alg\n\n\t\/\/ Cast the private key to the appropriate type, and\n\t\/\/ add the corresponding public key to the header\n\tvar rsaPriv *rsa.PrivateKey\n\tvar ecPriv *ecdsa.PrivateKey\n\tswitch privateKey := privateKey.(type) {\n\tcase rsa.PrivateKey:\n\t\trsaPriv = &privateKey\n\t\tjws.Header.Key = JsonWebKey{KeyType: KeyTypeRSA, Rsa: &rsaPriv.PublicKey}\n\tcase ecdsa.PrivateKey:\n\t\tecPriv = &privateKey\n\t\tjws.Header.Key = JsonWebKey{KeyType: KeyTypeEC, Ec: &ecPriv.PublicKey}\n\tdefault:\n\t\treturn zero, errors.New(fmt.Sprintf(\"Unsupported key type for %+v\\n\", privateKey))\n\t}\n\n\t\/\/ Base64-encode the header -> protected\n\t\/\/ NOTE: This implies that unprotected headers are not supported\n\tprotected, err := json.Marshal(jws.Header)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\tjws.Protected = protected\n\n\t\/\/ Compute the signature input\n\thashID, inputHash, err := prepareInput(jws)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\n\t\/\/ Sign\n\t\/\/ TODO: Check that key type is compatible\n\tvar sig []byte\n\tswitch jws.Header.Algorithm[:1] {\n\tcase \"R\":\n\t\tif rsaPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requres RSA private key\", jws.Header.Algorithm))\n\t\t}\n\t\tsig, err = rsa.SignPKCS1v15(rand.Reader, rsaPriv, hashID, inputHash)\n\tcase \"P\":\n\t\tif rsaPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requres RSA private key\", jws.Header.Algorithm))\n\t\t}\n\t\t\/\/ Contrary to 
docs, you can't pass a nil instead of the PSSOptions; you'll\n\t\t\/\/ get a nil dereference.\n\t\tsig, err = rsa.SignPSS(rand.Reader, rsaPriv, hashID, inputHash, &rsa.PSSOptions{})\n\tcase \"E\":\n\t\tif ecPriv == nil {\n\t\t\treturn zero, errors.New(fmt.Sprintf(\"Algorithm %s requires EC private key\", jws.Header.Algorithm))\n\t\t}\n\t\tr, s, err := ecdsa.Sign(rand.Reader, ecPriv, inputHash)\n\t\tif err == nil {\n\t\t\tsig = concatRS(r, s)\n\t\t}\n\tdefault:\n\t\treturn zero, errors.New(\"Invalid signature algorithm \" + string(jws.Header.Algorithm[:1]))\n\t}\n\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\tjws.Signature = sig\n\tjws.signed = true\n\n\treturn jws, nil\n}\n\nfunc concatRS(r, s *big.Int) []byte {\n\trb, sb := r.Bytes(), s.Bytes()\n\n\tif padSize := len(rb) - len(sb); padSize > 0 {\n\t\tsb = append(make([]byte, padSize), sb...)\n\t} else if padSize < 0 {\n\t\trb = append(make([]byte, -padSize), rb...)\n\t}\n\n\treturn append(rb, sb...)\n}\n\nfunc (jws *JsonWebSignature) Verify() error {\n\thashID, inputHash, err := prepareInput(*jws)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig := jws.Signature\n\n\t\/\/ Check the signature, branching from the first character in the alg value\n\t\/\/ For example: \"RS256\" => \"R\" => PKCS1v15\n\tswitch jws.Header.Algorithm[:1] {\n\tcase \"R\":\n\t\tif jws.Header.Key.Rsa == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires RSA key\", jws.Header.Algorithm))\n\t\t}\n\t\treturn rsa.VerifyPKCS1v15(jws.Header.Key.Rsa, hashID, inputHash, sig)\n\tcase \"P\":\n\t\tif jws.Header.Key.Rsa == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires RSA key\", jws.Header.Algorithm))\n\t\t}\n\t\treturn rsa.VerifyPSS(jws.Header.Key.Rsa, hashID, inputHash, sig, nil)\n\tcase \"E\":\n\t\tif jws.Header.Key.Ec == nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Algorithm %s requires EC key\", jws.Header.Algorithm))\n\t\t}\n\t\tintlen := len(sig) \/ 2\n\t\trBytes, sBytes := sig[:intlen], sig[intlen:]\n\t\tr, s := big.NewInt(0), big.NewInt(0)\n\t\tr.SetBytes(rBytes)\n\t\ts.SetBytes(sBytes)\n\t\tif ecdsa.Verify(jws.Header.Key.Ec, inputHash, r, s) {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errors.New(\"ECDSA signature validation failed\")\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"Invalid signature algorithm \" + string(jws.Header.Algorithm[:1]))\n\t}\n}\n
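\n\/\/ A round-trip sketch, not part of the package API. It assumes \"RS256\" is an\n\/\/ accepted algorithm value (prepareInput slices it as \"RS\" + \"256\"), and it\n\/\/ passes the private key by value, as the type switch in Sign expects.\nfunc exampleSignVerify() error {\n\tkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjws, err := Sign(JoseAlgorithm(\"RS256\"), *key, []byte(\"hello, jose\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tcompact, err := jws.MarshalCompact()\n\tif err != nil {\n\t\treturn err\n\t}\n\tparsed, err := UnmarshalCompact(compact)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn parsed.Verify()\n}\n<|endoftext|>"} {"text":"package scheme\n\nimport (\n\t\"fmt\"\n\t\"github.com\/orc\/db\"\n\t\"github.com\/orc\/mvc\/controllers\"\n)\n\nfunc Init() {\n\t\/\/Drop()\n\tfor i, _ := range db.Tables {\n\t\tcontrollers.GetModel(db.Tables[i]).Create()\n\t}\n}\n\nfunc Drop() {\n\tfor _, v := range db.Tables {\n\t\tdb.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s CASCADE;\", v), nil)\n\t\tdb.Query(fmt.Sprintf(\"DROP SEQUENCE IF EXISTS %s_id_seq;\", v), nil)\n\t}\n}\nscheme: fix whitespace, uncommentpackage scheme\n\nimport (\n \"fmt\"\n \"github.com\/orc\/db\"\n \"github.com\/orc\/mvc\/controllers\"\n)\n\nfunc Init() {\n Drop()\n for i, _ := range db.Tables {\n controllers.GetModel(db.Tables[i]).Create()\n }\n}\n\nfunc Drop() {\n for _, v := range db.Tables {\n db.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s CASCADE;\", v), nil)\n db.Query(fmt.Sprintf(\"DROP SEQUENCE IF EXISTS %s_id_seq;\", v), nil)\n }\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\ntype ResolvError struct {\n\tqname, net string\n\tnameservers []string\n}\n\nfunc (e ResolvError) 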
Error() string {\n\terrmsg := fmt.Sprintf(\"%s resolv failed on %s (%s)\", e.qname, strings.Join(e.nameservers, \"; \"), e.net)\n\treturn errmsg\n}\n\ntype Resolver struct {\n\tservers []string\n\tdomain_server *suffixTreeNode\n\tconfig *ResolvSettings\n}\n\nfunc NewResolver(c ResolvSettings) *Resolver {\n\tr := &Resolver{\n\t\tservers: []string{},\n\t\tdomain_server: newSuffixTreeRoot(),\n\t\tconfig: &c,\n\t}\n\n\tif len(c.ServerListFile) > 0 {\n\t\tr.ReadServerListFile(c.ServerListFile)\n\t}\n\n\tif len(c.ResolvFile) > 0 {\n\t\tclientConfig, err := dns.ClientConfigFromFile(c.ResolvFile)\n\t\tif err != nil {\n\t\t\tlogger.Error(\":%s is not a valid resolv.conf file\\n\", c.ResolvFile)\n\t\t\tlogger.Error(\"%s\", err)\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, server := range clientConfig.Servers {\n\t\t\tnameserver := net.JoinHostPort(server, clientConfig.Port)\n\t\t\tr.servers = append(r.servers, nameserver)\n\t\t}\n\t}\n\n\treturn r\n}\n\nfunc (r *Resolver) ReadServerListFile(file string) {\n\tbuf, err := os.Open(file)\n\tif err != nil {\n\t\tpanic(\"Can't open \" + file)\n\t}\n\tscanner := bufio.NewScanner(buf)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tline = strings.TrimSpace(line)\n\n\t\tif !strings.HasPrefix(line, \"server\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tsli := strings.Split(line, \"=\")\n\t\tif len(sli) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tline = strings.TrimSpace(sli[1])\n\n\t\ttokens := strings.Split(line, \"\/\")\n\t\tswitch len(tokens) {\n\t\tcase 3:\n\t\t\tdomain := tokens[1]\n\t\t\tip := tokens[2]\n\t\t\tif !isDomain(domain) || !isIP(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.domain_server.sinsert(strings.Split(domain, \".\"), ip)\n\t\tcase 1:\n\t\t\tsrv_port := strings.Split(line, \"#\")\n\t\t\tif len(srv_port) > 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip := \"\"\n\t\t\tif ip = srv_port[0]; !isIP(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tport := \"53\"\n\t\t\tif len(srv_port) == 2 {\n\t\t\t\tif _, err := strconv.Atoi(srv_port[1]); err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tport = srv_port[1]\n\t\t\t}\n\t\t\tr.servers = append(r.servers, net.JoinHostPort(ip, port))\n\t\t}\n\t}\n\n}\n\n\/\/ Lookup will ask each nameserver in top-to-bottom fashion, starting a new request\n\/\/ in every second, and return as early as possbile (have an answer).\n\/\/ It returns an error if no request has succeeded.\nfunc (r *Resolver) Lookup(net string, req *dns.Msg) (message *dns.Msg, err error) {\n\tc := &dns.Client{\n\t\tNet: net,\n\t\tReadTimeout: r.Timeout(),\n\t\tWriteTimeout: r.Timeout(),\n\t}\n\n\tif net == \"udp\" && settings.ResolvConfig.SetEDNS0 {\n\t\treq = req.SetEdns0(65535, true)\n\t}\n\n\tqname := req.Question[0].Name\n\n\tres := make(chan *dns.Msg, 1)\n\tvar wg sync.WaitGroup\n\tL := func(nameserver string) {\n\t\tdefer wg.Done()\n\t\tr, rtt, err := c.Exchange(req, nameserver)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"%s socket error on %s\", qname, nameserver)\n\t\t\tlogger.Warn(\"error:%s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/ If SERVFAIL happen, should return immediately and try another upstream resolver.\n\t\t\/\/ However, other Error code like NXDOMAIN is an clear response stating\n\t\t\/\/ that it has been verified no such domain existas and ask other resolvers\n\t\t\/\/ would make no sense. 
See more about #20\n\t\tif r != nil && r.Rcode != dns.RcodeSuccess {\n\t\t\tlogger.Warn(\"%s failed to get an valid answer on %s\", qname, nameserver)\n\t\t\tif r.Rcode == dns.RcodeServerFailure {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Debug(\"%s resolv on %s (%s) ttl: %v\", UnFqdn(qname), nameserver, net, rtt)\n\t\t}\n\t\tselect {\n\t\tcase res <- r:\n\t\tdefault:\n\t\t}\n\t}\n\n\tticker := time.NewTicker(time.Duration(settings.ResolvConfig.Interval) * time.Millisecond)\n\tdefer ticker.Stop()\n\t\/\/ Start lookup on each nameserver top-down, in every second\n\tnameservers := r.Nameservers(qname)\n\tfor _, nameserver := range nameservers {\n\t\twg.Add(1)\n\t\tgo L(nameserver)\n\t\t\/\/ but exit early, if we have an answer\n\t\tselect {\n\t\tcase r := <-res:\n\t\t\t\/\/ logger.Debug(\"%s resolv on %s rtt: %v\", UnFqdn(qname), nameserver, rtt)\n\t\t\treturn r, nil\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\t}\n\t}\n\t\/\/ wait for all the namservers to finish\n\twg.Wait()\n\tselect {\n\tcase r := <-res:\n\t\t\/\/ logger.Debug(\"%s resolv on %s rtt: %v\", UnFqdn(qname), nameserver, rtt)\n\t\treturn r, nil\n\tdefault:\n\t\treturn nil, ResolvError{qname, net, nameservers}\n\t}\n}\n\n\/\/ Namservers return the array of nameservers, with port number appended.\n\/\/ '#' in the name is treated as port separator, as with dnsmasq.\n\nfunc (r *Resolver) Nameservers(qname string) []string {\n\tqueryKeys := strings.Split(qname, \".\")\n\tqueryKeys = queryKeys[:len(queryKeys)-1] \/\/ ignore last '.'\n\n\tns := []string{}\n\tif v, found := r.domain_server.search(queryKeys); found {\n\t\tlogger.Debug(\"found upstream: %v\", v)\n\t\tserver := v\n\t\tnameserver := server + \":53\"\n\t\tns = append(ns, nameserver)\n\t}\n\n\tfor _, nameserver := range r.servers {\n\t\tns = append(ns, nameserver)\n\t}\n\treturn ns\n}\n\nfunc (r *Resolver) Timeout() time.Duration {\n\treturn time.Duration(r.config.Timeout) * time.Second\n}\nmore pretty logging and ensure query the specific upstream nameserver in async Lookup() function.package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\ntype ResolvError struct {\n\tqname, net string\n\tnameservers []string\n}\n\nfunc (e ResolvError) Error() string {\n\terrmsg := fmt.Sprintf(\"%s resolv failed on %s (%s)\", e.qname, strings.Join(e.nameservers, \"; \"), e.net)\n\treturn errmsg\n}\n\ntype RResp struct {\n\tmsg *dns.Msg\n\tnameserver string\n\trtt time.Duration\n}\n\ntype Resolver struct {\n\tservers []string\n\tdomain_server *suffixTreeNode\n\tconfig *ResolvSettings\n}\n\nfunc NewResolver(c ResolvSettings) *Resolver {\n\tr := &Resolver{\n\t\tservers: []string{},\n\t\tdomain_server: newSuffixTreeRoot(),\n\t\tconfig: &c,\n\t}\n\n\tif len(c.ServerListFile) > 0 {\n\t\tr.ReadServerListFile(c.ServerListFile)\n\t}\n\n\tif len(c.ResolvFile) > 0 {\n\t\tclientConfig, err := dns.ClientConfigFromFile(c.ResolvFile)\n\t\tif err != nil {\n\t\t\tlogger.Error(\":%s is not a valid resolv.conf file\\n\", c.ResolvFile)\n\t\t\tlogger.Error(\"%s\", err)\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, server := range clientConfig.Servers {\n\t\t\tnameserver := net.JoinHostPort(server, clientConfig.Port)\n\t\t\tr.servers = append(r.servers, nameserver)\n\t\t}\n\t}\n\n\treturn r\n}\n\nfunc (r *Resolver) ReadServerListFile(file string) {\n\tbuf, err := os.Open(file)\n\tif err != nil {\n\t\tpanic(\"Can't open \" + file)\n\t}\n\tscanner := bufio.NewScanner(buf)\n\tfor scanner.Scan() {\n\t\tline := 
scanner.Text()\n\t\tline = strings.TrimSpace(line)\n\n\t\tif !strings.HasPrefix(line, \"server\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tsli := strings.Split(line, \"=\")\n\t\tif len(sli) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tline = strings.TrimSpace(sli[1])\n\n\t\ttokens := strings.Split(line, \"\/\")\n\t\tswitch len(tokens) {\n\t\tcase 3:\n\t\t\tdomain := tokens[1]\n\t\t\tip := tokens[2]\n\t\t\tif !isDomain(domain) || !isIP(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.domain_server.sinsert(strings.Split(domain, \".\"), ip)\n\t\tcase 1:\n\t\t\tsrv_port := strings.Split(line, \"#\")\n\t\t\tif len(srv_port) > 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip := \"\"\n\t\t\tif ip = srv_port[0]; !isIP(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tport := \"53\"\n\t\t\tif len(srv_port) == 2 {\n\t\t\t\tif _, err := strconv.Atoi(srv_port[1]); err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tport = srv_port[1]\n\t\t\t}\n\t\t\tr.servers = append(r.servers, net.JoinHostPort(ip, port))\n\t\t}\n\t}\n\n}\n\n\/\/ Lookup will ask each nameserver in top-to-bottom fashion, starting a new request\n\/\/ every second, and returns as early as possible (as soon as it has an answer).\n\/\/ It returns an error if no request has succeeded.\nfunc (r *Resolver) Lookup(net string, req *dns.Msg) (message *dns.Msg, err error) {\n\tc := &dns.Client{\n\t\tNet: net,\n\t\tReadTimeout: r.Timeout(),\n\t\tWriteTimeout: r.Timeout(),\n\t}\n\n\tif net == \"udp\" && settings.ResolvConfig.SetEDNS0 {\n\t\treq = req.SetEdns0(65535, true)\n\t}\n\n\tqname := req.Question[0].Name\n\n\tres := make(chan *RResp, 1)\n\tvar wg sync.WaitGroup\n\tL := func(nameserver string) {\n\t\tdefer wg.Done()\n\t\tr, rtt, err := c.Exchange(req, nameserver)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"%s socket error on %s\", qname, nameserver)\n\t\t\tlogger.Warn(\"error:%s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\t\/\/ If SERVFAIL happens, we should return immediately and try another upstream resolver.\n\t\t\/\/ However, other error codes like NXDOMAIN are a clear response stating\n\t\t\/\/ that it has been verified no such domain exists, so asking other resolvers\n\t\t\/\/ would make no sense. 
See more about #20\n\t\tif r != nil && r.Rcode != dns.RcodeSuccess {\n\t\t\tlogger.Warn(\"%s failed to get a valid answer on %s\", qname, nameserver)\n\t\t\tif r.Rcode == dns.RcodeServerFailure {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tre := &RResp{r, nameserver, rtt}\n\t\tselect {\n\t\tcase res <- re:\n\t\tdefault:\n\t\t}\n\t}\n\n\tticker := time.NewTicker(time.Duration(settings.ResolvConfig.Interval) * time.Millisecond)\n\tdefer ticker.Stop()\n\t\/\/ Start a lookup on each nameserver top-down, every second\n\tnameservers := r.Nameservers(qname)\n\tfor _, nameserver := range nameservers {\n\t\twg.Add(1)\n\t\tgo L(nameserver)\n\t\t\/\/ but exit early, if we have an answer\n\t\tselect {\n\t\tcase re := <-res:\n\t\t\tlogger.Debug(\"%s resolv on %s rtt: %v\", UnFqdn(qname), re.nameserver, re.rtt)\n\t\t\treturn re.msg, nil\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\t}\n\t}\n\t\/\/ wait for all the nameservers to finish\n\twg.Wait()\n\tselect {\n\tcase re := <-res:\n\t\tlogger.Debug(\"%s resolv on %s rtt: %v\", UnFqdn(qname), re.nameserver, re.rtt)\n\t\treturn re.msg, nil\n\tdefault:\n\t\treturn nil, ResolvError{qname, net, nameservers}\n\t}\n}\n\n\/\/ Nameservers returns the array of nameservers, with port number appended.\n\/\/ '#' in the name is treated as port separator, as with dnsmasq.\n\nfunc (r *Resolver) Nameservers(qname string) []string {\n\tqueryKeys := strings.Split(qname, \".\")\n\tqueryKeys = queryKeys[:len(queryKeys)-1] \/\/ ignore last '.'\n\n\tns := []string{}\n\tif v, found := r.domain_server.search(queryKeys); found {\n\t\tlogger.Debug(\"%s found in domain server list, upstream: %v\", qname, v)\n\t\tserver := v\n\t\tnameserver := net.JoinHostPort(server, \"53\")\n\t\tns = append(ns, nameserver)\n\t\t\/\/ Ensure only this specific upstream nameserver is queried in the async Lookup() function.\n\t\treturn ns\n\t}\n\n\tfor _, nameserver := range r.servers {\n\t\tns = append(ns, nameserver)\n\t}\n\treturn ns\n}\n\nfunc (r *Resolver) Timeout() time.Duration {\n\treturn time.Duration(r.config.Timeout) * time.Second\n}\n
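\n\/\/ A usage sketch. It assumes the package-level settings and logger globals are\n\/\/ initialized, and uses only the ResolvSettings fields this file reads\n\/\/ (ResolvFile, and Timeout in seconds): race the upstreams for an A record.\nfunc exampleLookup() {\n\tr := NewResolver(ResolvSettings{ResolvFile: \"\/etc\/resolv.conf\", Timeout: 5})\n\treq := new(dns.Msg)\n\treq.SetQuestion(dns.Fqdn(\"example.com\"), dns.TypeA)\n\tif msg, err := r.Lookup(\"udp\", req); err == nil {\n\t\tlogger.Debug(\"got %d answers\", len(msg.Answer))\n\t}\n}\n<|endoftext|>"} {"text":"package utils\nimport (\n \"gopkg.in\/cookieo9\/resources-go.v2\"\n \"io\/ioutil\"\n \"strings\"\n \"log\"\n \"mime\"\n \"time\"\n \"path\/filepath\"\n \"net\/http\"\n)\ntype Resource struct{}\n\nvar DefaultResource *Resource=&Resource{}\n\n\nfunc (re *Resource)Load(path string) []byte{\n res,err:=re.Get(path)\n if(err!=nil){\n return []byte{}\n }\n r,_:=res.Open()\n bf,err:=ioutil.ReadAll(r)\n if(err!=nil){\n log.Println(\"read res[\",path,\"] failed\",err.Error())\n }\n return bf\n}\n\nfunc (re *Resource)Get(path string)(resources.Resource,error){\n path=strings.TrimLeft(path,\"\/\")\n res,err:=resources.Find(path)\n if(err!=nil){\n log.Println(\"load res[\",path,\"] failed\",err.Error())\n return nil,err\n }\n return res,nil\n}\n\nfunc (re *Resource)HandleStatic(w http.ResponseWriter,r *http.Request,path string){\n res,err:=re.Get(path)\n if(err!=nil){\n http.NotFound(w,r)\n return;\n }\n finfo,_:=res.Stat()\n modtime:=finfo.ModTime()\n if t, err := time.Parse(http.TimeFormat, r.Header.Get(\"If-Modified-Since\")); err == nil && modtime.Before(t.Add(1*time.Second)) {\n h := w.Header()\n delete(h, \"Content-Type\")\n delete(h, \"Content-Length\")\n w.WriteHeader(http.StatusNotModified)\n return\n }\n mimeType:= mime.TypeByExtension(filepath.Ext(path))\n if(mimeType!=\"\"){\n w.Header().Set(\"Content-Type\",mimeType)\n }\n w.Header().Set(\"Last-Modified\",modtime.UTC().Format(http.TimeFormat))\n w.Write(re.Load(path))\n}\n\nfunc ResetDefaultBundle(execDir 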
bool){\n resources.DefaultBundle=make(resources.BundleSequence,1,10)\n var exe_dir, exe resources.Bundle\n if exe_path, err := resources.ExecutablePath(); err == nil {\n\t\texe_dir = resources.OpenFS(filepath.Dir(exe_path))\n\t\tif exe, err = resources.OpenZip(exe_path); err == nil {\n\t\t\tresources.DefaultBundle = append(resources.DefaultBundle, exe)\n\t\t}\n\t\tif(execDir){\n\t\t\tresources.DefaultBundle = append(resources.DefaultBundle, exe_dir)\n\t\t}\n\t}\n}\n\n\/\/func init() {\n\/\/\tvar cwd, cur_pkg, exe_dir, exe Bundle\n\/\/\tcwd = OpenFS(\".\")\n\/\/\tcur_pkg = OpenAutoBundle(OpenCurrentPackage)\n\/\/\n\/\/\tif exe_path, err := ExecutablePath(); err == nil {\n\/\/\t\texe_dir = OpenFS(filepath.Dir(exe_path))\n\/\/\t\tif exe, err = OpenZip(exe_path); err == nil {\n\/\/\t\t\tDefaultBundle = append(DefaultBundle, exe)\n\/\/\t\t}\n\/\/\t}\n\/\/\n\/\/\tDefaultBundle = append(DefaultBundle, cwd, exe_dir, cur_pkg, exe)\n\/\/}\n\nauto bundle zippackage utils\nimport (\n \"gopkg.in\/cookieo9\/resources-go.v2\"\n \"io\/ioutil\"\n \"strings\"\n \"log\"\n \"mime\"\n \"time\"\n \"path\/filepath\"\n \"net\/http\"\n)\ntype Resource struct{}\n\nvar DefaultResource *Resource=&Resource{}\n\n\nfunc (re *Resource)Load(path string) []byte{\n res,err:=re.Get(path)\n if(err!=nil){\n return []byte{}\n }\n r,_:=res.Open()\n bf,err:=ioutil.ReadAll(r)\n if(err!=nil){\n log.Println(\"read res[\",path,\"] failed\",err.Error())\n }\n return bf\n}\n\nfunc (re *Resource)Get(path string)(resources.Resource,error){\n path=strings.TrimLeft(path,\"\/\")\n res,err:=resources.Find(path)\n if(err!=nil){\n log.Println(\"load res[\",path,\"] failed\",err.Error())\n return nil,err\n }\n return res,nil\n}\n\nfunc (re *Resource)HandleStatic(w http.ResponseWriter,r *http.Request,path string){\n res,err:=re.Get(path)\n if(err!=nil){\n http.NotFound(w,r)\n return;\n }\n finfo,_:=res.Stat()\n modtime:=finfo.ModTime()\n if t, err := time.Parse(http.TimeFormat, r.Header.Get(\"If-Modified-Since\")); err == nil && modtime.Before(t.Add(1*time.Second)) {\n h := w.Header()\n delete(h, \"Content-Type\")\n delete(h, \"Content-Length\")\n w.Header().Set(\"Last-Modified\",modtime.UTC().Format(http.TimeFormat))\n w.WriteHeader(http.StatusNotModified)\n return\n }\n mimeType:= mime.TypeByExtension(filepath.Ext(path))\n if(mimeType!=\"\"){\n w.Header().Set(\"Content-Type\",mimeType)\n }\n w.Header().Set(\"Last-Modified\",modtime.UTC().Format(http.TimeFormat))\n w.Write(re.Load(path))\n}\n\nfunc ResetDefaultBundle(){\n resources.DefaultBundle=make(resources.BundleSequence,1,10)\n \n var cwd ,exe_dir, exe ,cur_pkg resources.Bundle\n hasZip:=false\n if exe_path, err := resources.ExecutablePath(); err == nil {\n\t\tif exe, err = resources.OpenZip(exe_path); err == nil {\n log.Println(\"bundle resource zip\",exe_path)\n hasZip=true\n\t\t\tresources.DefaultBundle = append(resources.DefaultBundle, exe)\n\t\t}\n\t\tif(err!=nil){\n\t\t log.Println(\"bundle resource zip failed\")\n\t\t}\n\t\tif(!hasZip){\n\t\t\texe_dir = resources.OpenFS(filepath.Dir(exe_path))\n\t\t\tresources.DefaultBundle = append(resources.DefaultBundle, exe_dir)\n\t\t}\n\t}\n\tif(!hasZip){\n\t cwd = resources.OpenFS(\".\")\n\t cur_pkg = resources.OpenAutoBundle(resources.OpenCurrentPackage)\n\t resources.DefaultBundle = append(resources.DefaultBundle, cwd,cur_pkg)\n\t}\n}\n\n\/\/func init() {\n\/\/\tvar cwd, cur_pkg, exe_dir, exe Bundle\n\/\/\tcwd = OpenFS(\".\")\n\/\/\tcur_pkg = OpenAutoBundle(OpenCurrentPackage)\n\/\/\n\/\/\tif exe_path, err := ExecutablePath(); err == nil 
{\n\/\/\t\texe_dir = OpenFS(filepath.Dir(exe_path))\n\/\/\t\tif exe, err = OpenZip(exe_path); err == nil {\n\/\/\t\t\tDefaultBundle = append(DefaultBundle, exe)\n\/\/\t\t}\n\/\/\t}\n\/\/\n\/\/\tDefaultBundle = append(DefaultBundle, cwd, exe_dir, cur_pkg, exe)\n\/\/}\n\n<|endoftext|>"} {"text":"package executor\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/taskenv\"\n\t\"github.com\/hashicorp\/nomad\/client\/testutil\"\n\t\"github.com\/hashicorp\/nomad\/helper\/testlog\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/mock\"\n\t\"github.com\/hashicorp\/nomad\/plugins\/drivers\"\n\ttu \"github.com\/hashicorp\/nomad\/testutil\"\n\tlconfigs \"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nfunc init() {\n\texecutorFactories[\"LibcontainerExecutor\"] = libcontainerFactory\n}\n\nvar libcontainerFactory = executorFactory{\n\tnew: NewExecutorWithIsolation,\n\tconfigureExecCmd: func(t *testing.T, cmd *ExecCommand) {\n\t\tcmd.ResourceLimits = true\n\t\tsetupRootfs(t, cmd.TaskDir)\n\t},\n}\n\n\/\/ testExecutorContextWithChroot returns an ExecutorContext and AllocDir with\n\/\/ chroot. Use testExecutorContext if you don't need a chroot.\n\/\/\n\/\/ The caller is responsible for calling AllocDir.Destroy() to cleanup.\nfunc testExecutorCommandWithChroot(t *testing.T) *testExecCmd {\n\tchrootEnv := map[string]string{\n\t\t\"\/etc\/ld.so.cache\": \"\/etc\/ld.so.cache\",\n\t\t\"\/etc\/ld.so.conf\": \"\/etc\/ld.so.conf\",\n\t\t\"\/etc\/ld.so.conf.d\": \"\/etc\/ld.so.conf.d\",\n\t\t\"\/lib\": \"\/lib\",\n\t\t\"\/lib64\": \"\/lib64\",\n\t\t\"\/usr\/lib\": \"\/usr\/lib\",\n\t\t\"\/bin\/ls\": \"\/bin\/ls\",\n\t\t\"\/bin\/cat\": \"\/bin\/cat\",\n\t\t\"\/bin\/echo\": \"\/bin\/echo\",\n\t\t\"\/bin\/bash\": \"\/bin\/bash\",\n\t\t\"\/bin\/sleep\": \"\/bin\/sleep\",\n\t\t\"\/foobar\": \"\/does\/not\/exist\",\n\t}\n\n\talloc := mock.Alloc()\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttaskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, \"global\").Build()\n\n\tallocDir := allocdir.NewAllocDir(testlog.HCLogger(t), filepath.Join(os.TempDir(), alloc.ID))\n\tif err := allocDir.Build(); err != nil {\n\t\tt.Fatalf(\"AllocDir.Build() failed: %v\", err)\n\t}\n\tif err := allocDir.NewTaskDir(task.Name).Build(true, chrootEnv); err != nil {\n\t\tallocDir.Destroy()\n\t\tt.Fatalf(\"allocDir.NewTaskDir(%q) failed: %v\", task.Name, err)\n\t}\n\ttd := allocDir.TaskDirs[task.Name]\n\tcmd := &ExecCommand{\n\t\tEnv: taskEnv.List(),\n\t\tTaskDir: td.Dir,\n\t\tResources: &drivers.Resources{\n\t\t\tNomadResources: alloc.AllocatedResources.Tasks[task.Name],\n\t\t},\n\t}\n\n\ttestCmd := &testExecCmd{\n\t\tcommand: cmd,\n\t\tallocDir: allocDir,\n\t}\n\tconfigureTLogging(t, testCmd)\n\treturn testCmd\n}\n\nfunc TestExecutor_IsolationAndConstraints(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"\/bin\/ls\"\n\texecCmd.Args = []string{\"-F\", \"\/\", \"\/etc\/\"}\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\tps, err := 
executor.Launch(execCmd)\n\trequire.NoError(err)\n\trequire.NotZero(ps.Pid)\n\n\tstate, err := executor.Wait(context.Background())\n\trequire.NoError(err)\n\trequire.Zero(state.ExitCode)\n\n\t\/\/ Check if the resource constraints were applied\n\tif lexec, ok := executor.(*LibcontainerExecutor); ok {\n\t\tstate, err := lexec.container.State()\n\t\trequire.NoError(err)\n\n\t\tmemLimits := filepath.Join(state.CgroupPaths[\"memory\"], \"memory.limit_in_bytes\")\n\t\tdata, err := ioutil.ReadFile(memLimits)\n\t\trequire.NoError(err)\n\n\t\texpectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024))\n\t\tactualMemLim := strings.TrimSpace(string(data))\n\t\trequire.Equal(actualMemLim, expectedMemLim)\n\t\trequire.NoError(executor.Shutdown(\"\", 0))\n\t\texecutor.Wait(context.Background())\n\n\t\t\/\/ Check if Nomad has actually removed the cgroups\n\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t_, err = os.Stat(memLimits)\n\t\t\tif err == nil {\n\t\t\t\treturn false, fmt.Errorf(\"expected an error from os.Stat %s\", memLimits)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}, func(err error) { t.Error(err) })\n\n\t}\n\texpected := `\/:\nalloc\/\nbin\/\ndev\/\netc\/\nlib\/\nlib64\/\nlocal\/\nproc\/\nsecrets\/\nsys\/\ntmp\/\nusr\/\n\n\/etc\/:\nld.so.cache\nld.so.conf\nld.so.conf.d\/`\n\ttu.WaitForResult(func() (bool, error) {\n\t\toutput := testExecCmd.stdout.String()\n\t\tact := strings.TrimSpace(string(output))\n\t\tif act != expected {\n\t\t\treturn false, fmt.Errorf(\"Command output incorrect: want %v; got %v\", expected, act)\n\t\t}\n\t\treturn true, nil\n\t}, func(err error) { t.Error(err) })\n}\n\nfunc TestUniversalExecutor_LookupTaskBin(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\n\t\/\/ Create a temp dir\n\ttmpDir, err := ioutil.TempDir(\"\", \"\")\n\trequire.Nil(err)\n\tdefer os.RemoveAll(tmpDir) \/\/ the temp dir gains subdirectories below, so remove recursively\n\n\t\/\/ Create the command\n\tcmd := &ExecCommand{Env: []string{\"PATH=\/bin\"}, TaskDir: tmpDir}\n\n\t\/\/ Make a foo subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"foo\"), 0700)\n\n\t\/\/ Write a file under foo\n\tfilePath := filepath.Join(tmpDir, \"foo\", \"tmp.txt\")\n\terr = ioutil.WriteFile(filePath, []byte{1, 2}, os.ModeAppend)\n\trequire.NoError(err)\n\n\t\/\/ Lookup with an absolute path to the binary\n\tcmd.Cmd = \"\/foo\/tmp.txt\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.NoError(err)\n\n\t\/\/ Write a file under local subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"local\"), 0700)\n\tfilePath2 := filepath.Join(tmpDir, \"local\", \"tmp.txt\")\n\tioutil.WriteFile(filePath2, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\tcmd.Cmd = \"tmp.txt\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.NoError(err)\n\n\t\/\/ Lookup a host absolute path\n\tcmd.Cmd = \"\/bin\/sh\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.Error(err)\n}\n\n\/\/ Exec Launch looks for the binary only inside the chroot\nfunc TestExecutor_EscapeContainer(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"\/bin\/kill\" \/\/ missing from the chroot container\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\t_, err := executor.Launch(execCmd)\n\trequire.Error(err)\n\trequire.Regexp(\"^file \/bin\/kill not found under path\", 
err)\n\n\t\/\/ Bare files are looked up using the system path, inside the container\n\tallocDir.Destroy()\n\ttestExecCmd = testExecutorCommandWithChroot(t)\n\texecCmd, allocDir = testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"kill\"\n\t_, err = executor.Launch(execCmd)\n\trequire.Error(err)\n\trequire.Regexp(\"^file kill not found under path\", err)\n\n\tallocDir.Destroy()\n\ttestExecCmd = testExecutorCommandWithChroot(t)\n\texecCmd, allocDir = testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"echo\"\n\t_, err = executor.Launch(execCmd)\n\trequire.NoError(err)\n}\n\nfunc TestExecutor_Capabilities(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\texecCmd.Cmd = \"\/bin\/sh\"\n\texecCmd.Args = []string{\"-c\", \"cat \/proc\/$$\/cmdline\"}\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\t_, err := executor.Launch(execCmd)\n\trequire.NoError(err)\n\n\tch := make(chan interface{})\n\tgo func() {\n\t\texecutor.Wait(context.Background())\n\t\tclose(ch)\n\t}()\n\n\tselect {\n\tcase <-ch:\n\t\t\/\/ all good\n\tcase <-time.After(5 * time.Second):\n\t\trequire.Fail(\"timeout waiting for exec to shutdown\")\n\t}\n\n\toutput := testExecCmd.stdout.String()\n\trequire.Empty(output)\n\n}\n\nfunc TestExecutor_ClientCleanup(t *testing.T) {\n\tt.Parallel()\n\ttestutil.ExecCompatible(t)\n\trequire := require.New(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\tdefer allocDir.Destroy()\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"\", 0)\n\n\t\/\/ Need to run a command which will produce continuous output but not\n\t\/\/ too quickly to ensure executor.Exit() stops the process.\n\texecCmd.Cmd = \"\/bin\/bash\"\n\texecCmd.Args = []string{\"-c\", \"while true; do \/bin\/echo X; \/bin\/sleep 1; done\"}\n\texecCmd.ResourceLimits = true\n\n\tps, err := executor.Launch(execCmd)\n\n\trequire.NoError(err)\n\trequire.NotZero(ps.Pid)\n\ttime.Sleep(500 * time.Millisecond)\n\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\tch := make(chan interface{})\n\tgo func() {\n\t\texecutor.Wait(context.Background())\n\t\tclose(ch)\n\t}()\n\n\tselect {\n\tcase <-ch:\n\t\t\/\/ all good\n\tcase <-time.After(5 * time.Second):\n\t\trequire.Fail(\"timeout waiting for exec to shutdown\")\n\t}\n\n\toutput := testExecCmd.stdout.String()\n\trequire.NotZero(len(output))\n\ttime.Sleep(2 * time.Second)\n\toutput1 := testExecCmd.stdout.String()\n\trequire.Equal(len(output), len(output1))\n}\n\nfunc TestExecutor_cmdDevices(t *testing.T) {\n\tinput := []*drivers.DeviceConfig{\n\t\t{\n\t\t\tHostPath: \"\/dev\/null\",\n\t\t\tTaskPath: \"\/task\/dev\/null\",\n\t\t\tPermissions: \"rwm\",\n\t\t},\n\t}\n\n\texpected := &lconfigs.Device{\n\t\tPath: \"\/task\/dev\/null\",\n\t\tType: 99,\n\t\tMajor: 1,\n\t\tMinor: 3,\n\t\tPermissions: \"rwm\",\n\t}\n\n\tfound, err := cmdDevices(input)\n\trequire.NoError(t, err)\n\trequire.Len(t, found, 1)\n\n\t\/\/ ignore file permission and ownership\n\t\/\/ as they are host specific potentially\n\td := found[0]\n\td.FileMode = 0\n\td.Uid = 0\n\td.Gid = 0\n\n\trequire.EqualValues(t, expected, d)\n}\n\nfunc TestExecutor_cmdMounts(t *testing.T) {\n\tinput := 
[]*drivers.MountConfig{\n\t\t{\n\t\t\tHostPath: \"\/host\/path-ro\",\n\t\t\tTaskPath: \"\/task\/path-ro\",\n\t\t\tReadonly: true,\n\t\t},\n\t\t{\n\t\t\tHostPath: \"\/host\/path-rw\",\n\t\t\tTaskPath: \"\/task\/path-rw\",\n\t\t\tReadonly: false,\n\t\t},\n\t}\n\n\texpected := []*lconfigs.Mount{\n\t\t{\n\t\t\tSource: \"\/host\/path-ro\",\n\t\t\tDestination: \"\/task\/path-ro\",\n\t\t\tFlags: unix.MS_BIND | unix.MS_RDONLY,\n\t\t\tDevice: \"bind\",\n\t\t},\n\t\t{\n\t\t\tSource: \"\/host\/path-rw\",\n\t\t\tDestination: \"\/task\/path-rw\",\n\t\t\tFlags: unix.MS_BIND,\n\t\t\tDevice: \"bind\",\n\t\t},\n\t}\n\n\trequire.EqualValues(t, expected, cmdMounts(input))\n}\nuse \/bin\/bashpackage executor\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/taskenv\"\n\t\"github.com\/hashicorp\/nomad\/client\/testutil\"\n\t\"github.com\/hashicorp\/nomad\/helper\/testlog\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/mock\"\n\t\"github.com\/hashicorp\/nomad\/plugins\/drivers\"\n\ttu \"github.com\/hashicorp\/nomad\/testutil\"\n\tlconfigs \"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nfunc init() {\n\texecutorFactories[\"LibcontainerExecutor\"] = libcontainerFactory\n}\n\nvar libcontainerFactory = executorFactory{\n\tnew: NewExecutorWithIsolation,\n\tconfigureExecCmd: func(t *testing.T, cmd *ExecCommand) {\n\t\tcmd.ResourceLimits = true\n\t\tsetupRootfs(t, cmd.TaskDir)\n\t},\n}\n\n\/\/ testExecutorContextWithChroot returns an ExecutorContext and AllocDir with\n\/\/ chroot. Use testExecutorContext if you don't need a chroot.\n\/\/\n\/\/ The caller is responsible for calling AllocDir.Destroy() to cleanup.\nfunc testExecutorCommandWithChroot(t *testing.T) *testExecCmd {\n\tchrootEnv := map[string]string{\n\t\t\"\/etc\/ld.so.cache\": \"\/etc\/ld.so.cache\",\n\t\t\"\/etc\/ld.so.conf\": \"\/etc\/ld.so.conf\",\n\t\t\"\/etc\/ld.so.conf.d\": \"\/etc\/ld.so.conf.d\",\n\t\t\"\/lib\": \"\/lib\",\n\t\t\"\/lib64\": \"\/lib64\",\n\t\t\"\/usr\/lib\": \"\/usr\/lib\",\n\t\t\"\/bin\/ls\": \"\/bin\/ls\",\n\t\t\"\/bin\/cat\": \"\/bin\/cat\",\n\t\t\"\/bin\/echo\": \"\/bin\/echo\",\n\t\t\"\/bin\/bash\": \"\/bin\/bash\",\n\t\t\"\/bin\/sleep\": \"\/bin\/sleep\",\n\t\t\"\/foobar\": \"\/does\/not\/exist\",\n\t}\n\n\talloc := mock.Alloc()\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttaskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, \"global\").Build()\n\n\tallocDir := allocdir.NewAllocDir(testlog.HCLogger(t), filepath.Join(os.TempDir(), alloc.ID))\n\tif err := allocDir.Build(); err != nil {\n\t\tt.Fatalf(\"AllocDir.Build() failed: %v\", err)\n\t}\n\tif err := allocDir.NewTaskDir(task.Name).Build(true, chrootEnv); err != nil {\n\t\tallocDir.Destroy()\n\t\tt.Fatalf(\"allocDir.NewTaskDir(%q) failed: %v\", task.Name, err)\n\t}\n\ttd := allocDir.TaskDirs[task.Name]\n\tcmd := &ExecCommand{\n\t\tEnv: taskEnv.List(),\n\t\tTaskDir: td.Dir,\n\t\tResources: &drivers.Resources{\n\t\t\tNomadResources: alloc.AllocatedResources.Tasks[task.Name],\n\t\t},\n\t}\n\n\ttestCmd := &testExecCmd{\n\t\tcommand: cmd,\n\t\tallocDir: allocDir,\n\t}\n\tconfigureTLogging(t, testCmd)\n\treturn testCmd\n}\n\nfunc TestExecutor_IsolationAndConstraints(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := 
testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"\/bin\/ls\"\n\texecCmd.Args = []string{\"-F\", \"\/\", \"\/etc\/\"}\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\tps, err := executor.Launch(execCmd)\n\trequire.NoError(err)\n\trequire.NotZero(ps.Pid)\n\n\tstate, err := executor.Wait(context.Background())\n\trequire.NoError(err)\n\trequire.Zero(state.ExitCode)\n\n\t\/\/ Check if the resource constraints were applied\n\tif lexec, ok := executor.(*LibcontainerExecutor); ok {\n\t\tstate, err := lexec.container.State()\n\t\trequire.NoError(err)\n\n\t\tmemLimits := filepath.Join(state.CgroupPaths[\"memory\"], \"memory.limit_in_bytes\")\n\t\tdata, err := ioutil.ReadFile(memLimits)\n\t\trequire.NoError(err)\n\n\t\texpectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024))\n\t\tactualMemLim := strings.TrimSpace(string(data))\n\t\trequire.Equal(actualMemLim, expectedMemLim)\n\t\trequire.NoError(executor.Shutdown(\"\", 0))\n\t\texecutor.Wait(context.Background())\n\n\t\t\/\/ Check if Nomad has actually removed the cgroups\n\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t_, err = os.Stat(memLimits)\n\t\t\tif err == nil {\n\t\t\t\treturn false, fmt.Errorf(\"expected an error from os.Stat %s\", memLimits)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}, func(err error) { t.Error(err) })\n\n\t}\n\texpected := `\/:\nalloc\/\nbin\/\ndev\/\netc\/\nlib\/\nlib64\/\nlocal\/\nproc\/\nsecrets\/\nsys\/\ntmp\/\nusr\/\n\n\/etc\/:\nld.so.cache\nld.so.conf\nld.so.conf.d\/`\n\ttu.WaitForResult(func() (bool, error) {\n\t\toutput := testExecCmd.stdout.String()\n\t\tact := strings.TrimSpace(string(output))\n\t\tif act != expected {\n\t\t\treturn false, fmt.Errorf(\"Command output incorrect: want %v; got %v\", expected, act)\n\t\t}\n\t\treturn true, nil\n\t}, func(err error) { t.Error(err) })\n}\n\nfunc TestUniversalExecutor_LookupTaskBin(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\n\t\/\/ Create a temp dir\n\ttmpDir, err := ioutil.TempDir(\"\", \"\")\n\trequire.Nil(err)\n\tdefer os.RemoveAll(tmpDir) \/\/ the temp dir gains subdirectories below, so remove recursively\n\n\t\/\/ Create the command\n\tcmd := &ExecCommand{Env: []string{\"PATH=\/bin\"}, TaskDir: tmpDir}\n\n\t\/\/ Make a foo subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"foo\"), 0700)\n\n\t\/\/ Write a file under foo\n\tfilePath := filepath.Join(tmpDir, \"foo\", \"tmp.txt\")\n\terr = ioutil.WriteFile(filePath, []byte{1, 2}, os.ModeAppend)\n\trequire.NoError(err)\n\n\t\/\/ Lookup with an absolute path to the binary\n\tcmd.Cmd = \"\/foo\/tmp.txt\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.NoError(err)\n\n\t\/\/ Write a file under local subdir\n\tos.MkdirAll(filepath.Join(tmpDir, \"local\"), 0700)\n\tfilePath2 := filepath.Join(tmpDir, \"local\", \"tmp.txt\")\n\tioutil.WriteFile(filePath2, []byte{1, 2}, os.ModeAppend)\n\n\t\/\/ Lookup with file name, should find the one we wrote above\n\tcmd.Cmd = \"tmp.txt\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.NoError(err)\n\n\t\/\/ Lookup a host absolute path\n\tcmd.Cmd = \"\/bin\/sh\"\n\t_, err = lookupTaskBin(cmd)\n\trequire.Error(err)\n}\n\n\/\/ Exec Launch looks for the binary only inside the chroot\nfunc TestExecutor_EscapeContainer(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, 
testExecCmd.allocDir\n\texecCmd.Cmd = \"\/bin\/kill\" \/\/ missing from the chroot container\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\t_, err := executor.Launch(execCmd)\n\trequire.Error(err)\n\trequire.Regexp(\"^file \/bin\/kill not found under path\", err)\n\n\t\/\/ Bare files are looked up using the system path, inside the container\n\tallocDir.Destroy()\n\ttestExecCmd = testExecutorCommandWithChroot(t)\n\texecCmd, allocDir = testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"kill\"\n\t_, err = executor.Launch(execCmd)\n\trequire.Error(err)\n\trequire.Regexp(\"^file kill not found under path\", err)\n\n\tallocDir.Destroy()\n\ttestExecCmd = testExecutorCommandWithChroot(t)\n\texecCmd, allocDir = testExecCmd.command, testExecCmd.allocDir\n\texecCmd.Cmd = \"echo\"\n\t_, err = executor.Launch(execCmd)\n\trequire.NoError(err)\n}\n\nfunc TestExecutor_Capabilities(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ttestutil.ExecCompatible(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\tdefer allocDir.Destroy()\n\n\texecCmd.ResourceLimits = true\n\texecCmd.Cmd = \"\/bin\/bash\"\n\texecCmd.Args = []string{\"-c\", \"cat \/proc\/$$\/cmdline\"}\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"SIGKILL\", 0)\n\n\t_, err := executor.Launch(execCmd)\n\trequire.NoError(err)\n\n\tch := make(chan interface{})\n\tgo func() {\n\t\texecutor.Wait(context.Background())\n\t\tclose(ch)\n\t}()\n\n\tselect {\n\tcase <-ch:\n\t\t\/\/ all good\n\tcase <-time.After(5 * time.Second):\n\t\trequire.Fail(\"timeout waiting for exec to shutdown\")\n\t}\n\n\toutput := testExecCmd.stdout.String()\n\trequire.Empty(output)\n\n}\n\nfunc TestExecutor_ClientCleanup(t *testing.T) {\n\tt.Parallel()\n\ttestutil.ExecCompatible(t)\n\trequire := require.New(t)\n\n\ttestExecCmd := testExecutorCommandWithChroot(t)\n\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\tdefer allocDir.Destroy()\n\n\texecutor := NewExecutorWithIsolation(testlog.HCLogger(t))\n\tdefer executor.Shutdown(\"\", 0)\n\n\t\/\/ Need to run a command which will produce continuous output but not\n\t\/\/ too quickly to ensure executor.Exit() stops the process.\n\texecCmd.Cmd = \"\/bin\/bash\"\n\texecCmd.Args = []string{\"-c\", \"while true; do \/bin\/echo X; \/bin\/sleep 1; done\"}\n\texecCmd.ResourceLimits = true\n\n\tps, err := executor.Launch(execCmd)\n\n\trequire.NoError(err)\n\trequire.NotZero(ps.Pid)\n\ttime.Sleep(500 * time.Millisecond)\n\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\tch := make(chan interface{})\n\tgo func() {\n\t\texecutor.Wait(context.Background())\n\t\tclose(ch)\n\t}()\n\n\tselect {\n\tcase <-ch:\n\t\t\/\/ all good\n\tcase <-time.After(5 * time.Second):\n\t\trequire.Fail(\"timeout waiting for exec to shutdown\")\n\t}\n\n\toutput := testExecCmd.stdout.String()\n\trequire.NotZero(len(output))\n\ttime.Sleep(2 * time.Second)\n\toutput1 := testExecCmd.stdout.String()\n\trequire.Equal(len(output), len(output1))\n}\n\nfunc TestExecutor_cmdDevices(t *testing.T) {\n\tinput := []*drivers.DeviceConfig{\n\t\t{\n\t\t\tHostPath: \"\/dev\/null\",\n\t\t\tTaskPath: \"\/task\/dev\/null\",\n\t\t\tPermissions: \"rwm\",\n\t\t},\n\t}\n\n\texpected := &lconfigs.Device{\n\t\tPath: \"\/task\/dev\/null\",\n\t\tType: 99,\n\t\tMajor: 1,\n\t\tMinor: 
3,\n\t\tPermissions: \"rwm\",\n\t}\n\n\tfound, err := cmdDevices(input)\n\trequire.NoError(t, err)\n\trequire.Len(t, found, 1)\n\n\t\/\/ ignore file permission and ownership\n\t\/\/ as they are host specific potentially\n\td := found[0]\n\td.FileMode = 0\n\td.Uid = 0\n\td.Gid = 0\n\n\trequire.EqualValues(t, expected, d)\n}\n\nfunc TestExecutor_cmdMounts(t *testing.T) {\n\tinput := []*drivers.MountConfig{\n\t\t{\n\t\t\tHostPath: \"\/host\/path-ro\",\n\t\t\tTaskPath: \"\/task\/path-ro\",\n\t\t\tReadonly: true,\n\t\t},\n\t\t{\n\t\t\tHostPath: \"\/host\/path-rw\",\n\t\t\tTaskPath: \"\/task\/path-rw\",\n\t\t\tReadonly: false,\n\t\t},\n\t}\n\n\texpected := []*lconfigs.Mount{\n\t\t{\n\t\t\tSource: \"\/host\/path-ro\",\n\t\t\tDestination: \"\/task\/path-ro\",\n\t\t\tFlags: unix.MS_BIND | unix.MS_RDONLY,\n\t\t\tDevice: \"bind\",\n\t\t},\n\t\t{\n\t\t\tSource: \"\/host\/path-rw\",\n\t\t\tDestination: \"\/task\/path-rw\",\n\t\t\tFlags: unix.MS_BIND,\n\t\t\tDevice: \"bind\",\n\t\t},\n\t}\n\n\trequire.EqualValues(t, expected, cmdMounts(input))\n}\n<|endoftext|>"}
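{"text":"\/\/ Editor's note: a minimal, self-contained sketch (not part of the original\n\/\/ sources) of the poll-until-deadline pattern behind the tu.WaitForResult\n\/\/ calls in the executor tests above. The name waitForResult and its signature\n\/\/ are illustrative assumptions, not the actual Nomad testutil API.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ waitForResult polls fn until it reports success or the deadline passes;\n\/\/ on timeout the last error is handed to onFail, mirroring how the tests\n\/\/ above turn a slow condition into a test failure.\nfunc waitForResult(fn func() (bool, error), onFail func(error), deadline, interval time.Duration) {\n\tvar err error\n\tstop := time.Now().Add(deadline)\n\tfor time.Now().Before(stop) {\n\t\tvar ok bool\n\t\tif ok, err = fn(); ok {\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n\tonFail(err)\n}\n\nfunc main() {\n\tstart := time.Now()\n\twaitForResult(func() (bool, error) {\n\t\tif time.Since(start) > 300*time.Millisecond {\n\t\t\treturn true, nil \/\/ condition eventually holds\n\t\t}\n\t\treturn false, fmt.Errorf(\"not ready yet\")\n\t}, func(err error) { fmt.Println(\"timed out:\", err) }, 2*time.Second, 50*time.Millisecond)\n\tfmt.Println(\"condition met after\", time.Since(start))\n}\n<|endoftext|>"}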
{"text":"\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage csr\n\nimport (\n\t\"crypto\"\n\t\"crypto\/sha512\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"k8s.io\/klog\"\n\n\tcertificates \"k8s.io\/api\/certificates\/v1beta1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\tcertificatesclient \"k8s.io\/client-go\/kubernetes\/typed\/certificates\/v1beta1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\twatchtools \"k8s.io\/client-go\/tools\/watch\"\n\tcertutil \"k8s.io\/client-go\/util\/cert\"\n)\n\n\/\/ RequestNodeCertificate will create a certificate signing request for a node\n\/\/ (Organization and CommonName for the CSR will be set as expected for node\n\/\/ certificates) and send it to API server, then it will watch the object's\n\/\/ status, once approved by API server, it will return the API server's issued\n\/\/ certificate (pem-encoded). If there are any errors, or the watch times out, it\n\/\/ will return an error. This is intended for use on nodes (kubelet and\n\/\/ kubeadm).\nfunc RequestNodeCertificate(client certificatesclient.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) {\n\tsubject := &pkix.Name{\n\t\tOrganization: []string{\"system:nodes\"},\n\t\tCommonName: \"system:node:\" + string(nodeName),\n\t}\n\n\tprivateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid private key for certificate request: %v\", err)\n\t}\n\tcsrData, err := certutil.MakeCSR(privateKey, subject, nil, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to generate certificate request: %v\", err)\n\t}\n\n\tusages := []certificates.KeyUsage{\n\t\tcertificates.UsageDigitalSignature,\n\t\tcertificates.UsageKeyEncipherment,\n\t\tcertificates.UsageClientAuth,\n\t}\n\tname := digestedName(privateKeyData, subject, usages)\n\treq, err := RequestCertificate(client, csrData, name, usages, privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn WaitForCertificate(client, req, 3600*time.Second)\n}\n\n\/\/ RequestCertificate will either use an existing (if this process has run\n\/\/ before but not to completion) or create a certificate signing request using the\n\/\/ PEM encoded CSR and send it to API server, then it will watch the object's\n\/\/ status, once approved by API server, it will return the API server's issued\n\/\/ certificate (pem-encoded). If there are any errors, or the watch times out, it\n\/\/ will return an error.\nfunc RequestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, name string, usages []certificates.KeyUsage, privateKey interface{}) (req *certificates.CertificateSigningRequest, err error) {\n\tcsr := &certificates.CertificateSigningRequest{\n\t\t\/\/ Username, UID, Groups will be injected by API server.\n\t\tTypeMeta: metav1.TypeMeta{Kind: \"CertificateSigningRequest\"},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: certificates.CertificateSigningRequestSpec{\n\t\t\tRequest: csrData,\n\t\t\tUsages: usages,\n\t\t},\n\t}\n\tif len(csr.Name) == 0 {\n\t\tcsr.GenerateName = \"csr-\"\n\t}\n\n\treq, err = client.Create(csr)\n\tswitch {\n\tcase err == nil:\n\tcase errors.IsAlreadyExists(err) && len(name) > 0:\n\t\tklog.Infof(\"csr for this node already exists, reusing\")\n\t\treq, err = client.Get(name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, formatError(\"cannot retrieve certificate signing request: %v\", err)\n\t\t}\n\t\tif err := ensureCompatible(req, csr, privateKey); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"retrieved csr is not compatible: %v\", err)\n\t\t}\n\t\tklog.Infof(\"csr for this node is still valid\")\n\tdefault:\n\t\treturn nil, formatError(\"cannot create certificate signing request: %v\", err)\n\t}\n\treturn req, nil\n}\n\n\/\/ WaitForCertificate waits for a certificate to be issued until timeout, or returns an error.\nfunc WaitForCertificate(client certificatesclient.CertificateSigningRequestInterface, req *certificates.CertificateSigningRequest, timeout time.Duration) (certData []byte, err error) {\n\tfieldSelector := fields.OneTermEqualSelector(\"metadata.name\", req.Name).String()\n\n\tevent, err := watchtools.ListWatchUntil(\n\t\ttimeout,\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn client.List(options)\n\t\t\t},\n\t\t\tWatchFunc: 
func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn client.Watch(options)\n\t\t\t},\n\t\t},\n\t\tfunc(event watch.Event) (bool, error) {\n\t\t\tswitch event.Type {\n\t\t\tcase watch.Modified, watch.Added:\n\t\t\tcase watch.Deleted:\n\t\t\t\treturn false, fmt.Errorf(\"csr %q was deleted\", req.Name)\n\t\t\tdefault:\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tcsr := event.Object.(*certificates.CertificateSigningRequest)\n\t\t\tif csr.UID != req.UID {\n\t\t\t\treturn false, fmt.Errorf(\"csr %q changed UIDs\", csr.Name)\n\t\t\t}\n\t\t\tfor _, c := range csr.Status.Conditions {\n\t\t\t\tif c.Type == certificates.CertificateDenied {\n\t\t\t\t\treturn false, fmt.Errorf(\"certificate signing request is not approved, reason: %v, message: %v\", c.Reason, c.Message)\n\t\t\t\t}\n\t\t\t\tif c.Type == certificates.CertificateApproved && csr.Status.Certificate != nil {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false, nil\n\t\t},\n\t)\n\tif err == wait.ErrWaitTimeout {\n\t\treturn nil, wait.ErrWaitTimeout\n\t}\n\tif err != nil {\n\t\treturn nil, formatError(\"cannot watch on the certificate signing request: %v\", err)\n\t}\n\n\treturn event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil\n}\n\n\/\/ This digest should include all the relevant pieces of the CSR we care about.\n\/\/ We can't directly hash the serialized CSR because of random padding that we\n\/\/ regenerate every loop and we include usages which are not contained in the\n\/\/ CSR. This needs to be kept up to date as we add new fields to the node\n\/\/ certificates and with ensureCompatible.\nfunc digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string {\n\thash := sha512.New512_256()\n\n\t\/\/ Here we make sure two different inputs can't write the same stream\n\t\/\/ to the hash. This delimiter is not in the base64.URLEncoding\n\t\/\/ alphabet so there is no way to have spill over collisions. 
Without\n\t\/\/ it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar'\n\tconst delimiter = '|'\n\tencode := base64.RawURLEncoding.EncodeToString\n\n\twrite := func(data []byte) {\n\t\thash.Write([]byte(encode(data)))\n\t\thash.Write([]byte{delimiter})\n\t}\n\n\twrite(privateKeyData)\n\twrite([]byte(subject.CommonName))\n\tfor _, v := range subject.Organization {\n\t\twrite([]byte(v))\n\t}\n\tfor _, v := range usages {\n\t\twrite([]byte(v))\n\t}\n\n\treturn \"node-csr-\" + encode(hash.Sum(nil))\n}\n\n\/\/ ensureCompatible ensures that a CSR object is compatible with an original CSR\nfunc ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error {\n\tnewCsr, err := ParseCSR(new)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse new csr: %v\", err)\n\t}\n\torigCsr, err := ParseCSR(orig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse original csr: %v\", err)\n\t}\n\tif !reflect.DeepEqual(newCsr.Subject, origCsr.Subject) {\n\t\treturn fmt.Errorf(\"csr subjects differ: new: %#v, orig: %#v\", newCsr.Subject, origCsr.Subject)\n\t}\n\tsigner, ok := privateKey.(crypto.Signer)\n\tif !ok {\n\t\treturn fmt.Errorf(\"privateKey is not a signer\")\n\t}\n\tnewCsr.PublicKey = signer.Public()\n\tif err := newCsr.CheckSignature(); err != nil {\n\t\treturn fmt.Errorf(\"error validating signature new CSR against old key: %v\", err)\n\t}\n\tif len(new.Status.Certificate) > 0 {\n\t\tcerts, err := certutil.ParseCertsPEM(new.Status.Certificate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing signed certificate for CSR: %v\", err)\n\t\t}\n\t\tnow := time.Now()\n\t\tfor _, cert := range certs {\n\t\t\tif now.After(cert.NotAfter) {\n\t\t\t\treturn fmt.Errorf(\"one of the certificates for the CSR has expired: %s\", cert.NotAfter)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ formatError preserves the type of an API message but alters the message. 
Expects\n\/\/ a single argument format string, and returns the wrapped error.\nfunc formatError(format string, err error) error {\n\tif s, ok := err.(errors.APIStatus); ok {\n\t\tse := &errors.StatusError{ErrStatus: s.Status()}\n\t\tse.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message)\n\t\treturn se\n\t}\n\treturn fmt.Errorf(format, err)\n}\n\n\/\/ ParseCSR extracts the CSR from the API object and decodes it.\nfunc ParseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) {\n\t\/\/ extract PEM from request object\n\tpemBytes := obj.Spec.Request\n\tblock, _ := pem.Decode(pemBytes)\n\tif block == nil || block.Type != \"CERTIFICATE REQUEST\" {\n\t\treturn nil, fmt.Errorf(\"PEM block type must be CERTIFICATE REQUEST\")\n\t}\n\tcsr, err := x509.ParseCertificateRequest(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn csr, nil\n}\nUnexport csr.ParseCSR\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage csr\n\nimport (\n\t\"crypto\"\n\t\"crypto\/sha512\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"k8s.io\/klog\"\n\n\tcertificates \"k8s.io\/api\/certificates\/v1beta1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\tcertificatesclient \"k8s.io\/client-go\/kubernetes\/typed\/certificates\/v1beta1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\twatchtools \"k8s.io\/client-go\/tools\/watch\"\n\tcertutil \"k8s.io\/client-go\/util\/cert\"\n)\n\n\/\/ RequestNodeCertificate will create a certificate signing request for a node\n\/\/ (Organization and CommonName for the CSR will be set as expected for node\n\/\/ certificates) and send it to API server, then it will watch the object's\n\/\/ status, once approved by API server, it will return the API server's issued\n\/\/ certificate (pem-encoded). If there is any errors, or the watch timeouts, it\n\/\/ will return an error. 
This is intended for use on nodes (kubelet and\n\/\/ kubeadm).\nfunc RequestNodeCertificate(client certificatesclient.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) {\n\tsubject := &pkix.Name{\n\t\tOrganization: []string{\"system:nodes\"},\n\t\tCommonName: \"system:node:\" + string(nodeName),\n\t}\n\n\tprivateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid private key for certificate request: %v\", err)\n\t}\n\tcsrData, err := certutil.MakeCSR(privateKey, subject, nil, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to generate certificate request: %v\", err)\n\t}\n\n\tusages := []certificates.KeyUsage{\n\t\tcertificates.UsageDigitalSignature,\n\t\tcertificates.UsageKeyEncipherment,\n\t\tcertificates.UsageClientAuth,\n\t}\n\tname := digestedName(privateKeyData, subject, usages)\n\treq, err := RequestCertificate(client, csrData, name, usages, privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn WaitForCertificate(client, req, 3600*time.Second)\n}\n\n\/\/ RequestCertificate will either use an existing (if this process has run\n\/\/ before but not to completion) or create a certificate signing request using the\n\/\/ PEM encoded CSR and send it to API server, then it will watch the object's\n\/\/ status, once approved by API server, it will return the API server's issued\n\/\/ certificate (pem-encoded). If there is any errors, or the watch timeouts, it\n\/\/ will return an error.\nfunc RequestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, name string, usages []certificates.KeyUsage, privateKey interface{}) (req *certificates.CertificateSigningRequest, err error) {\n\tcsr := &certificates.CertificateSigningRequest{\n\t\t\/\/ Username, UID, Groups will be injected by API server.\n\t\tTypeMeta: metav1.TypeMeta{Kind: \"CertificateSigningRequest\"},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: certificates.CertificateSigningRequestSpec{\n\t\t\tRequest: csrData,\n\t\t\tUsages: usages,\n\t\t},\n\t}\n\tif len(csr.Name) == 0 {\n\t\tcsr.GenerateName = \"csr-\"\n\t}\n\n\treq, err = client.Create(csr)\n\tswitch {\n\tcase err == nil:\n\tcase errors.IsAlreadyExists(err) && len(name) > 0:\n\t\tklog.Infof(\"csr for this node already exists, reusing\")\n\t\treq, err = client.Get(name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, formatError(\"cannot retrieve certificate signing request: %v\", err)\n\t\t}\n\t\tif err := ensureCompatible(req, csr, privateKey); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"retrieved csr is not compatible: %v\", err)\n\t\t}\n\t\tklog.Infof(\"csr for this node is still valid\")\n\tdefault:\n\t\treturn nil, formatError(\"cannot create certificate signing request: %v\", err)\n\t}\n\treturn req, nil\n}\n\n\/\/ WaitForCertificate waits for a certificate to be issued until timeout, or returns an error.\nfunc WaitForCertificate(client certificatesclient.CertificateSigningRequestInterface, req *certificates.CertificateSigningRequest, timeout time.Duration) (certData []byte, err error) {\n\tfieldSelector := fields.OneTermEqualSelector(\"metadata.name\", req.Name).String()\n\n\tevent, err := watchtools.ListWatchUntil(\n\t\ttimeout,\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn client.List(options)\n\t\t\t},\n\t\t\tWatchFunc: 
func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn client.Watch(options)\n\t\t\t},\n\t\t},\n\t\tfunc(event watch.Event) (bool, error) {\n\t\t\tswitch event.Type {\n\t\t\tcase watch.Modified, watch.Added:\n\t\t\tcase watch.Deleted:\n\t\t\t\treturn false, fmt.Errorf(\"csr %q was deleted\", req.Name)\n\t\t\tdefault:\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tcsr := event.Object.(*certificates.CertificateSigningRequest)\n\t\t\tif csr.UID != req.UID {\n\t\t\t\treturn false, fmt.Errorf(\"csr %q changed UIDs\", csr.Name)\n\t\t\t}\n\t\t\tfor _, c := range csr.Status.Conditions {\n\t\t\t\tif c.Type == certificates.CertificateDenied {\n\t\t\t\t\treturn false, fmt.Errorf(\"certificate signing request is not approved, reason: %v, message: %v\", c.Reason, c.Message)\n\t\t\t\t}\n\t\t\t\tif c.Type == certificates.CertificateApproved && csr.Status.Certificate != nil {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false, nil\n\t\t},\n\t)\n\tif err == wait.ErrWaitTimeout {\n\t\treturn nil, wait.ErrWaitTimeout\n\t}\n\tif err != nil {\n\t\treturn nil, formatError(\"cannot watch on the certificate signing request: %v\", err)\n\t}\n\n\treturn event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil\n}\n\n\/\/ This digest should include all the relevant pieces of the CSR we care about.\n\/\/ We can't directly hash the serialized CSR because of random padding that we\n\/\/ regenerate every loop and we include usages which are not contained in the\n\/\/ CSR. This needs to be kept up to date as we add new fields to the node\n\/\/ certificates and with ensureCompatible.\nfunc digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string {\n\thash := sha512.New512_256()\n\n\t\/\/ Here we make sure two different inputs can't write the same stream\n\t\/\/ to the hash. This delimiter is not in the base64.URLEncoding\n\t\/\/ alphabet so there is no way to have spill over collisions. 
Without\n\t\/\/ it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar'\n\tconst delimiter = '|'\n\tencode := base64.RawURLEncoding.EncodeToString\n\n\twrite := func(data []byte) {\n\t\thash.Write([]byte(encode(data)))\n\t\thash.Write([]byte{delimiter})\n\t}\n\n\twrite(privateKeyData)\n\twrite([]byte(subject.CommonName))\n\tfor _, v := range subject.Organization {\n\t\twrite([]byte(v))\n\t}\n\tfor _, v := range usages {\n\t\twrite([]byte(v))\n\t}\n\n\treturn \"node-csr-\" + encode(hash.Sum(nil))\n}\n\n\/\/ ensureCompatible ensures that a CSR object is compatible with an original CSR\nfunc ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error {\n\tnewCSR, err := parseCSR(new)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse new csr: %v\", err)\n\t}\n\torigCSR, err := parseCSR(orig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse original csr: %v\", err)\n\t}\n\tif !reflect.DeepEqual(newCSR.Subject, origCSR.Subject) {\n\t\treturn fmt.Errorf(\"csr subjects differ: new: %#v, orig: %#v\", newCSR.Subject, origCSR.Subject)\n\t}\n\tsigner, ok := privateKey.(crypto.Signer)\n\tif !ok {\n\t\treturn fmt.Errorf(\"privateKey is not a signer\")\n\t}\n\tnewCSR.PublicKey = signer.Public()\n\tif err := newCSR.CheckSignature(); err != nil {\n\t\treturn fmt.Errorf(\"error validating signature new CSR against old key: %v\", err)\n\t}\n\tif len(new.Status.Certificate) > 0 {\n\t\tcerts, err := certutil.ParseCertsPEM(new.Status.Certificate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing signed certificate for CSR: %v\", err)\n\t\t}\n\t\tnow := time.Now()\n\t\tfor _, cert := range certs {\n\t\t\tif now.After(cert.NotAfter) {\n\t\t\t\treturn fmt.Errorf(\"one of the certificates for the CSR has expired: %s\", cert.NotAfter)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ formatError preserves the type of an API message but alters the message. Expects\n\/\/ a single argument format string, and returns the wrapped error.\nfunc formatError(format string, err error) error {\n\tif s, ok := err.(errors.APIStatus); ok {\n\t\tse := &errors.StatusError{ErrStatus: s.Status()}\n\t\tse.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message)\n\t\treturn se\n\t}\n\treturn fmt.Errorf(format, err)\n}\n\n\/\/ parseCSR extracts the CSR from the API object and decodes it.\nfunc parseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) {\n\t\/\/ extract PEM from request object\n\tblock, _ := pem.Decode(obj.Spec.Request)\n\tif block == nil || block.Type != \"CERTIFICATE REQUEST\" {\n\t\treturn nil, fmt.Errorf(\"PEM block type must be CERTIFICATE REQUEST\")\n\t}\n\treturn x509.ParseCertificateRequest(block.Bytes)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 The Vanadium Authors. 
{"text":"\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/x\/devtools\/internal\/collect\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\nvar (\n\tmirrors = []Mirror{\n\t\tMirror{\n\t\t\tname: \"blue\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/roadmap.blue\",\n\t\t\tgithub: \"git@github.com:veyron\/blue.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"browser\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.browser\",\n\t\t\tgithub: \"git@github.com:vanadium\/browser.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.v23\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.v23\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.v23.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.devtools\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.devtools\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.devtools.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.lib\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.lib\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.lib.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.ref\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.ref\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.ref.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"js\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.js.core\",\n\t\t\tgithub: \"git@github.com:vanadium\/js.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"chat\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.chat\",\n\t\t\tgithub: \"git@github.com:vanadium\/chat.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"media-sharing\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.media-sharing\",\n\t\t\tgithub: \"git@github.com:vanadium\/media-sharing.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"pipe2browser\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.pipe2browser\",\n\t\t\tgithub: \"git@github.com:vanadium\/pipe2browser.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"playground\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.playground\",\n\t\t\tgithub: \"git@github.com:vanadium\/playground.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"physical-lock\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.physical-lock\",\n\t\t\tgithub: \"git@github.com:vanadium\/physical-lock.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"reader\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.reader\",\n\t\t\tgithub: \"git@github.com:vanadium\/reader.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"third_party\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/third_party\",\n\t\t\tgithub: \"git@github.com:vanadium\/third_party.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"www\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/www\",\n\t\t\tgithub: \"git@github.com:vanadium\/www.git\",\n\t\t},\n\t}\n)\n\ntype Mirror struct {\n\tname, googlesource, github string\n}\n\n\/\/ vanadiumGitHubMirror mirrors googlesource.com vanadium projects to\n\/\/ github.com.\nfunc vanadiumGitHubMirror(ctx *tool.Context, testName string, _ ...Opt) (_ *test.Result, e error) {\n\t\/\/ 
Initialize the test\/task.\n\tcleanup, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\troot, err := util.V23Root()\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"V23Root\"}\n\t}\n\n\tprojects := filepath.Join(root, \"projects\")\n\tmode := os.FileMode(0755)\n\tif err := ctx.Run().MkdirAll(projects, mode); err != nil {\n\t\treturn nil, internalTestError{err, \"MkdirAll\"}\n\t}\n\n\tallPassed := true\n\tsuites := []xunit.TestSuite{}\n\tfor _, mirror := range mirrors {\n\t\tsuite, err := sync(ctx, mirror, projects)\n\t\tif err != nil {\n\t\t\treturn nil, internalTestError{err, \"sync\"}\n\t\t}\n\n\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\tsuites = append(suites, *suite)\n\t}\n\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !allPassed {\n\t\treturn &test.Result{Status: test.Failed}, nil\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\nfunc sync(ctx *tool.Context, mirror Mirror, projects string) (*xunit.TestSuite, error) {\n\tsuite := xunit.TestSuite{Name: mirror.name}\n\tdirname := filepath.Join(projects, mirror.name)\n\n\t\/\/ If dirname does not exist `git clone` otherwise `git pull`.\n\tif _, err := os.Stat(dirname); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, internalTestError{err, \"Stat\"}\n\t\t}\n\n\t\terr := clone(ctx, mirror, projects)\n\t\ttestCase := makeTestCase(\"clone\", err)\n\t\tif err != nil {\n\t\t\tsuite.Failures++\n\t\t}\n\t\tsuite.Cases = append(suite.Cases, *testCase)\n\t} else {\n\t\terr := pull(ctx, mirror, projects)\n\t\ttestCase := makeTestCase(\"pull\", err)\n\t\tif err != nil {\n\t\t\tsuite.Failures++\n\t\t}\n\t\tsuite.Cases = append(suite.Cases, *testCase)\n\t}\n\n\terr := push(ctx, mirror, projects)\n\ttestCase := makeTestCase(\"push\", err)\n\tif err != nil {\n\t\tsuite.Failures++\n\t}\n\tsuite.Cases = append(suite.Cases, *testCase)\n\n\treturn &suite, nil\n}\n\nfunc makeTestCase(action string, err error) *xunit.TestCase {\n\tc := xunit.TestCase{\n\t\tClassname: \"git\",\n\t\tName: action,\n\t}\n\n\tif err != nil {\n\t\tf := xunit.Failure{\n\t\t\tMessage: \"git error\",\n\t\t\tData: fmt.Sprintf(\"%v\", err),\n\t\t}\n\t\tc.Failures = append(c.Failures, f)\n\t}\n\n\treturn &c\n}\n\nfunc clone(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\treturn ctx.Git().Clone(mirror.googlesource, dirname)\n}\n\nfunc pull(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\topts := tool.RootDirOpt(dirname)\n\treturn ctx.Git(opts).Pull(\"origin\", \"master\")\n}\n\nfunc push(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\topts := tool.RootDirOpt(dirname)\n\treturn ctx.Git(opts).Push(mirror.github, \"master\")\n}\nRevert \"TBR: Add mirroring from roadmap.blue to github.com\/veyron\/blue\"\/\/ Copyright 2015 The Vanadium Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"v.io\/x\/devtools\/internal\/collect\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\nvar (\n\tmirrors = []Mirror{\n\t\tMirror{\n\t\t\tname: \"browser\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.browser\",\n\t\t\tgithub: \"git@github.com:vanadium\/browser.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.v23\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.v23\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.v23.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.devtools\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.devtools\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.devtools.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.lib\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.lib\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.lib.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"go.ref\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.go.x.ref\",\n\t\t\tgithub: \"git@github.com:vanadium\/go.ref.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"js\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.js.core\",\n\t\t\tgithub: \"git@github.com:vanadium\/js.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"chat\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.chat\",\n\t\t\tgithub: \"git@github.com:vanadium\/chat.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"media-sharing\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.media-sharing\",\n\t\t\tgithub: \"git@github.com:vanadium\/media-sharing.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"pipe2browser\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.pipe2browser\",\n\t\t\tgithub: \"git@github.com:vanadium\/pipe2browser.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"playground\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.playground\",\n\t\t\tgithub: \"git@github.com:vanadium\/playground.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"physical-lock\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.physical-lock\",\n\t\t\tgithub: \"git@github.com:vanadium\/physical-lock.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"reader\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/release.projects.reader\",\n\t\t\tgithub: \"git@github.com:vanadium\/reader.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"third_party\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/third_party\",\n\t\t\tgithub: \"git@github.com:vanadium\/third_party.git\",\n\t\t},\n\t\tMirror{\n\t\t\tname: \"www\",\n\t\t\tgooglesource: \"https:\/\/vanadium.googlesource.com\/www\",\n\t\t\tgithub: \"git@github.com:vanadium\/www.git\",\n\t\t},\n\t}\n)\n\ntype Mirror struct {\n\tname, googlesource, github string\n}\n\n\/\/ vanadiumGitHubMirror mirrors googlesource.com vanadium projects to\n\/\/ github.com.\nfunc vanadiumGitHubMirror(ctx *tool.Context, testName string, _ ...Opt) (_ *test.Result, e error) {\n\t\/\/ Initialize the test\/task.\n\tcleanup, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer 
collect.Error(func() error { return cleanup() }, &e)\n\n\troot, err := util.V23Root()\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"V23Root\"}\n\t}\n\n\tprojects := filepath.Join(root, \"projects\")\n\tmode := os.FileMode(0755)\n\tif err := ctx.Run().MkdirAll(projects, mode); err != nil {\n\t\treturn nil, internalTestError{err, \"MkdirAll\"}\n\t}\n\n\tallPassed := true\n\tsuites := []xunit.TestSuite{}\n\tfor _, mirror := range mirrors {\n\t\tsuite, err := sync(ctx, mirror, projects)\n\t\tif err != nil {\n\t\t\treturn nil, internalTestError{err, \"sync\"}\n\t\t}\n\n\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\tsuites = append(suites, *suite)\n\t}\n\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !allPassed {\n\t\treturn &test.Result{Status: test.Failed}, nil\n\t}\n\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\nfunc sync(ctx *tool.Context, mirror Mirror, projects string) (*xunit.TestSuite, error) {\n\tsuite := xunit.TestSuite{Name: mirror.name}\n\tdirname := filepath.Join(projects, mirror.name)\n\n\t\/\/ If dirname does not exist `git clone` otherwise `git pull`.\n\tif _, err := os.Stat(dirname); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, internalTestError{err, \"Stat\"}\n\t\t}\n\n\t\terr := clone(ctx, mirror, projects)\n\t\ttestCase := makeTestCase(\"clone\", err)\n\t\tif err != nil {\n\t\t\tsuite.Failures++\n\t\t}\n\t\tsuite.Cases = append(suite.Cases, *testCase)\n\t} else {\n\t\terr := pull(ctx, mirror, projects)\n\t\ttestCase := makeTestCase(\"pull\", err)\n\t\tif err != nil {\n\t\t\tsuite.Failures++\n\t\t}\n\t\tsuite.Cases = append(suite.Cases, *testCase)\n\t}\n\n\terr := push(ctx, mirror, projects)\n\ttestCase := makeTestCase(\"push\", err)\n\tif err != nil {\n\t\tsuite.Failures++\n\t}\n\tsuite.Cases = append(suite.Cases, *testCase)\n\n\treturn &suite, nil\n}\n\nfunc makeTestCase(action string, err error) *xunit.TestCase {\n\tc := xunit.TestCase{\n\t\tClassname: \"git\",\n\t\tName: action,\n\t}\n\n\tif err != nil {\n\t\tf := xunit.Failure{\n\t\t\tMessage: \"git error\",\n\t\t\tData: fmt.Sprintf(\"%v\", err),\n\t\t}\n\t\tc.Failures = append(c.Failures, f)\n\t}\n\n\treturn &c\n}\n\nfunc clone(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\treturn ctx.Git().Clone(mirror.googlesource, dirname)\n}\n\nfunc pull(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\topts := tool.RootDirOpt(dirname)\n\treturn ctx.Git(opts).Pull(\"origin\", \"master\")\n}\n\nfunc push(ctx *tool.Context, mirror Mirror, projects string) error {\n\tdirname := filepath.Join(projects, mirror.name)\n\topts := tool.RootDirOpt(dirname)\n\treturn ctx.Git(opts).Push(mirror.github, \"master\")\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage apimachinery\n\nimport 
(\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\t\"k8s.io\/apiextensions-apiserver\/pkg\/client\/clientset\/clientset\"\n\t\"k8s.io\/apiextensions-apiserver\/test\/integration\/fixtures\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/json\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/names\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n)\n\nvar _ = SIGDescribe(\"CustomResourceValidationRules [Privileged:ClusterAdmin][Alpha][Feature:CustomResourceValidationExpressions]\", func() {\n\tf := framework.NewDefaultFramework(\"crd-validation-expressions\")\n\n\tvar apiExtensionClient *clientset.Clientset\n\tginkgo.BeforeEach(func() {\n\t\tvar err error\n\t\tapiExtensionClient, err = clientset.NewForConfig(f.ClientConfig())\n\t\tframework.ExpectNoError(err, \"initializing apiExtensionClient\")\n\t})\n\n\tcustomResourceClient := func(crd *v1.CustomResourceDefinition) (dynamic.NamespaceableResourceInterface, schema.GroupVersionResource) {\n\t\tgvrs := fixtures.GetGroupVersionResourcesOfCustomResource(crd)\n\t\tif len(gvrs) != 1 {\n\t\t\tginkgo.Fail(\"Expected one version in custom resource definition\")\n\t\t}\n\t\tgvr := gvrs[0]\n\t\treturn f.DynamicClient.Resource(gvr), gvr\n\t}\n\tunmarshallSchema := func(schemaJson []byte) *v1.JSONSchemaProps {\n\t\tvar c v1.JSONSchemaProps\n\t\terr := json.Unmarshal(schemaJson, &c)\n\t\tframework.ExpectNoError(err, \"unmarshalling OpenAPIv3 schema\")\n\t\treturn &c\n\t}\n\n\tvar schemaWithValidationExpression = unmarshallSchema([]byte(`{\n\t \"type\":\"object\",\n\t \"properties\":{\n\t\t \"spec\":{\n\t\t\t \"type\":\"object\",\n\t\t\t \"x-kubernetes-validations\":[\n\t\t { \"rule\":\"self.x + self.y > 0\" }\n\t ],\n\t\t\t \"properties\":{\n\t\t\t\t\"x\":{ \"type\":\"integer\" },\n\t\t\t\t\"y\":{ \"type\":\"integer\" }\n\t\t\t }\n\t\t },\n\t\t \"status\":{\n\t\t\t \"type\":\"object\",\n\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t{ \"rule\":\"self.health == 'ok' || self.health == 'unhealthy'\" }\n\t\t\t ],\n\t\t\t \"properties\":{\n\t\t\t\t\"health\":{ \"type\":\"string\" }\n\t\t\t }\n\t\t }\n\t }\n\t}`))\n\tginkgo.It(\"MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validator rules\", func() {\n\t\tginkgo.By(\"Creating a custom resource definition with validation rules\")\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)\n\t\tcrd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectNoError(err, \"creating CustomResourceDefinition\")\n\t\tdefer func() {\n\t\t\terr = fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient)\n\t\t\tframework.ExpectNoError(err, \"deleting CustomResourceDefinition\")\n\t\t}()\n\n\t\tginkgo.By(\"Creating a custom resource with values that are allowed by the validation rules set on the custom resource definition\")\n\t\tcrClient, gvr := customResourceClient(crd)\n\t\tname1 := names.SimpleNameGenerator.GenerateName(\"cr-1\")\n\t\t_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{\n\t\t\t\"apiVersion\": gvr.Group + \"\/\" + gvr.Version,\n\t\t\t\"kind\": crd.Spec.Names.Kind,\n\t\t\t\"metadata\": 
map[string]interface{}{\n\t\t\t\t\"name\": name1,\n\t\t\t\t\"namespace\": f.Namespace.Name,\n\t\t\t},\n\t\t\t\"spec\": map[string]interface{}{\n\t\t\t\t\"x\": int64(1),\n\t\t\t\t\"y\": int64(0),\n\t\t\t},\n\t\t}}, metav1.CreateOptions{})\n\t\tframework.ExpectNoError(err, \"validation rules satisfied\")\n\t})\n\tginkgo.It(\"MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validator rules\", func() {\n\t\tginkgo.By(\"Creating a custom resource definition with validation rules\")\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)\n\t\tcrd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectNoError(err, \"creating CustomResourceDefinition\")\n\t\tdefer func() {\n\t\t\terr = fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient)\n\t\t\tframework.ExpectNoError(err, \"deleting CustomResourceDefinition\")\n\t\t}()\n\n\t\tginkgo.By(\"Creating a custom resource with values that fail the validation rules set on the custom resource definition\")\n\t\tcrClient, gvr := customResourceClient(crd)\n\t\tname1 := names.SimpleNameGenerator.GenerateName(\"cr-1\")\n\t\t_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{\n\t\t\t\"apiVersion\": gvr.Group + \"\/\" + gvr.Version,\n\t\t\t\"kind\": crd.Spec.Names.Kind,\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": name1,\n\t\t\t\t\"namespace\": f.Namespace.Name,\n\t\t\t},\n\t\t\t\"spec\": map[string]interface{}{\n\t\t\t\t\"x\": int64(0),\n\t\t\t\t\"y\": int64(0),\n\t\t\t},\n\t\t}}, metav1.CreateOptions{})\n\t\tframework.ExpectError(err, \"validation rules not satisfied\")\n\t\texpectedErrMsg := \"failed rule\"\n\t\tif !strings.Contains(err.Error(), expectedErrMsg) {\n\t\t\tframework.Failf(\"expect error contains %q, got %q\", expectedErrMsg, err.Error())\n\t\t}\n\t})\n\n\tginkgo.It(\"MUST fail create of a custom resource definition that contains an x-kubernetes-validator rule that refers to a property that does not exist\", func() {\n\t\tginkgo.By(\"Defining a custom resource definition with a validation rule that refers to a property that does not exist\")\n\t\tvar schemaWithInvalidValidationRule = unmarshallSchema([]byte(`{\n\t\t \"type\":\"object\",\n\t\t \"properties\":{\n\t\t\t \"spec\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t { \"rule\":\"self.z == 100\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t\"x\":{ \"type\":\"integer\" }\n\t\t\t\t }\n\t\t\t }\n\t\t }\n\t\t}`))\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithInvalidValidationRule, false)\n\t\t_, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectError(err, \"creating CustomResourceDefinition with a validation rule that refers to a property that does not exist\")\n\t\texpectedErrMsg := \"undefined field 'z'\"\n\t\tif !strings.Contains(err.Error(), expectedErrMsg) {\n\t\t\tframework.Failf(\"expect error contains %q, got %q\", expectedErrMsg, err.Error())\n\t\t}\n\t})\n})\nUPSTREAM: : Fix e2e test to solve ginkgo panic with make update\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n 
http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage apimachinery\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\t\"k8s.io\/apiextensions-apiserver\/pkg\/client\/clientset\/clientset\"\n\t\"k8s.io\/apiextensions-apiserver\/test\/integration\/fixtures\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/json\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/names\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n)\n\nvar _ = SIGDescribe(\"CustomResourceValidationRules [Privileged:ClusterAdmin][Alpha][Feature:CustomResourceValidationExpressions]\", func() {\n\tf := framework.NewDefaultFramework(\"crd-validation-expressions\")\n\n\tvar apiExtensionClient *clientset.Clientset\n\tginkgo.BeforeEach(func() {\n\t\tvar err error\n\t\tapiExtensionClient, err = clientset.NewForConfig(f.ClientConfig())\n\t\tframework.ExpectNoError(err, \"initializing apiExtensionClient\")\n\t})\n\n\tcustomResourceClient := func(crd *v1.CustomResourceDefinition) (dynamic.NamespaceableResourceInterface, schema.GroupVersionResource) {\n\t\tgvrs := fixtures.GetGroupVersionResourcesOfCustomResource(crd)\n\t\tif len(gvrs) != 1 {\n\t\t\tginkgo.Fail(\"Expected one version in custom resource definition\")\n\t\t}\n\t\tgvr := gvrs[0]\n\t\treturn f.DynamicClient.Resource(gvr), gvr\n\t}\n\tunmarshallSchema := func(schemaJson []byte) *v1.JSONSchemaProps {\n\t\tvar c v1.JSONSchemaProps\n\t\terr := json.Unmarshal(schemaJson, &c)\n\t\tframework.ExpectNoError(err, \"unmarshalling OpenAPIv3 schema\")\n\t\treturn &c\n\t}\n\n\tginkgo.It(\"MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validator rules\", func() {\n\t\tginkgo.By(\"Creating a custom resource definition with validation rules\")\n\t\tvar schemaWithValidationExpression = unmarshallSchema([]byte(`{\n\t\t\t\"type\":\"object\",\n\t\t\t\"properties\":{\n\t\t\t \"spec\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t\t{ \"rule\":\"self.x + self.y > 0\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t \"x\":{ \"type\":\"integer\" },\n\t\t\t\t\t \"y\":{ \"type\":\"integer\" }\n\t\t\t\t }\n\t\t\t },\n\t\t\t \"status\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t\t { \"rule\":\"self.health == 'ok' || self.health == 'unhealthy'\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t \"health\":{ \"type\":\"string\" }\n\t\t\t\t }\n\t\t\t }\n\t\t\t}\n\t\t }`))\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)\n\t\tcrd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectNoError(err, \"creating CustomResourceDefinition\")\n\t\tdefer func() {\n\t\t\terr = fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient)\n\t\t\tframework.ExpectNoError(err, \"deleting CustomResourceDefinition\")\n\t\t}()\n\n\t\tginkgo.By(\"Creating a 
custom resource with values that are allowed by the validation rules set on the custom resource definition\")\n\t\tcrClient, gvr := customResourceClient(crd)\n\t\tname1 := names.SimpleNameGenerator.GenerateName(\"cr-1\")\n\t\t_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{\n\t\t\t\"apiVersion\": gvr.Group + \"\/\" + gvr.Version,\n\t\t\t\"kind\": crd.Spec.Names.Kind,\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": name1,\n\t\t\t\t\"namespace\": f.Namespace.Name,\n\t\t\t},\n\t\t\t\"spec\": map[string]interface{}{\n\t\t\t\t\"x\": int64(1),\n\t\t\t\t\"y\": int64(0),\n\t\t\t},\n\t\t}}, metav1.CreateOptions{})\n\t\tframework.ExpectNoError(err, \"validation rules satisfied\")\n\t})\n\tginkgo.It(\"MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validator rules\", func() {\n\t\tginkgo.By(\"Creating a custom resource definition with validation rules\")\n\t\tvar schemaWithValidationExpression = unmarshallSchema([]byte(`{\n\t\t\t\"type\":\"object\",\n\t\t\t\"properties\":{\n\t\t\t \"spec\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t\t{ \"rule\":\"self.x + self.y > 0\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t \"x\":{ \"type\":\"integer\" },\n\t\t\t\t\t \"y\":{ \"type\":\"integer\" }\n\t\t\t\t }\n\t\t\t },\n\t\t\t \"status\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t\t { \"rule\":\"self.health == 'ok' || self.health == 'unhealthy'\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t \"health\":{ \"type\":\"string\" }\n\t\t\t\t }\n\t\t\t }\n\t\t\t}\n\t\t }`))\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)\n\t\tcrd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectNoError(err, \"creating CustomResourceDefinition\")\n\t\tdefer func() {\n\t\t\terr = fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient)\n\t\t\tframework.ExpectNoError(err, \"deleting CustomResourceDefinition\")\n\t\t}()\n\n\t\tginkgo.By(\"Creating a custom resource with values that fail the validation rules set on the custom resource definition\")\n\t\tcrClient, gvr := customResourceClient(crd)\n\t\tname1 := names.SimpleNameGenerator.GenerateName(\"cr-1\")\n\t\t_, err = crClient.Namespace(f.Namespace.Name).Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{\n\t\t\t\"apiVersion\": gvr.Group + \"\/\" + gvr.Version,\n\t\t\t\"kind\": crd.Spec.Names.Kind,\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": name1,\n\t\t\t\t\"namespace\": f.Namespace.Name,\n\t\t\t},\n\t\t\t\"spec\": map[string]interface{}{\n\t\t\t\t\"x\": int64(0),\n\t\t\t\t\"y\": int64(0),\n\t\t\t},\n\t\t}}, metav1.CreateOptions{})\n\t\tframework.ExpectError(err, \"validation rules not satisfied\")\n\t\texpectedErrMsg := \"failed rule\"\n\t\tif !strings.Contains(err.Error(), expectedErrMsg) {\n\t\t\tframework.Failf(\"expect error contains %q, got %q\", expectedErrMsg, err.Error())\n\t\t}\n\t})\n\n\tginkgo.It(\"MUST fail create of a custom resource definition that contains an x-kubernetes-validator rule that refers to a property that does not exist\", func() {\n\t\tginkgo.By(\"Defining a custom resource definition with a validation rule that refers to a property that does not exist\")\n\t\tvar schemaWithInvalidValidationRule = unmarshallSchema([]byte(`{\n\t\t 
\"type\":\"object\",\n\t\t \"properties\":{\n\t\t\t \"spec\":{\n\t\t\t\t \"type\":\"object\",\n\t\t\t\t \"x-kubernetes-validations\":[\n\t\t\t\t { \"rule\":\"self.z == 100\" }\n\t\t\t\t ],\n\t\t\t\t \"properties\":{\n\t\t\t\t\t\"x\":{ \"type\":\"integer\" }\n\t\t\t\t }\n\t\t\t }\n\t\t }\n\t\t}`))\n\t\tcrd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithInvalidValidationRule, false)\n\t\t_, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)\n\t\tframework.ExpectError(err, \"creating CustomResourceDefinition with a validation rule that refers to a property that do not exist\")\n\t\texpectedErrMsg := \"undefined field 'z'\"\n\t\tif !strings.Contains(err.Error(), expectedErrMsg) {\n\t\t\tframework.Failf(\"expect error contains %q, got %q\", expectedErrMsg, err.Error())\n\t\t}\n\t})\n})\n<|endoftext|>"} {"text":"package api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"strconv\"\n\n\t\"..\/miniserver\"\n\t\"..\/utils\"\n)\n\nconst KA_TAG = \"kerneladiutor\"\n\ntype KernelAdiutorApi struct {\n\tclient *miniserver.Client\n\tpath string\n\tversion string\n\tdevicedata *DeviceData\n}\n\nfunc (kaAPi KernelAdiutorApi) GetResponse() *miniserver.Response {\n\tswitch kaAPi.version {\n\tcase \"v1\":\n\t\treturn kaAPi.kernelAdiutorApiv1()\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc NewKernelAdiutorApi(client *miniserver.Client,\n\tpath, version string,\n\tdData *DeviceData) KernelAdiutorApi {\n\treturn KernelAdiutorApi{\n\t\tclient: client,\n\t\tpath: path,\n\t\tversion: version,\n\t\tdevicedata: dData,\n\t}\n}\n\nfunc (kaAPi KernelAdiutorApi) kernelAdiutorApiv1() *miniserver.Response {\n\tvar response *miniserver.Response\n\n\tswitch kaAPi.path {\n\tcase \"device\/create\":\n\t\tif kaAPi.client.Method == http.MethodPost &&\n\t\t\tlen(kaAPi.client.Request) > 0 {\n\n\t\t\tvar data map[string]interface{}\n\t\t\tjson.Unmarshal(kaAPi.client.Request, &data)\n\n\t\t\tvar dInfo *DeviceInfo = NewDeviceInfo(data)\n\t\t\tif dInfo.valid() {\n\n\t\t\t\tvar updated bool = kaAPi.putDatabase(dInfo)\n\t\t\t\tif b, err := kaAPi.createStatus(true); err == nil {\n\t\t\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t\t\t}\n\n\t\t\t\tif updated {\n\t\t\t\t\tutils.LogI(KA_TAG, fmt.Sprintf(\"Updating device %s\", dInfo.Model))\n\t\t\t\t} else {\n\t\t\t\t\tutils.LogI(KA_TAG, fmt.Sprintf(\"Inserting device %s\", dInfo.Model))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase \"device\/get\":\n\t\tif kaAPi.client.Method == http.MethodGet {\n\n\t\t\t\/\/ Get all\n\t\t\tif page, pageok := kaAPi.client.Queries[\"page\"]; (pageok && len(kaAPi.client.Queries) == 1) ||\n\t\t\t\tlen(kaAPi.client.Queries) == 0 {\n\n\t\t\t\tvar pageNumber int = 1\n\t\t\t\tif pageok {\n\t\t\t\t\tif num, err := strconv.Atoi(page[0]); err == nil {\n\t\t\t\t\t\tpageNumber = num\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tresponses := make([]DeviceInfo, 0)\n\t\t\t\tfor i := (pageNumber - 1) * 10; i < pageNumber*10; i++ {\n\t\t\t\t\tif i < len(kaAPi.devicedata.sortedScores) {\n\t\t\t\t\t\tif value, ok := kaAPi.devicedata.infos[kaAPi.devicedata.sortedScores[i]]; ok {\n\t\t\t\t\t\t\tvar info DeviceInfo = *value\n\t\t\t\t\t\t\tinfo.AndroidID = \"\"\n\t\t\t\t\t\t\tresponses = append(responses, info)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(responses) > 0 {\n\t\t\t\t\tb, err := json.Marshal(responses)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif response == 
nil {\n\t\tif b, err := kaAPi.createStatus(false); err == nil {\n\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t}\n\t}\n\tresponse.SetContentType(miniserver.ContentJson)\n\n\treturn response\n}\n\nfunc (kaApi KernelAdiutorApi) createStatus(success bool) ([]byte, error) {\n\tvar statusCode int = http.StatusOK\n\tif !success {\n\t\tstatusCode = http.StatusNotFound\n\t}\n\treturn json.Marshal(struct {\n\t\tSuccess bool `json:\"success\"`\n\t\tMethod string `json:\"method\"`\n\t\tRequest string `json:\"request\"`\n\t\tVersion string `json:\"version\"`\n\t\tStatus int64 `json:\"status\"`\n\t}{success, kaApi.client.Method, kaApi.path,\n\t\tkaApi.version, int64(statusCode)})\n}\n\ntype DeviceInfo struct {\n\tID string `json:\"id\"`\n\tAndroidID string `json:\"android_id,omitempty\"`\n\tAndroidVersion string `json:\"android_version\"`\n\tKernelVersion string `json:\"kernel_version\"`\n\tAppVersion string `json:\"app_version\"`\n\tBoard string `json:\"board\"`\n\tModel string `json:\"model\"`\n\tVendor string `json:\"vendor\"`\n\tCommands []string `json:\"commands\"`\n\tTimes []float64 `json:\"times\"`\n\tCpu float64 `json:\"cpu\"`\n\tDate string `json:\"date\"`\n\tScore float64 `json:\"score\"`\n}\n\nfunc NewDeviceInfo(data map[string]interface{}) *DeviceInfo {\n\tvar j utils.Json = utils.Json{data}\n\n\tvar dInfo *DeviceInfo = &DeviceInfo{\n\t\tID: j.GetString(\"id\"),\n\t\tAndroidID: j.GetString(\"android_id\"),\n\t\tAndroidVersion: j.GetString(\"android_version\"),\n\t\tKernelVersion: j.GetString(\"kernel_version\"),\n\t\tAppVersion: j.GetString(\"app_version\"),\n\t\tBoard: j.GetString(\"board\"),\n\t\tModel: j.GetString(\"model\"),\n\t\tVendor: j.GetString(\"vendor\"),\n\t\tCommands: j.GetStringArray(\"commands\"),\n\t\tTimes: j.GetFloatArray(\"times\"),\n\t\tCpu: j.GetFloat(\"cpu\"),\n\t\tDate: j.GetString(\"date\"),\n\t\tScore: j.GetFloat(\"score\"),\n\t}\n\n\tif dInfo.valid() {\n\t\tif utils.StringEmpty(dInfo.ID) {\n\t\t\tdInfo.ID = utils.Encode(dInfo.AndroidID)\n\t\t}\n\t\tif utils.StringEmpty(dInfo.Date) {\n\t\t\tdInfo.Date = time.Now().Format(time.RFC3339)\n\t\t}\n\t\tif dInfo.Score == 0 {\n\t\t\tdInfo.Score = utils.GetAverage(dInfo.Times)*1e9 - dInfo.Cpu\n\t\t}\n\t}\n\n\treturn dInfo\n}\n\nfunc (dInfo DeviceInfo) valid() bool {\n\treturn !utils.StringEmpty(dInfo.AndroidID) &&\n\t\t!utils.StringEmpty(dInfo.AndroidVersion) &&\n\t\t!utils.StringEmpty(dInfo.KernelVersion) &&\n\t\t!utils.StringEmpty(dInfo.AppVersion) &&\n\t\t!utils.StringEmpty(dInfo.Board) &&\n\t\t!utils.StringEmpty(dInfo.Model) && dInfo.Model != \"unknown\" &&\n\t\t!utils.StringEmpty(dInfo.Vendor) &&\n\t\tdInfo.Commands != nil && len(dInfo.Commands) >= 10 &&\n\t\tdInfo.Times != nil && len(dInfo.Times) >= 20 &&\n\t\tdInfo.Cpu != 0\n}\n\nfunc (dInfo DeviceInfo) Json() ([]byte, error) {\n\treturn json.Marshal(dInfo)\n}\n\nfunc (kaApi KernelAdiutorApi) putDatabase(dInfo *DeviceInfo) bool {\n\treturn kaApi.devicedata.Update(dInfo)\n}\nserver: kerneladiutor: Accept all length of commandspackage api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"strconv\"\n\n\t\"..\/miniserver\"\n\t\"..\/utils\"\n)\n\nconst KA_TAG = \"kerneladiutor\"\n\ntype KernelAdiutorApi struct {\n\tclient *miniserver.Client\n\tpath string\n\tversion string\n\tdevicedata *DeviceData\n}\n\nfunc (kaAPi KernelAdiutorApi) GetResponse() *miniserver.Response {\n\tswitch kaAPi.version {\n\tcase \"v1\":\n\t\treturn kaAPi.kernelAdiutorApiv1()\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc NewKernelAdiutorApi(client 
*miniserver.Client,\n\tpath, version string,\n\tdData *DeviceData) KernelAdiutorApi {\n\treturn KernelAdiutorApi{\n\t\tclient: client,\n\t\tpath: path,\n\t\tversion: version,\n\t\tdevicedata: dData,\n\t}\n}\n\nfunc (kaAPi KernelAdiutorApi) kernelAdiutorApiv1() *miniserver.Response {\n\tvar response *miniserver.Response\n\n\tswitch kaAPi.path {\n\tcase \"device\/create\":\n\t\tif kaAPi.client.Method == http.MethodPost &&\n\t\t\tlen(kaAPi.client.Request) > 0 {\n\n\t\t\tvar data map[string]interface{}\n\t\t\tjson.Unmarshal(kaAPi.client.Request, &data)\n\n\t\t\tvar dInfo *DeviceInfo = NewDeviceInfo(data)\n\t\t\tif dInfo.valid() {\n\n\t\t\t\tvar updated bool = kaAPi.putDatabase(dInfo)\n\t\t\t\tif b, err := kaAPi.createStatus(true); err == nil {\n\t\t\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t\t\t}\n\n\t\t\t\tif updated {\n\t\t\t\t\tutils.LogI(KA_TAG, fmt.Sprintf(\"Updating device %s\", dInfo.Model))\n\t\t\t\t} else {\n\t\t\t\t\tutils.LogI(KA_TAG, fmt.Sprintf(\"Inserting device %s\", dInfo.Model))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase \"device\/get\":\n\t\tif kaAPi.client.Method == http.MethodGet {\n\n\t\t\t\/\/ Get all\n\t\t\tif page, pageok := kaAPi.client.Queries[\"page\"]; (pageok && len(kaAPi.client.Queries) == 1) ||\n\t\t\t\tlen(kaAPi.client.Queries) == 0 {\n\n\t\t\t\tvar pageNumber int = 1\n\t\t\t\tif pageok {\n\t\t\t\t\tif num, err := strconv.Atoi(page[0]); err == nil {\n\t\t\t\t\t\tpageNumber = num\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tresponses := make([]DeviceInfo, 0)\n\t\t\t\tfor i := (pageNumber - 1) * 10; i < pageNumber*10; i++ {\n\t\t\t\t\tif i < len(kaAPi.devicedata.sortedScores) {\n\t\t\t\t\t\tif value, ok := kaAPi.devicedata.infos[kaAPi.devicedata.sortedScores[i]]; ok {\n\t\t\t\t\t\t\tvar info DeviceInfo = *value\n\t\t\t\t\t\t\tinfo.AndroidID = \"\"\n\t\t\t\t\t\t\tresponses = append(responses, info)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(responses) > 0 {\n\t\t\t\t\tb, err := json.Marshal(responses)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif response == nil {\n\t\tif b, err := kaAPi.createStatus(false); err == nil {\n\t\t\tresponse = kaAPi.client.ResponseBody(string(b))\n\t\t}\n\t}\n\tresponse.SetContentType(miniserver.ContentJson)\n\n\treturn response\n}\n\nfunc (kaApi KernelAdiutorApi) createStatus(success bool) ([]byte, error) {\n\tvar statusCode int = http.StatusOK\n\tif !success {\n\t\tstatusCode = http.StatusNotFound\n\t}\n\treturn json.Marshal(struct {\n\t\tSuccess bool `json:\"success\"`\n\t\tMethod string `json:\"method\"`\n\t\tRequest string `json:\"request\"`\n\t\tVersion string `json:\"version\"`\n\t\tStatus int64 `json:\"status\"`\n\t}{success, kaApi.client.Method, kaApi.path,\n\t\tkaApi.version, int64(statusCode)})\n}\n\ntype DeviceInfo struct {\n\tID string `json:\"id\"`\n\tAndroidID string `json:\"android_id,omitempty\"`\n\tAndroidVersion string `json:\"android_version\"`\n\tKernelVersion string `json:\"kernel_version\"`\n\tAppVersion string `json:\"app_version\"`\n\tBoard string `json:\"board\"`\n\tModel string `json:\"model\"`\n\tVendor string `json:\"vendor\"`\n\tCommands []string `json:\"commands\"`\n\tTimes []float64 `json:\"times\"`\n\tCpu float64 `json:\"cpu\"`\n\tDate string `json:\"date\"`\n\tScore float64 `json:\"score\"`\n}\n\nfunc NewDeviceInfo(data map[string]interface{}) *DeviceInfo {\n\tvar j utils.Json = utils.Json{data}\n\n\tvar dInfo *DeviceInfo = &DeviceInfo{\n\t\tID: j.GetString(\"id\"),\n\t\tAndroidID: 
j.GetString(\"android_id\"),\n\t\tAndroidVersion: j.GetString(\"android_version\"),\n\t\tKernelVersion: j.GetString(\"kernel_version\"),\n\t\tAppVersion: j.GetString(\"app_version\"),\n\t\tBoard: j.GetString(\"board\"),\n\t\tModel: j.GetString(\"model\"),\n\t\tVendor: j.GetString(\"vendor\"),\n\t\tCommands: j.GetStringArray(\"commands\"),\n\t\tTimes: j.GetFloatArray(\"times\"),\n\t\tCpu: j.GetFloat(\"cpu\"),\n\t\tDate: j.GetString(\"date\"),\n\t\tScore: j.GetFloat(\"score\"),\n\t}\n\n\tif dInfo.valid() {\n\t\tif utils.StringEmpty(dInfo.ID) {\n\t\t\tdInfo.ID = utils.Encode(dInfo.AndroidID)\n\t\t}\n\t\tif utils.StringEmpty(dInfo.Date) {\n\t\t\tdInfo.Date = time.Now().Format(time.RFC3339)\n\t\t}\n\t\tif dInfo.Score == 0 {\n\t\t\tdInfo.Score = utils.GetAverage(dInfo.Times)*1e9 - dInfo.Cpu\n\t\t}\n\t}\n\n\treturn dInfo\n}\n\nfunc (dInfo DeviceInfo) valid() bool {\n\treturn !utils.StringEmpty(dInfo.AndroidID) &&\n\t\t!utils.StringEmpty(dInfo.AndroidVersion) &&\n\t\t!utils.StringEmpty(dInfo.KernelVersion) &&\n\t\t!utils.StringEmpty(dInfo.AppVersion) &&\n\t\t!utils.StringEmpty(dInfo.Board) &&\n\t\t!utils.StringEmpty(dInfo.Model) && dInfo.Model != \"unknown\" &&\n\t\t!utils.StringEmpty(dInfo.Vendor) &&\n\t\tdInfo.Commands != nil &&\n\t\tdInfo.Times != nil && len(dInfo.Times) >= 20 &&\n\t\tdInfo.Cpu != 0\n}\n\nfunc (dInfo DeviceInfo) Json() ([]byte, error) {\n\treturn json.Marshal(dInfo)\n}\n\nfunc (kaApi KernelAdiutorApi) putDatabase(dInfo *DeviceInfo) bool {\n\treturn kaApi.devicedata.Update(dInfo)\n}\n<|endoftext|>"} {"text":"package transforms\n\nimport (\n\t. \"connectordb\/streamdb\/datastream\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com\/connectordb\/duck\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestPipelineGenerator(t *testing.T) {\n\ttestcases := []struct {\n\t\tPipeline string\n\t\tHasSyntaxError bool\n\t\tHaserror2 bool\n\t\tInput *Datapoint\n\t\tOutput *Datapoint\n\t}{\n\t\t\/\/ Identity functions\n\t\t{\"true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"45.555\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 45.555}},\n\t\t{\"\\\"string\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\"}},\n\t\t{\"\\\"❤ ☀ ☆ ☂ ☻ ♞ ☯ ☭ ☢ €\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"❤ ☀ ☆ ☂ ☻ ♞ ☯ ☭ ☢ €\"}},\n\n\t\t\/\/ Literal identity\n\t\t{\"$\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\n\t\t\/\/ Basic Testing\n\t\t{\"4 < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ Logical tests\n\t\t{\"true or false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"false or false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"true and false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"true and (false or true)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"true and true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"true and not false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ Logical filter tests\n\t\t{\"if true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if true | 42\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\t\t{\"if false\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if $ < 5\", false, false, &Datapoint{Data: 4}, 
&Datapoint{Data: 4}},\n\n\t\t\/\/ Comparison\n\t\t{\"$ > 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ > 3\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ >= 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ < 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ <= 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ != 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ != 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ == 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ == 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\n\t\t\/\/ Logical pipelines\n\t\t{\"if $ < 5 and $ > 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if $ < 5 | if $ > 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if $ < 5 | if $ > 33\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if $ < 5 | $ > 33\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"has(\\\"test\\\") | $ < 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"if has(\\\"test\\\")| $ < 1\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if has(\\\"test\\\")| $[\\\"test\\\"] < 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, &Datapoint{Data: false}},\n\t\t{\"if has(\\\"tst\\\")| $[\\\"test\\\"] < 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, nil},\n\t\t{\"if has(\\\"test\\\")| $[\\\"test\\\"] > 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, &Datapoint{Data: true}},\n\n\t\t\/\/ Invalid\n\t\t{\"if has(\\\"test\\\"\", true, false, nil, nil},\n\t\t{\"$[\\\"test\\\"]\", false, true, &Datapoint{Data: 4}, nil},\n\n\t\t\/\/ Multiple stage pipeline\n\t\t{\"$ | false | 42\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\n\t\t\/\/ implicit logicals\n\t\t{\"gt(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"gt(3)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"gte(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"lt(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"lt(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"lte(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"ne(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"ne(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"eq(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"eq(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\n\t\t\/\/ Test custom functions\n\t\t{\"identity()\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"passthrough($ > 5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"passthrough($ > 5 | eq(false))\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"fortyTwo()\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\t\t{\"doesnotexist()\", true, false, &Datapoint{Data: 4}, nil},\n\n\t\t\/\/ wrong number of args on generation\n\t\t{\"passthrough($ > 5 | eq(false), $)\", true, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ setting values\n\t\t{\"set($, 4)\", false, false, &Datapoint{Data: 4}, 
&Datapoint{Data: 4}},\n\t\t{\"set($, true)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"set($, \\\"foo\\\")\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"foo\"}},\n\t\t{\"set($[\\\"bar\\\"], \\\"foo\\\")\", false, true, &Datapoint{Data: 4}, &Datapoint{Data: \"foo\"}},\n\n\t\t\/\/ maths\n\t\t{\"1 + 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 2}},\n\t\t{\"$ + 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 5}},\n\t\t{\"$ + \\\"4\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 8}},\n\t\t{\"$ * 2\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 8}},\n\t\t{\"$ \/ 2\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 2}},\n\t\t{\"1 + 2 * 3 + 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 11}},\n\t\t{\"1 + 2 * (3 + 4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 15}},\n\t\t{\"-1 + 2\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 1}},\n\t\t{\"-(1 + 2)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: -3}},\n\t}\n\n\t\/\/ function that should nilt out\n\tidentityFunc := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\treturn dp, nil\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"identity\", identityFunc)\n\n\t\/\/ passthrough\n\tpassthroughFunc := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\tif len(children) != 1 {\n\t\t\treturn pipelineGeneratorIdentity(), errors.New(\"passthrough error\")\n\t\t}\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\treturn children[0](dp)\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"passthrough\", passthroughFunc)\n\n\tfortyTwo := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\tdp.Data = 42\n\t\t\treturn dp, nil\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"fortyTwo\", fortyTwo)\n\n\tfor _, c := range testcases {\n\n\t\tresult, err := ParseTransform(c.Pipeline)\n\n\t\tif c.HasSyntaxError {\n\t\t\trequire.Error(t, err)\n\t\t\tcontinue\n\t\t}\n\n\t\trequire.NoError(t, err, duck.JSONString(c))\n\n\t\tdp, err := result(c.Input)\n\t\tif c.Haserror2 {\n\t\t\trequire.Error(t, err, duck.JSONString(c))\n\t\t} else {\n\t\t\trequire.NoError(t, err, duck.JSONString(c))\n\t\t\tif c.Output != nil {\n\t\t\t\trequire.Equal(t, c.Output.String(), dp.String(), duck.JSONString(c))\n\t\t\t} else {\n\t\t\t\trequire.Nil(t, dp, duck.JSONString(c))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestParseTransform(t *testing.T) {\n\t\/\/ Valid pipeline\n\t{\n\t\ttransform, err := ParseTransform(\"42\")\n\t\trequire.Nil(t, err)\n\t\trequire.NotNil(t, transform)\n\t}\n\n\t\/\/ invalid pipeline\n\t{\n\t\ttransform, err := ParseTransform(\"(\")\n\t\trequire.NotNil(t, err)\n\t\trequire.Nil(t, transform)\n\t}\n}\nadded tests for escape sequencespackage transforms\n\nimport (\n\t. 
\"connectordb\/streamdb\/datastream\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com\/connectordb\/duck\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestPipelineGenerator(t *testing.T) {\n\ttestcases := []struct {\n\t\tPipeline string\n\t\tHasSyntaxError bool\n\t\tHaserror2 bool\n\t\tInput *Datapoint\n\t\tOutput *Datapoint\n\t}{\n\t\t\/\/ Identity functions\n\t\t{\"true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"45.555\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 45.555}},\n\n\t\t\/\/ String testing -- escaping, unicode and pipes\n\t\t{\"\\\"string\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\"}},\n\t\t{\"'string'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\"}},\n\t\t{\"'string\\\\n'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\\n\"}},\n\t\t{\"'string\\\\t'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\\t\"}},\n\t\t{\"'string\\\\\\\\'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\\\\\"}},\n\t\t{\"'string\\\\r'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\\r\"}},\n\t\t{\"'string\\\"'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"string\\\"\"}},\n\t\t{\"'|'\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"|\"}},\n\t\t{\"\\\"❤ ☀ ☆ ☂ ☻ ♞ ☯ ☭ ☢ €\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"❤ ☀ ☆ ☂ ☻ ♞ ☯ ☭ ☢ €\"}},\n\n\t\t\/\/ Literal identity\n\t\t{\"$\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\n\t\t\/\/ Basic Testing\n\t\t{\"4 < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ Logical tests\n\t\t{\"true or false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"false or false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"true and false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"true and (false or true)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"true and true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"true and not false\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ Logical filter tests\n\t\t{\"if true\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if true | 42\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\t\t{\"if false\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if $ < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\n\t\t\/\/ Comparison\n\t\t{\"$ > 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ > 3\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ >= 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ < 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ < 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ <= 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ != 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"$ != 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ == 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"$ == 5\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\n\t\t\/\/ Logical pipelines\n\t\t{\"if $ < 5 and $ > 1\", false, false, 
&Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if $ < 5 | if $ > 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"if $ < 5 | if $ > 33\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if $ < 5 | $ > 33\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"has(\\\"test\\\") | $ < 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"if has(\\\"test\\\")| $ < 1\", false, false, &Datapoint{Data: 4}, nil},\n\t\t{\"if has(\\\"test\\\")| $[\\\"test\\\"] < 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, &Datapoint{Data: false}},\n\t\t{\"if has(\\\"tst\\\")| $[\\\"test\\\"] < 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, nil},\n\t\t{\"if has(\\\"test\\\")| $[\\\"test\\\"] > 1\", false, false, &Datapoint{Data: map[string]interface{}{\"test\": 25}}, &Datapoint{Data: true}},\n\n\t\t\/\/ Invalid\n\t\t{\"if has(\\\"test\\\"\", true, false, nil, nil},\n\t\t{\"$[\\\"test\\\"]\", false, true, &Datapoint{Data: 4}, nil},\n\n\t\t\/\/ Multiple stage pipeline\n\t\t{\"$ | false | 42\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\n\t\t\/\/ implicit logicals\n\t\t{\"gt(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"gt(3)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"gte(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"lt(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"lt(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"lte(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"ne(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"ne(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"eq(4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"eq(5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\n\t\t\/\/ Test custom functions\n\t\t{\"identity()\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"passthrough($ > 5)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: false}},\n\t\t{\"passthrough($ > 5 | eq(false))\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"fortyTwo()\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 42}},\n\t\t{\"doesnotexist()\", true, false, &Datapoint{Data: 4}, nil},\n\n\t\t\/\/ wrong number of args on generation\n\t\t{\"passthrough($ > 5 | eq(false), $)\", true, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\n\t\t\/\/ setting values\n\t\t{\"set($, 4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 4}},\n\t\t{\"set($, true)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: true}},\n\t\t{\"set($, \\\"foo\\\")\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: \"foo\"}},\n\t\t{\"set($[\\\"bar\\\"], \\\"foo\\\")\", false, true, &Datapoint{Data: 4}, &Datapoint{Data: \"foo\"}},\n\n\t\t\/\/ maths\n\t\t{\"1 + 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 2}},\n\t\t{\"$ + 1\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 5}},\n\t\t{\"$ + \\\"4\\\"\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 8}},\n\t\t{\"$ * 2\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 8}},\n\t\t{\"$ \/ 2\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 2}},\n\t\t{\"1 + 2 * 3 + 4\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 11}},\n\t\t{\"1 + 2 * (3 + 4)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: 15}},\n\t\t{\"-1 + 2\", false, false, 
&Datapoint{Data: 4}, &Datapoint{Data: 1}},\n\t\t{\"-(1 + 2)\", false, false, &Datapoint{Data: 4}, &Datapoint{Data: -3}},\n\t}\n\n\t\/\/ function that should nilt out\n\tidentityFunc := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\treturn dp, nil\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"identity\", identityFunc)\n\n\t\/\/ passthrough\n\tpassthroughFunc := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\tif len(children) != 1 {\n\t\t\treturn pipelineGeneratorIdentity(), errors.New(\"passthrough error\")\n\t\t}\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\treturn children[0](dp)\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"passthrough\", passthroughFunc)\n\n\tfortyTwo := func(name string, children ...TransformFunc) (TransformFunc, error) {\n\t\treturn func(dp *Datapoint) (tdp *Datapoint, err error) {\n\t\t\tdp.Data = 42\n\t\t\treturn dp, nil\n\t\t}, nil\n\t}\n\tRegisterCustomFunction(\"fortyTwo\", fortyTwo)\n\n\tfor _, c := range testcases {\n\n\t\tresult, err := ParseTransform(c.Pipeline)\n\n\t\tif c.HasSyntaxError {\n\t\t\trequire.Error(t, err)\n\t\t\tcontinue\n\t\t}\n\n\t\trequire.NoError(t, err, duck.JSONString(c))\n\n\t\tdp, err := result(c.Input)\n\t\tif c.Haserror2 {\n\t\t\trequire.Error(t, err, duck.JSONString(c))\n\t\t} else {\n\t\t\trequire.NoError(t, err, duck.JSONString(c))\n\t\t\tif c.Output != nil {\n\t\t\t\trequire.Equal(t, c.Output.String(), dp.String(), duck.JSONString(c))\n\t\t\t} else {\n\t\t\t\trequire.Nil(t, dp, duck.JSONString(c))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestParseTransform(t *testing.T) {\n\t\/\/ Valid pipeline\n\t{\n\t\ttransform, err := ParseTransform(\"42\")\n\t\trequire.Nil(t, err)\n\t\trequire.NotNil(t, transform)\n\t}\n\n\t\/\/ invalid pipeline\n\t{\n\t\ttransform, err := ParseTransform(\"(\")\n\t\trequire.NotNil(t, err)\n\t\trequire.Nil(t, transform)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2017 Vector Creations Ltd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/auth\/storage\/accounts\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/auth\/storage\/devices\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/producers\"\n\t\"github.com\/matrix-org\/dendrite\/common\"\n\t\"github.com\/matrix-org\/dendrite\/common\/config\"\n\t\"github.com\/matrix-org\/dendrite\/common\/keydb\"\n\t\"github.com\/matrix-org\/gomatrixserverlib\"\n\t\"github.com\/matrix-org\/naffka\"\n\n\tmediaapi_routing \"github.com\/matrix-org\/dendrite\/mediaapi\/routing\"\n\tmediaapi_storage \"github.com\/matrix-org\/dendrite\/mediaapi\/storage\"\n\n\troomserver_alias \"github.com\/matrix-org\/dendrite\/roomserver\/alias\"\n\troomserver_input \"github.com\/matrix-org\/dendrite\/roomserver\/input\"\n\troomserver_query 
\"github.com\/matrix-org\/dendrite\/roomserver\/query\"\n\troomserver_storage \"github.com\/matrix-org\/dendrite\/roomserver\/storage\"\n\n\tclientapi_consumers \"github.com\/matrix-org\/dendrite\/clientapi\/consumers\"\n\tclientapi_routing \"github.com\/matrix-org\/dendrite\/clientapi\/routing\"\n\n\tsyncapi_consumers \"github.com\/matrix-org\/dendrite\/syncapi\/consumers\"\n\tsyncapi_routing \"github.com\/matrix-org\/dendrite\/syncapi\/routing\"\n\tsyncapi_storage \"github.com\/matrix-org\/dendrite\/syncapi\/storage\"\n\tsyncapi_sync \"github.com\/matrix-org\/dendrite\/syncapi\/sync\"\n\tsyncapi_types \"github.com\/matrix-org\/dendrite\/syncapi\/types\"\n\n\tfederationapi_routing \"github.com\/matrix-org\/dendrite\/federationapi\/routing\"\n\n\tfederationsender_consumers \"github.com\/matrix-org\/dendrite\/federationsender\/consumers\"\n\t\"github.com\/matrix-org\/dendrite\/federationsender\/queue\"\n\tfederationsender_storage \"github.com\/matrix-org\/dendrite\/federationsender\/storage\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tsarama \"gopkg.in\/Shopify\/sarama.v1\"\n)\n\nvar (\n\tlogDir = os.Getenv(\"LOG_DIR\")\n\tconfigPath = flag.String(\"config\", \"dendrite.yaml\", \"The path to the config file. For more information, see the config file in this repository.\")\n\thttpBindAddr = flag.String(\"http-bind-address\", \":8008\", \"The HTTP listening port for the server\")\n\thttpsBindAddr = flag.String(\"https-bind-address\", \":8448\", \"The HTTPS listening port for the server\")\n\tcertFile = flag.String(\"tls-cert\", \"\", \"The PEM formatted X509 certificate to use for TLS\")\n\tkeyFile = flag.String(\"tls-key\", \"\", \"The PEM private key to use for TLS\")\n)\n\nfunc main() {\n\tcommon.SetupLogging(logDir)\n\n\tflag.Parse()\n\n\tif *configPath == \"\" {\n\t\tlog.Fatal(\"--config must be supplied\")\n\t}\n\tcfg, err := config.LoadMonolithic(*configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Invalid config file: %s\", err)\n\t}\n\n\tm := newMonolith(cfg)\n\tm.setupDatabases()\n\tm.setupFederation()\n\tm.setupKafka()\n\tm.setupRoomServer()\n\tm.setupProducers()\n\tm.setupNotifiers()\n\tm.setupConsumers()\n\tm.setupAPIs()\n\n\t\/\/ Expose the matrix APIs directly rather than putting them under a \/api path.\n\tgo func() {\n\t\tlog.Info(\"Listening on \", *httpBindAddr)\n\t\tlog.Fatal(http.ListenAndServe(*httpBindAddr, m.api))\n\t}()\n\t\/\/ Handle HTTPS if certificate and key are provided\n\tgo func() {\n\t\tif *certFile != \"\" && *keyFile != \"\" {\n\t\t\tlog.Info(\"Listening on \", *httpsBindAddr)\n\t\t\tlog.Fatal(http.ListenAndServeTLS(*httpsBindAddr, *certFile, *keyFile, m.api))\n\t\t}\n\t}()\n\n\t\/\/ We want to block forever to let the HTTP and HTTPS handler serve the APIs\n\tselect {}\n}\n\n\/\/ A monolith contains all the dendrite components.\n\/\/ Some of the setup functions depend on previous setup functions, so they must\n\/\/ be called in the same order as they are defined in the file.\ntype monolith struct {\n\tcfg *config.Dendrite\n\tapi *mux.Router\n\n\troomServerDB *roomserver_storage.Database\n\taccountDB *accounts.Database\n\tdeviceDB *devices.Database\n\tkeyDB *keydb.Database\n\tmediaAPIDB *mediaapi_storage.Database\n\tsyncAPIDB *syncapi_storage.SyncServerDatabase\n\tfederationSenderDB *federationsender_storage.Database\n\n\tfederation *gomatrixserverlib.FederationClient\n\tkeyRing gomatrixserverlib.KeyRing\n\n\tinputAPI *roomserver_input.RoomserverInputAPI\n\tqueryAPI *roomserver_query.RoomserverQueryAPI\n\taliasAPI 
*roomserver_alias.RoomserverAliasAPI\n\n\tkafkaConsumer sarama.Consumer\n\tkafkaProducer sarama.SyncProducer\n\n\troomServerProducer *producers.RoomserverProducer\n\tuserUpdateProducer *producers.UserUpdateProducer\n\tsyncProducer *producers.SyncAPIProducer\n\n\tsyncAPINotifier *syncapi_sync.Notifier\n}\n\nfunc newMonolith(cfg *config.Dendrite) *monolith {\n\treturn &monolith{cfg: cfg, api: mux.NewRouter()}\n}\n\nfunc (m *monolith) setupDatabases() {\n\tvar err error\n\tm.roomServerDB, err = roomserver_storage.Open(string(m.cfg.Database.RoomServer))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm.accountDB, err = accounts.NewDatabase(string(m.cfg.Database.Account), m.cfg.Matrix.ServerName)\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup account database(%q): %s\", m.cfg.Database.Account, err.Error())\n\t}\n\tm.deviceDB, err = devices.NewDatabase(string(m.cfg.Database.Device), m.cfg.Matrix.ServerName)\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup device database(%q): %s\", m.cfg.Database.Device, err.Error())\n\t}\n\tm.keyDB, err = keydb.NewDatabase(string(m.cfg.Database.ServerKey))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup key database(%q): %s\", m.cfg.Database.ServerKey, err.Error())\n\t}\n\tm.mediaAPIDB, err = mediaapi_storage.Open(string(m.cfg.Database.MediaAPI))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup sync api database(%q): %s\", m.cfg.Database.MediaAPI, err.Error())\n\t}\n\tm.syncAPIDB, err = syncapi_storage.NewSyncServerDatabase(string(m.cfg.Database.SyncAPI))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup sync api database(%q): %s\", m.cfg.Database.SyncAPI, err.Error())\n\t}\n\tm.federationSenderDB, err = federationsender_storage.NewDatabase(string(m.cfg.Database.FederationSender))\n\tif err != nil {\n\t\tlog.Panicf(\"startup: failed to create federation sender database with data source %s : %s\", m.cfg.Database.FederationSender, err)\n\t}\n}\n\nfunc (m *monolith) setupFederation() {\n\tm.federation = gomatrixserverlib.NewFederationClient(\n\t\tm.cfg.Matrix.ServerName, m.cfg.Matrix.KeyID, m.cfg.Matrix.PrivateKey,\n\t)\n\n\tm.keyRing = gomatrixserverlib.KeyRing{\n\t\tKeyFetchers: []gomatrixserverlib.KeyFetcher{\n\t\t\t\/\/ TODO: Use perspective key fetchers for production.\n\t\t\t&gomatrixserverlib.DirectKeyFetcher{Client: m.federation.Client},\n\t\t},\n\t\tKeyDatabase: m.keyDB,\n\t}\n}\n\nfunc (m *monolith) setupKafka() {\n\tvar err error\n\tif m.cfg.Kafka.UseNaffka {\n\t\tnaff, err := naffka.New(&naffka.MemoryDatabase{})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t}).Panic(\"Failed to setup naffka\")\n\t\t}\n\t\tm.kafkaConsumer = naff\n\t\tm.kafkaProducer = naff\n\t} else {\n\t\tm.kafkaConsumer, err = sarama.NewConsumer(m.cfg.Kafka.Addresses, nil)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t\t\"addresses\": m.cfg.Kafka.Addresses,\n\t\t\t}).Panic(\"Failed to setup kafka consumers\")\n\t\t}\n\t\tm.kafkaProducer, err = sarama.NewSyncProducer(m.cfg.Kafka.Addresses, nil)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t\t\"addresses\": m.cfg.Kafka.Addresses,\n\t\t\t}).Panic(\"Failed to setup kafka producers\")\n\t\t}\n\t}\n}\n\nfunc (m *monolith) setupRoomServer() {\n\tm.inputAPI = &roomserver_input.RoomserverInputAPI{\n\t\tDB: m.roomServerDB,\n\t\tProducer: m.kafkaProducer,\n\t\tOutputRoomEventTopic: string(m.cfg.Kafka.Topics.OutputRoomEvent),\n\t}\n\n\tm.queryAPI = &roomserver_query.RoomserverQueryAPI{\n\t\tDB: 
m.roomServerDB,\n\t}\n\n\tm.aliasAPI = &roomserver_alias.RoomserverAliasAPI{\n\t\tDB: m.roomServerDB,\n\t\tCfg: m.cfg,\n\t\tInputAPI: m.inputAPI,\n\t\tQueryAPI: m.queryAPI,\n\t}\n}\n\nfunc (m *monolith) setupProducers() {\n\tm.roomServerProducer = producers.NewRoomserverProducer(m.inputAPI)\n\tm.userUpdateProducer = &producers.UserUpdateProducer{\n\t\tProducer: m.kafkaProducer,\n\t\tTopic: string(m.cfg.Kafka.Topics.UserUpdates),\n\t}\n\tm.syncProducer = &producers.SyncAPIProducer{\n\t\tProducer: m.kafkaProducer,\n\t\tTopic: string(m.cfg.Kafka.Topics.OutputClientData),\n\t}\n}\n\nfunc (m *monolith) setupNotifiers() {\n\tpos, err := m.syncAPIDB.SyncStreamPosition()\n\tif err != nil {\n\t\tlog.Panicf(\"startup: failed to get latest sync stream position : %s\", err)\n\t}\n\n\tm.syncAPINotifier = syncapi_sync.NewNotifier(syncapi_types.StreamPosition(pos))\n\tif err = m.syncAPINotifier.Load(m.syncAPIDB); err != nil {\n\t\tlog.Panicf(\"startup: failed to set up notifier: %s\", err)\n\t}\n}\n\nfunc (m *monolith) setupConsumers() {\n\tvar err error\n\n\tclientAPIConsumer := clientapi_consumers.NewOutputRoomEvent(\n\t\tm.cfg, m.kafkaConsumer, m.accountDB, m.queryAPI,\n\t)\n\tif err = clientAPIConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start room server consumer\")\n\t}\n\n\tsyncAPIRoomConsumer := syncapi_consumers.NewOutputRoomEvent(\n\t\tm.cfg, m.kafkaConsumer, m.syncAPINotifier, m.syncAPIDB, m.queryAPI,\n\t)\n\tif err = syncAPIRoomConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start room server consumer: %s\", err)\n\t}\n\n\tsyncAPIClientConsumer := syncapi_consumers.NewOutputClientData(\n\t\tm.cfg, m.kafkaConsumer, m.syncAPINotifier, m.syncAPIDB,\n\t)\n\tif err = syncAPIClientConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start client API server consumer: %s\", err)\n\t}\n\n\tfederationSenderQueues := queue.NewOutgoingQueues(m.cfg.Matrix.ServerName, m.federation)\n\n\tfederationSenderRoomConsumer := federationsender_consumers.NewOutputRoomEvent(\n\t\tm.cfg, m.kafkaConsumer, federationSenderQueues, m.federationSenderDB, m.queryAPI,\n\t)\n\tif err = federationSenderRoomConsumer.Start(); err != nil {\n\t\tlog.WithError(err).Panicf(\"startup: failed to start room server consumer\")\n\t}\n}\n\nfunc (m *monolith) setupAPIs() {\n\tclientapi_routing.Setup(\n\t\tm.api, http.DefaultClient, *m.cfg, m.roomServerProducer,\n\t\tm.queryAPI, m.aliasAPI, m.accountDB, m.deviceDB, m.federation, m.keyRing,\n\t\tm.userUpdateProducer, m.syncProducer,\n\t)\n\n\tmediaapi_routing.Setup(\n\t\tm.api, http.DefaultClient, m.cfg, m.mediaAPIDB,\n\t)\n\n\tsyncapi_routing.Setup(m.api, syncapi_sync.NewRequestPool(\n\t\tm.syncAPIDB, m.syncAPINotifier, m.accountDB,\n\t), m.deviceDB)\n\n\tfederationapi_routing.Setup(\n\t\tm.api, *m.cfg, m.queryAPI, m.roomServerProducer, m.keyRing, m.federation,\n\t)\n}\nFix kafka consumer setup in monolith. 
(#184)\/\/ Copyright 2017 Vector Creations Ltd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/auth\/storage\/accounts\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/auth\/storage\/devices\"\n\t\"github.com\/matrix-org\/dendrite\/clientapi\/producers\"\n\t\"github.com\/matrix-org\/dendrite\/common\"\n\t\"github.com\/matrix-org\/dendrite\/common\/config\"\n\t\"github.com\/matrix-org\/dendrite\/common\/keydb\"\n\t\"github.com\/matrix-org\/gomatrixserverlib\"\n\t\"github.com\/matrix-org\/naffka\"\n\n\tmediaapi_routing \"github.com\/matrix-org\/dendrite\/mediaapi\/routing\"\n\tmediaapi_storage \"github.com\/matrix-org\/dendrite\/mediaapi\/storage\"\n\n\troomserver_alias \"github.com\/matrix-org\/dendrite\/roomserver\/alias\"\n\troomserver_input \"github.com\/matrix-org\/dendrite\/roomserver\/input\"\n\troomserver_query \"github.com\/matrix-org\/dendrite\/roomserver\/query\"\n\troomserver_storage \"github.com\/matrix-org\/dendrite\/roomserver\/storage\"\n\n\tclientapi_consumers \"github.com\/matrix-org\/dendrite\/clientapi\/consumers\"\n\tclientapi_routing \"github.com\/matrix-org\/dendrite\/clientapi\/routing\"\n\n\tsyncapi_consumers \"github.com\/matrix-org\/dendrite\/syncapi\/consumers\"\n\tsyncapi_routing \"github.com\/matrix-org\/dendrite\/syncapi\/routing\"\n\tsyncapi_storage \"github.com\/matrix-org\/dendrite\/syncapi\/storage\"\n\tsyncapi_sync \"github.com\/matrix-org\/dendrite\/syncapi\/sync\"\n\tsyncapi_types \"github.com\/matrix-org\/dendrite\/syncapi\/types\"\n\n\tfederationapi_routing \"github.com\/matrix-org\/dendrite\/federationapi\/routing\"\n\n\tfederationsender_consumers \"github.com\/matrix-org\/dendrite\/federationsender\/consumers\"\n\t\"github.com\/matrix-org\/dendrite\/federationsender\/queue\"\n\tfederationsender_storage \"github.com\/matrix-org\/dendrite\/federationsender\/storage\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tsarama \"gopkg.in\/Shopify\/sarama.v1\"\n)\n\nvar (\n\tlogDir = os.Getenv(\"LOG_DIR\")\n\tconfigPath = flag.String(\"config\", \"dendrite.yaml\", \"The path to the config file. 
For more information, see the config file in this repository.\")\n\thttpBindAddr = flag.String(\"http-bind-address\", \":8008\", \"The HTTP listening port for the server\")\n\thttpsBindAddr = flag.String(\"https-bind-address\", \":8448\", \"The HTTPS listening port for the server\")\n\tcertFile = flag.String(\"tls-cert\", \"\", \"The PEM formatted X509 certificate to use for TLS\")\n\tkeyFile = flag.String(\"tls-key\", \"\", \"The PEM private key to use for TLS\")\n)\n\nfunc main() {\n\tcommon.SetupLogging(logDir)\n\n\tflag.Parse()\n\n\tif *configPath == \"\" {\n\t\tlog.Fatal(\"--config must be supplied\")\n\t}\n\tcfg, err := config.LoadMonolithic(*configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Invalid config file: %s\", err)\n\t}\n\n\tm := newMonolith(cfg)\n\tm.setupDatabases()\n\tm.setupFederation()\n\tm.setupKafka()\n\tm.setupRoomServer()\n\tm.setupProducers()\n\tm.setupNotifiers()\n\tm.setupConsumers()\n\tm.setupAPIs()\n\n\t\/\/ Expose the matrix APIs directly rather than putting them under a \/api path.\n\tgo func() {\n\t\tlog.Info(\"Listening on \", *httpBindAddr)\n\t\tlog.Fatal(http.ListenAndServe(*httpBindAddr, m.api))\n\t}()\n\t\/\/ Handle HTTPS if certificate and key are provided\n\tgo func() {\n\t\tif *certFile != \"\" && *keyFile != \"\" {\n\t\t\tlog.Info(\"Listening on \", *httpsBindAddr)\n\t\t\tlog.Fatal(http.ListenAndServeTLS(*httpsBindAddr, *certFile, *keyFile, m.api))\n\t\t}\n\t}()\n\n\t\/\/ We want to block forever to let the HTTP and HTTPS handler serve the APIs\n\tselect {}\n}\n\n\/\/ A monolith contains all the dendrite components.\n\/\/ Some of the setup functions depend on previous setup functions, so they must\n\/\/ be called in the same order as they are defined in the file.\ntype monolith struct {\n\tcfg *config.Dendrite\n\tapi *mux.Router\n\n\troomServerDB *roomserver_storage.Database\n\taccountDB *accounts.Database\n\tdeviceDB *devices.Database\n\tkeyDB *keydb.Database\n\tmediaAPIDB *mediaapi_storage.Database\n\tsyncAPIDB *syncapi_storage.SyncServerDatabase\n\tfederationSenderDB *federationsender_storage.Database\n\n\tfederation *gomatrixserverlib.FederationClient\n\tkeyRing gomatrixserverlib.KeyRing\n\n\tinputAPI *roomserver_input.RoomserverInputAPI\n\tqueryAPI *roomserver_query.RoomserverQueryAPI\n\taliasAPI *roomserver_alias.RoomserverAliasAPI\n\n\tnaffka *naffka.Naffka\n\tkafkaProducer sarama.SyncProducer\n\n\troomServerProducer *producers.RoomserverProducer\n\tuserUpdateProducer *producers.UserUpdateProducer\n\tsyncProducer *producers.SyncAPIProducer\n\n\tsyncAPINotifier *syncapi_sync.Notifier\n}\n\nfunc newMonolith(cfg *config.Dendrite) *monolith {\n\treturn &monolith{cfg: cfg, api: mux.NewRouter()}\n}\n\nfunc (m *monolith) setupDatabases() {\n\tvar err error\n\tm.roomServerDB, err = roomserver_storage.Open(string(m.cfg.Database.RoomServer))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm.accountDB, err = accounts.NewDatabase(string(m.cfg.Database.Account), m.cfg.Matrix.ServerName)\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup account database(%q): %s\", m.cfg.Database.Account, err.Error())\n\t}\n\tm.deviceDB, err = devices.NewDatabase(string(m.cfg.Database.Device), m.cfg.Matrix.ServerName)\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup device database(%q): %s\", m.cfg.Database.Device, err.Error())\n\t}\n\tm.keyDB, err = keydb.NewDatabase(string(m.cfg.Database.ServerKey))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup key database(%q): %s\", m.cfg.Database.ServerKey, err.Error())\n\t}\n\tm.mediaAPIDB, err = 
mediaapi_storage.Open(string(m.cfg.Database.MediaAPI))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup media api database(%q): %s\", m.cfg.Database.MediaAPI, err.Error())\n\t}\n\tm.syncAPIDB, err = syncapi_storage.NewSyncServerDatabase(string(m.cfg.Database.SyncAPI))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to setup sync api database(%q): %s\", m.cfg.Database.SyncAPI, err.Error())\n\t}\n\tm.federationSenderDB, err = federationsender_storage.NewDatabase(string(m.cfg.Database.FederationSender))\n\tif err != nil {\n\t\tlog.Panicf(\"startup: failed to create federation sender database with data source %s : %s\", m.cfg.Database.FederationSender, err)\n\t}\n}\n\nfunc (m *monolith) setupFederation() {\n\tm.federation = gomatrixserverlib.NewFederationClient(\n\t\tm.cfg.Matrix.ServerName, m.cfg.Matrix.KeyID, m.cfg.Matrix.PrivateKey,\n\t)\n\n\tm.keyRing = gomatrixserverlib.KeyRing{\n\t\tKeyFetchers: []gomatrixserverlib.KeyFetcher{\n\t\t\t\/\/ TODO: Use perspective key fetchers for production.\n\t\t\t&gomatrixserverlib.DirectKeyFetcher{Client: m.federation.Client},\n\t\t},\n\t\tKeyDatabase: m.keyDB,\n\t}\n}\n\nfunc (m *monolith) setupKafka() {\n\tvar err error\n\tif m.cfg.Kafka.UseNaffka {\n\t\t\/\/ naffka is an in-process implementation of the Kafka producer and\n\t\t\/\/ consumer interfaces, used when running everything in one process.\n\t\tnaff, err := naffka.New(&naffka.MemoryDatabase{})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t}).Panic(\"Failed to setup naffka\")\n\t\t}\n\t\tm.naffka = naff\n\t\tm.kafkaProducer = naff\n\t} else {\n\t\tm.kafkaProducer, err = sarama.NewSyncProducer(m.cfg.Kafka.Addresses, nil)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t\t\"addresses\": m.cfg.Kafka.Addresses,\n\t\t\t}).Panic(\"Failed to setup kafka producers\")\n\t\t}\n\t}\n}\n\nfunc (m *monolith) kafkaConsumer() sarama.Consumer {\n\tif m.cfg.Kafka.UseNaffka {\n\t\treturn m.naffka\n\t}\n\tconsumer, err := sarama.NewConsumer(m.cfg.Kafka.Addresses, nil)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\tlog.ErrorKey: err,\n\t\t\t\"addresses\": m.cfg.Kafka.Addresses,\n\t\t}).Panic(\"Failed to setup kafka consumers\")\n\t}\n\treturn consumer\n}\n\nfunc (m *monolith) setupRoomServer() {\n\tm.inputAPI = &roomserver_input.RoomserverInputAPI{\n\t\tDB: m.roomServerDB,\n\t\tProducer: m.kafkaProducer,\n\t\tOutputRoomEventTopic: string(m.cfg.Kafka.Topics.OutputRoomEvent),\n\t}\n\n\tm.queryAPI = &roomserver_query.RoomserverQueryAPI{\n\t\tDB: m.roomServerDB,\n\t}\n\n\tm.aliasAPI = &roomserver_alias.RoomserverAliasAPI{\n\t\tDB: m.roomServerDB,\n\t\tCfg: m.cfg,\n\t\tInputAPI: m.inputAPI,\n\t\tQueryAPI: m.queryAPI,\n\t}\n}\n\nfunc (m *monolith) setupProducers() {\n\tm.roomServerProducer = producers.NewRoomserverProducer(m.inputAPI)\n\tm.userUpdateProducer = &producers.UserUpdateProducer{\n\t\tProducer: m.kafkaProducer,\n\t\tTopic: string(m.cfg.Kafka.Topics.UserUpdates),\n\t}\n\tm.syncProducer = &producers.SyncAPIProducer{\n\t\tProducer: m.kafkaProducer,\n\t\tTopic: string(m.cfg.Kafka.Topics.OutputClientData),\n\t}\n}\n\nfunc (m *monolith) setupNotifiers() {\n\tpos, err := m.syncAPIDB.SyncStreamPosition()\n\tif err != nil {\n\t\tlog.Panicf(\"startup: failed to get latest sync stream position : %s\", err)\n\t}\n\n\tm.syncAPINotifier = syncapi_sync.NewNotifier(syncapi_types.StreamPosition(pos))\n\tif err = m.syncAPINotifier.Load(m.syncAPIDB); err != nil {\n\t\tlog.Panicf(\"startup: failed to set up notifier: %s\", err)\n\t}\n}\n\nfunc (m *monolith) setupConsumers() {\n\tvar err error\n\n\tclientAPIConsumer := clientapi_consumers.NewOutputRoomEvent(\n\t\tm.cfg, 
m.kafkaConsumer(), m.accountDB, m.queryAPI,\n\t)\n\tif err = clientAPIConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start room server consumer: %s\", err)\n\t}\n\n\tsyncAPIRoomConsumer := syncapi_consumers.NewOutputRoomEvent(\n\t\tm.cfg, m.kafkaConsumer(), m.syncAPINotifier, m.syncAPIDB, m.queryAPI,\n\t)\n\tif err = syncAPIRoomConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start room server consumer: %s\", err)\n\t}\n\n\tsyncAPIClientConsumer := syncapi_consumers.NewOutputClientData(\n\t\tm.cfg, m.kafkaConsumer(), m.syncAPINotifier, m.syncAPIDB,\n\t)\n\tif err = syncAPIClientConsumer.Start(); err != nil {\n\t\tlog.Panicf(\"startup: failed to start client API server consumer: %s\", err)\n\t}\n\n\tfederationSenderQueues := queue.NewOutgoingQueues(m.cfg.Matrix.ServerName, m.federation)\n\n\tfederationSenderRoomConsumer := federationsender_consumers.NewOutputRoomEvent(\n\t\tm.cfg, m.kafkaConsumer(), federationSenderQueues, m.federationSenderDB, m.queryAPI,\n\t)\n\tif err = federationSenderRoomConsumer.Start(); err != nil {\n\t\tlog.WithError(err).Panicf(\"startup: failed to start room server consumer\")\n\t}\n}\n\nfunc (m *monolith) setupAPIs() {\n\tclientapi_routing.Setup(\n\t\tm.api, http.DefaultClient, *m.cfg, m.roomServerProducer,\n\t\tm.queryAPI, m.aliasAPI, m.accountDB, m.deviceDB, m.federation, m.keyRing,\n\t\tm.userUpdateProducer, m.syncProducer,\n\t)\n\n\tmediaapi_routing.Setup(\n\t\tm.api, http.DefaultClient, m.cfg, m.mediaAPIDB,\n\t)\n\n\tsyncapi_routing.Setup(m.api, syncapi_sync.NewRequestPool(\n\t\tm.syncAPIDB, m.syncAPINotifier, m.accountDB,\n\t), m.deviceDB)\n\n\tfederationapi_routing.Setup(\n\t\tm.api, *m.cfg, m.queryAPI, m.roomServerProducer, m.keyRing, m.federation,\n\t)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clientv3\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\n\tpb \"go.etcd.io\/etcd\/api\/v3\/etcdserverpb\"\n\t\"go.uber.org\/zap\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype (\n\tDefragmentResponse pb.DefragmentResponse\n\tAlarmResponse pb.AlarmResponse\n\tAlarmMember pb.AlarmMember\n\tStatusResponse pb.StatusResponse\n\tHashKVResponse pb.HashKVResponse\n\tMoveLeaderResponse pb.MoveLeaderResponse\n)\n\ntype Maintenance interface {\n\t\/\/ AlarmList gets all active alarms.\n\tAlarmList(ctx context.Context) (*AlarmResponse, error)\n\n\t\/\/ AlarmDisarm disarms a given alarm.\n\tAlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)\n\n\t\/\/ Defragment releases wasted space from internal fragmentation on a given etcd member.\n\t\/\/ Defragment is only needed after deleting a large number of keys, when you want to reclaim\n\t\/\/ the resources.\n\t\/\/ Defragment is an expensive operation. Users should avoid defragmenting multiple members\n\t\/\/ at the same time.\n\t\/\/ To defragment multiple members in the cluster, users need to call Defragment multiple\n\t\/\/ times with different endpoints.
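\n\t\/\/\n\t\/\/ A minimal usage sketch, one endpoint at a time (cli is an assumed, initialized *clientv3.Client and ctx an assumed context.Context; neither is defined in this file):\n\t\/\/\n\t\/\/\tfor _, ep := range cli.Endpoints() {\n\t\/\/\t\tif _, err := cli.Defragment(ctx, ep); err != nil {\n\t\/\/\t\t\tlog.Printf(\"defragment of %s failed: %v\", ep, err)\n\t\/\/\t\t}\n\t\/\/\t}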
\n\tDefragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)\n\n\t\/\/ Status gets the status of the endpoint.\n\tStatus(ctx context.Context, endpoint string) (*StatusResponse, error)\n\n\t\/\/ HashKV returns a hash of the KV state at the time of the RPC.\n\t\/\/ If revision is zero, the hash is computed on all keys. If the revision\n\t\/\/ is non-zero, the hash is computed on all keys at or below the given revision.\n\tHashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)\n\n\t\/\/ Snapshot provides a reader for a point-in-time snapshot of etcd.\n\t\/\/ If the context \"ctx\" is canceled or timed out, reading from the returned\n\t\/\/ \"io.ReadCloser\" would error out (e.g. context.Canceled, context.DeadlineExceeded).\n\tSnapshot(ctx context.Context) (io.ReadCloser, error)\n\n\t\/\/ MoveLeader requests current leader to transfer its leadership to the transferee.\n\t\/\/ Request must be made to the leader.\n\tMoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)\n}\n\ntype maintenance struct {\n\tlg *zap.Logger\n\tdial func(endpoint string) (pb.MaintenanceClient, func(), error)\n\tremote pb.MaintenanceClient\n\tcallOpts []grpc.CallOption\n}\n\nfunc NewMaintenance(c *Client) Maintenance {\n\tapi := &maintenance{\n\t\tlg: c.lg,\n\t\tdial: func(endpoint string) (pb.MaintenanceClient, func(), error) {\n\t\t\tconn, err := c.Dial(endpoint)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"failed to dial endpoint %s with maintenance client: %v\", endpoint, err)\n\t\t\t}\n\n\t\t\t\/\/get token with established connection\n\t\t\tdctx := c.ctx\n\t\t\tcancel := func() {}\n\t\t\tif c.cfg.DialTimeout > 0 {\n\t\t\t\tdctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)\n\t\t\t}\n\t\t\terr = c.getToken(dctx)\n\t\t\tcancel()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"failed to getToken from endpoint %s with maintenance client: %v\", endpoint, err)\n\t\t\t}\n\t\t\tcancel = func() { conn.Close() }\n\t\t\treturn RetryMaintenanceClient(c, conn), cancel, nil\n\t\t},\n\t\tremote: RetryMaintenanceClient(c, c.conn),\n\t}\n\tif c != nil {\n\t\tapi.callOpts = c.callOpts\n\t}\n\treturn api\n}\n\nfunc NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {\n\tapi := &maintenance{\n\t\tlg: c.lg,\n\t\tdial: func(string) (pb.MaintenanceClient, func(), error) {\n\t\t\treturn remote, func() {}, nil\n\t\t},\n\t\tremote: remote,\n\t}\n\tif c != nil {\n\t\tapi.callOpts = c.callOpts\n\t}\n\treturn api\n}\n\nfunc (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {\n\treq := &pb.AlarmRequest{\n\t\tAction: pb.AlarmRequest_GET,\n\t\tMemberID: 0, \/\/ all\n\t\tAlarm: pb.AlarmType_NONE, \/\/ all\n\t}\n\tresp, err := m.remote.Alarm(ctx, req, m.callOpts...)\n\tif err == nil {\n\t\treturn (*AlarmResponse)(resp), nil\n\t}\n\treturn nil, toErr(ctx, err)\n}\n\nfunc (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {\n\treq := &pb.AlarmRequest{\n\t\tAction: pb.AlarmRequest_DEACTIVATE,\n\t\tMemberID: am.MemberID,\n\t\tAlarm: am.Alarm,\n\t}\n\n\tif req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {\n\t\tar, err := m.AlarmList(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, toErr(ctx, err)\n\t\t}\n\t\tret := AlarmResponse{}\n\t\tfor _, 
am := range ar.Alarms {\n\t\t\tdresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))\n\t\t\tif derr != nil {\n\t\t\t\treturn nil, toErr(ctx, derr)\n\t\t\t}\n\t\t\tret.Alarms = append(ret.Alarms, dresp.Alarms...)\n\t\t}\n\t\treturn &ret, nil\n\t}\n\n\tresp, err := m.remote.Alarm(ctx, req, m.callOpts...)\n\tif err == nil {\n\t\treturn (*AlarmResponse)(resp), nil\n\t}\n\treturn nil, toErr(ctx, err)\n}\n\nfunc (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*DefragmentResponse)(resp), nil\n}\n\nfunc (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*StatusResponse)(resp), nil\n}\n\nfunc (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\n\t\treturn nil, toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*HashKVResponse)(resp), nil\n}\n\nfunc (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {\n\tss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\n\tm.lg.Info(\"opened snapshot stream; downloading\")\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\tresp, err := ss.Recv()\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase io.EOF:\n\t\t\t\t\tm.lg.Info(\"completed snapshot read; closing\")\n\t\t\t\tdefault:\n\t\t\t\t\tm.lg.Warn(\"failed to receive from snapshot stream; closing\", zap.Error(err))\n\t\t\t\t}\n\t\t\t\tpw.CloseWithError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ can \"resp == nil && err == nil\"\n\t\t\t\/\/ before we receive snapshot SHA digest?\n\t\t\t\/\/ No, server sends EOF with an empty response\n\t\t\t\/\/ after it sends SHA digest at the end\n\n\t\t\tif _, werr := pw.Write(resp.Blob); werr != nil {\n\t\t\t\tpw.CloseWithError(werr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil\n}\n\ntype snapshotReadCloser struct {\n\tctx context.Context\n\tio.ReadCloser\n}\n\nfunc (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {\n\tn, err = rc.ReadCloser.Read(p)\n\treturn n, toErr(rc.ctx, err)\n}\n\nfunc (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {\n\tresp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)\n\treturn (*MoveLeaderResponse)(resp), toErr(ctx, err)\n}\nclient\/v3\/maintenance.go: Add Downgrade support to client\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless 
required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clientv3\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\tpb \"go.etcd.io\/etcd\/api\/v3\/etcdserverpb\"\n\t\"go.uber.org\/zap\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype (\n\tDefragmentResponse pb.DefragmentResponse\n\tAlarmResponse pb.AlarmResponse\n\tAlarmMember pb.AlarmMember\n\tStatusResponse pb.StatusResponse\n\tHashKVResponse pb.HashKVResponse\n\tMoveLeaderResponse pb.MoveLeaderResponse\n\tDowngradeResponse pb.DowngradeResponse\n)\n\ntype Maintenance interface {\n\t\/\/ AlarmList gets all active alarms.\n\tAlarmList(ctx context.Context) (*AlarmResponse, error)\n\n\t\/\/ AlarmDisarm disarms a given alarm.\n\tAlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)\n\n\t\/\/ Defragment releases wasted space from internal fragmentation on a given etcd member.\n\t\/\/ Defragment is only needed after deleting a large number of keys, when you want to reclaim\n\t\/\/ the resources.\n\t\/\/ Defragment is an expensive operation. Users should avoid defragmenting multiple members\n\t\/\/ at the same time.\n\t\/\/ To defragment multiple members in the cluster, users need to call Defragment multiple\n\t\/\/ times with different endpoints.\n\tDefragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)\n\n\t\/\/ Status gets the status of the endpoint.\n\tStatus(ctx context.Context, endpoint string) (*StatusResponse, error)\n\n\t\/\/ HashKV returns a hash of the KV state at the time of the RPC.\n\t\/\/ If revision is zero, the hash is computed on all keys. If the revision\n\t\/\/ is non-zero, the hash is computed on all keys at or below the given revision.\n\tHashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)\n\n\t\/\/ Snapshot provides a reader for a point-in-time snapshot of etcd.\n\t\/\/ If the context \"ctx\" is canceled or timed out, reading from the returned\n\t\/\/ \"io.ReadCloser\" would error out (e.g. 
context.Canceled, context.DeadlineExceeded).\n\tSnapshot(ctx context.Context) (io.ReadCloser, error)\n\n\t\/\/ MoveLeader requests current leader to transfer its leadership to the transferee.\n\t\/\/ Request must be made to the leader.\n\tMoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)\n\n\t\/\/ Downgrade requests downgrades, verifies feasibility or cancels downgrade\n\t\/\/ on the cluster version.\n\t\/\/ action is one of the following:\n\t\/\/ VALIDATE = 0;\n\t\/\/ ENABLE = 1;\n\t\/\/ CANCEL = 2;\n\t\/\/ Supported since etcd 3.5.\n\tDowngrade(ctx context.Context, action int32, version string) (*DowngradeResponse, error)\n}\n\ntype maintenance struct {\n\tlg *zap.Logger\n\tdial func(endpoint string) (pb.MaintenanceClient, func(), error)\n\tremote pb.MaintenanceClient\n\tcallOpts []grpc.CallOption\n}\n\nfunc NewMaintenance(c *Client) Maintenance {\n\tapi := &maintenance{\n\t\tlg: c.lg,\n\t\tdial: func(endpoint string) (pb.MaintenanceClient, func(), error) {\n\t\t\tconn, err := c.Dial(endpoint)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"failed to dial endpoint %s with maintenance client: %v\", endpoint, err)\n\t\t\t}\n\n\t\t\t\/\/get token with established connection\n\t\t\tdctx := c.ctx\n\t\t\tcancel := func() {}\n\t\t\tif c.cfg.DialTimeout > 0 {\n\t\t\t\tdctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)\n\t\t\t}\n\t\t\terr = c.getToken(dctx)\n\t\t\tcancel()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"failed to getToken from endpoint %s with maintenance client: %v\", endpoint, err)\n\t\t\t}\n\t\t\tcancel = func() { conn.Close() }\n\t\t\treturn RetryMaintenanceClient(c, conn), cancel, nil\n\t\t},\n\t\tremote: RetryMaintenanceClient(c, c.conn),\n\t}\n\tif c != nil {\n\t\tapi.callOpts = c.callOpts\n\t}\n\treturn api\n}\n\nfunc NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {\n\tapi := &maintenance{\n\t\tlg: c.lg,\n\t\tdial: func(string) (pb.MaintenanceClient, func(), error) {\n\t\t\treturn remote, func() {}, nil\n\t\t},\n\t\tremote: remote,\n\t}\n\tif c != nil {\n\t\tapi.callOpts = c.callOpts\n\t}\n\treturn api\n}\n\nfunc (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {\n\treq := &pb.AlarmRequest{\n\t\tAction: pb.AlarmRequest_GET,\n\t\tMemberID: 0, \/\/ all\n\t\tAlarm: pb.AlarmType_NONE, \/\/ all\n\t}\n\tresp, err := m.remote.Alarm(ctx, req, m.callOpts...)\n\tif err == nil {\n\t\treturn (*AlarmResponse)(resp), nil\n\t}\n\treturn nil, toErr(ctx, err)\n}\n\nfunc (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {\n\treq := &pb.AlarmRequest{\n\t\tAction: pb.AlarmRequest_DEACTIVATE,\n\t\tMemberID: am.MemberID,\n\t\tAlarm: am.Alarm,\n\t}\n\n\tif req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {\n\t\tar, err := m.AlarmList(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, toErr(ctx, err)\n\t\t}\n\t\tret := AlarmResponse{}\n\t\tfor _, am := range ar.Alarms {\n\t\t\tdresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))\n\t\t\tif derr != nil {\n\t\t\t\treturn nil, toErr(ctx, derr)\n\t\t\t}\n\t\t\tret.Alarms = append(ret.Alarms, dresp.Alarms...)\n\t\t}\n\t\treturn &ret, nil\n\t}\n\n\tresp, err := m.remote.Alarm(ctx, req, m.callOpts...)\n\tif err == nil {\n\t\treturn (*AlarmResponse)(resp), nil\n\t}\n\treturn nil, toErr(ctx, err)\n}\n\nfunc (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\t\treturn nil, 
toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*DefragmentResponse)(resp), nil\n}\n\nfunc (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*StatusResponse)(resp), nil\n}\n\nfunc (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {\n\tremote, cancel, err := m.dial(endpoint)\n\tif err != nil {\n\n\t\treturn nil, toErr(ctx, err)\n\t}\n\tdefer cancel()\n\tresp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\treturn (*HashKVResponse)(resp), nil\n}\n\nfunc (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {\n\tss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)\n\tif err != nil {\n\t\treturn nil, toErr(ctx, err)\n\t}\n\n\tm.lg.Info(\"opened snapshot stream; downloading\")\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tfor {\n\t\t\tresp, err := ss.Recv()\n\t\t\tif err != nil {\n\t\t\t\tswitch err {\n\t\t\t\tcase io.EOF:\n\t\t\t\t\tm.lg.Info(\"completed snapshot read; closing\")\n\t\t\t\tdefault:\n\t\t\t\t\tm.lg.Warn(\"failed to receive from snapshot stream; closing\", zap.Error(err))\n\t\t\t\t}\n\t\t\t\tpw.CloseWithError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ can \"resp == nil && err == nil\"\n\t\t\t\/\/ before we receive snapshot SHA digest?\n\t\t\t\/\/ No, server sends EOF with an empty response\n\t\t\t\/\/ after it sends SHA digest at the end\n\n\t\t\tif _, werr := pw.Write(resp.Blob); werr != nil {\n\t\t\t\tpw.CloseWithError(werr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil\n}\n\ntype snapshotReadCloser struct {\n\tctx context.Context\n\tio.ReadCloser\n}\n\nfunc (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {\n\tn, err = rc.ReadCloser.Read(p)\n\treturn n, toErr(rc.ctx, err)\n}\n\nfunc (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {\n\tresp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)\n\treturn (*MoveLeaderResponse)(resp), toErr(ctx, err)\n}\n\nfunc (m *maintenance) Downgrade(ctx context.Context, action int32, version string) (*DowngradeResponse, error) {\n\tactionType := pb.DowngradeRequest_VALIDATE\n\tswitch action {\n\tcase 0:\n\t\tactionType = pb.DowngradeRequest_VALIDATE\n\tcase 1:\n\t\tactionType = pb.DowngradeRequest_ENABLE\n\tcase 2:\n\t\tactionType = pb.DowngradeRequest_CANCEL\n\tdefault:\n\t\treturn nil, errors.New(\"etcdclient: unknown downgrade action\")\n\t}\n\tresp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...)\n\treturn (*DowngradeResponse)(resp), toErr(ctx, err)\n}\n<|endoftext|>"} {"text":"package main\n\nimport 
(\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/bootstrap\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/fetcher\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/oinume\/lekcije\/server\/notifier\"\n\t\"github.com\/pkg\/profile\"\n\t\"github.com\/uber-go\/zap\"\n)\n\nvar (\n\tdryRun = flag.Bool(\"dry-run\", false, \"Don't update database with fetched lessons\")\n\tsendEmail = flag.Bool(\"send-email\", true, \"flag to send email\")\n\tconcurrency = flag.Int(\"concurrency\", 1, \"concurrency of fetcher\")\n\tfetcherCache = flag.Bool(\"fetcher-cache\", false, \"Cache teacher and lesson data in Fetcher\")\n\tlogLevel = flag.String(\"log-level\", \"info\", \"Log level\")\n\tprofileMode = flag.String(\"profile-mode\", \"\", \"block|cpu|mem|trace\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif err := run(); err != nil {\n\t\tlog.Fatalf(\"err = %v\", err) \/\/ TODO: Error handling\n\t}\n\tos.Exit(0)\n}\n\nfunc run() error {\n\tswitch *profileMode {\n\tcase \"block\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.BlockProfile).Stop()\n\tcase \"cpu\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.CPUProfile).Stop()\n\tcase \"mem\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.MemProfile).Stop()\n\tcase \"trace\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.TraceProfile).Stop()\n\t}\n\n\tbootstrap.CheckCLIEnvVars()\n\tstartedAt := time.Now().UTC()\n\tif *logLevel != \"\" {\n\t\tlogger.App.SetLevel(logger.NewLevel(*logLevel))\n\t}\n\tlogger.App.Info(\"notifier started\")\n\tdefer func() {\n\t\telapsed := time.Now().UTC().Sub(startedAt) \/ time.Millisecond\n\t\tlogger.App.Info(\"notifier finished\", zap.Int(\"elapsed\", int(elapsed)))\n\t}()\n\n\t\/\/ TODO: Wrap up as function\n\tvar dbLogging bool\n\t\/\/dbLogging := !config.IsProductionEnv()\n\tif *logLevel == \"debug\" {\n\t\tdbLogging = true\n\t} else {\n\t\tdbLogging = false\n\t}\n\tdb, err := model.OpenDB(bootstrap.CLIEnvVars.DBURL, 1, dbLogging)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tusers, err := model.NewUserService(db).FindAllEmailVerifiedIsTrue()\n\tif err != nil {\n\t\treturn err\n\t}\n\tmCountries, err := model.NewMCountryService(db).LoadAll()\n\tif err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to load all MCountries\")\n\t}\n\tfetcher := fetcher.NewTeacherLessonFetcher(nil, *concurrency, *fetcherCache, mCountries, logger.App)\n\tnotifier := notifier.NewNotifier(db, fetcher, *dryRun, *sendEmail)\n\tdefer notifier.Close()\n\tfor _, user := range users {\n\t\tif err := notifier.SendNotification(user); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\nFix problem of staticcheckpackage main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/bootstrap\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/fetcher\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/oinume\/lekcije\/server\/notifier\"\n\t\"github.com\/pkg\/profile\"\n\t\"github.com\/uber-go\/zap\"\n)\n\nvar (\n\tdryRun = flag.Bool(\"dry-run\", false, \"Don't update database with fetched lessons\")\n\tsendEmail = flag.Bool(\"send-email\", true, \"flag to send email\")\n\tconcurrency = flag.Int(\"concurrency\", 1, \"concurrency of fetcher\")\n\tfetcherCache 
= flag.Bool(\"fetcher-cache\", false, \"Cache teacher and lesson data in Fetcher\")\n\tlogLevel = flag.String(\"log-level\", \"info\", \"Log level\")\n\tprofileMode = flag.String(\"profile-mode\", \"\", \"block|cpu|mem|trace\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif err := run(); err != nil {\n\t\tlog.Fatalf(\"err = %v\", err) \/\/ TODO: Error handling\n\t}\n\tos.Exit(0)\n}\n\nfunc run() error {\n\tswitch *profileMode {\n\tcase \"block\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.BlockProfile).Stop()\n\tcase \"cpu\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.CPUProfile).Stop()\n\tcase \"mem\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.MemProfile).Stop()\n\tcase \"trace\":\n\t\tdefer profile.Start(profile.ProfilePath(\".\"), profile.TraceProfile).Stop()\n\t}\n\n\tbootstrap.CheckCLIEnvVars()\n\tstartedAt := time.Now().UTC()\n\tif *logLevel != \"\" {\n\t\tlogger.App.SetLevel(logger.NewLevel(*logLevel))\n\t}\n\tlogger.App.Info(\"notifier started\")\n\tdefer func() {\n\t\telapsed := time.Now().UTC().Sub(startedAt) \/ time.Millisecond\n\t\tlogger.App.Info(\"notifier finished\", zap.Int(\"elapsed\", int(elapsed)))\n\t}()\n\n\t\/\/ TODO: Wrap up as function\n\tdbLogging := false\n\t\/\/ TODO: something wrong with staticcheck? this value of dbLogging is never used (SA4006)\n\t\/\/dbLogging := !config.IsProductionEnv()x\n\tif *logLevel == \"debug\" {\n\t\tdbLogging = true\n\t} else {\n\t\tdbLogging = false\n\t}\n\tdb, err := model.OpenDB(bootstrap.CLIEnvVars.DBURL, 1, dbLogging)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tusers, err := model.NewUserService(db).FindAllEmailVerifiedIsTrue()\n\tif err != nil {\n\t\treturn err\n\t}\n\tmCountries, err := model.NewMCountryService(db).LoadAll()\n\tif err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to load all MCountries\")\n\t}\n\tfetcher := fetcher.NewTeacherLessonFetcher(nil, *concurrency, *fetcherCache, mCountries, logger.App)\n\tnotifier := notifier.NewNotifier(db, fetcher, *dryRun, *sendEmail)\n\tdefer notifier.Close()\n\tfor _, user := range users {\n\t\tif err := notifier.SendNotification(user); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package henchman\n\nimport (\n\t\"log\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/flosch\/pongo2\"\n\n\t\"github.com\/sudharsh\/henchman\/ansi\"\n)\n\nvar statuses = map[string]string{\n\t\"reset\": ansi.ColorCode(\"reset\"),\n\t\"success\": ansi.ColorCode(\"green\"),\n\t\"ignored\": ansi.ColorCode(\"yellow\"),\n\t\"failure\": ansi.ColorCode(\"red\"),\n}\n\n\/\/ Task is the unit of work in henchman.\ntype Task struct {\n\tId string\n\n\tName string\n\tAction string\n\tIgnoreErrors bool `yaml:\"ignore_errors\"`\n}\n\nfunc prepareTemplate(data string, vars *TaskVars, machine *Machine) (string, error) {\n\ttmpl, err := pongo2.FromString(data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tmpl.Execute(&pongo2.Context{\"vars\": vars, \"machine\": machine})\n}\n\n\/\/ Renders the template parts in the task field.\n\/\/ Also assigns a new UUID to the task uniquely identifying it.\nfunc (task *Task) prepare(vars *TaskVars, machine *Machine) {\n\tvar err error\n\ttask.Id = uuid.New()\n\ttask.Name, err = prepareTemplate(task.Name, vars, machine)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttask.Action, err = prepareTemplate(task.Action, vars, machine)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Runs the task on the machine. 
The task might mutate `vars` so that other\n\/\/ tasks down the `plan` can see any additions\/updates.\nfunc (task *Task) Run(machine *Machine, vars *TaskVars) string {\n\ttask.prepare(vars, machine)\n\tlog.Printf(\"%s: %s '%s'\\n\", task.Id, machine.Hostname, task.Name)\n\tout, err := machine.Exec(task.Action)\n\tvar taskStatus string = \"success\"\n\tif err != nil {\n\t\tif task.IgnoreErrors {\n\t\t\ttaskStatus = \"ignored\"\n\t\t} else {\n\t\t\ttaskStatus = \"failure\"\n\t\t}\n\t}\n\tescapeCode := statuses[taskStatus]\n\tvar reset string = statuses[\"reset\"]\n\tlog.Printf(\"%s: %s [%s] - %s\", task.Id, escapeCode, taskStatus, out.String()+reset)\n\treturn taskStatus\n}\nFix buildpackage henchman\n\nimport (\n\t\"log\"\n\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"github.com\/flosch\/pongo2\"\n\n\t\"github.com\/sudharsh\/henchman\/ansi\"\n)\n\nvar statuses = map[string]string{\n\t\"reset\": ansi.ColorCode(\"reset\"),\n\t\"success\": ansi.ColorCode(\"green\"),\n\t\"ignored\": ansi.ColorCode(\"yellow\"),\n\t\"failure\": ansi.ColorCode(\"red\"),\n}\n\n\/\/ Task is the unit of work in henchman.\ntype Task struct {\n\tId string\n\n\tName string\n\tAction string\n\tIgnoreErrors bool `yaml:\"ignore_errors\"`\n}\n\nfunc prepareTemplate(data string, vars *TaskVars, machine *Machine) (string, error) {\n\ttmpl, err := pongo2.FromString(data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tctxt := pongo2.Context{\"vars\": vars, \"machine\": machine}\n\treturn tmpl.Execute(&ctxt)\n}\n\n\/\/ Renders the template parts in the task field.\n\/\/ Also assigns a new UUID to the task uniquely identifying it.\nfunc (task *Task) prepare(vars *TaskVars, machine *Machine) {\n\tvar err error\n\ttask.Id = uuid.New()\n\ttask.Name, err = prepareTemplate(task.Name, vars, machine)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttask.Action, err = prepareTemplate(task.Action, vars, machine)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Runs the task on the machine. 
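A hedged caller sketch (plan.Tasks, machine and vars are assumed to exist in the caller; they are not defined here):\n\/\/\n\/\/\tfor _, t := range plan.Tasks {\n\/\/\t\tif status := t.Run(machine, vars); status == \"failure\" {\n\/\/\t\t\tbreak\n\/\/\t\t}\n\/\/\t}\n\/\/\n\/\/ 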
The task might mutate `vars` so that other\n\/\/ tasks down the `plan` can see any additions\/updates.\nfunc (task *Task) Run(machine *Machine, vars *TaskVars) string {\n\ttask.prepare(vars, machine)\n\tlog.Printf(\"%s: %s '%s'\\n\", task.Id, machine.Hostname, task.Name)\n\tout, err := machine.Exec(task.Action)\n\tvar taskStatus string = \"success\"\n\tif err != nil {\n\t\tif task.IgnoreErrors {\n\t\t\ttaskStatus = \"ignored\"\n\t\t} else {\n\t\t\ttaskStatus = \"failure\"\n\t\t}\n\t}\n\tescapeCode := statuses[taskStatus]\n\tvar reset string = statuses[\"reset\"]\n\tlog.Printf(\"%s: %s [%s] - %s\", task.Id, escapeCode, taskStatus, out.String()+reset)\n\treturn taskStatus\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/GeertJohan\/go.rice\/embedded\"\n\t\"github.com\/akavel\/rsrc\/coff\"\n\t\"go\/build\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype sizedBytes []byte\n\nfunc (s sizedBytes) Size() int64 {\n\treturn int64(len(s))\n}\n\nvar tmplEmbeddedSysoHelper *template.Template\n\nfunc init() {\n\tvar err error\n\ttmplEmbeddedSysoHelper, err = template.New(\"embeddedSysoHelper\").Parse(`package {{.Package}}\n\n\/\/ extern char _bricebox_{{.Symname}}[], _ericebox_{{.Symname}};\n\/\/ int get_{{.Symname}}_length() {\n\/\/ \treturn &_ericebox_{{.Symname}} - _bricebox_{{.Symname}};\n\/\/ }\nimport \"C\"\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"github.com\/GeertJohan\/go.rice\/embedded\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\tptr := unsafe.Pointer(&C._bricebox_{{.Symname}})\n\tbts := C.GoBytes(ptr, C.get_{{.Symname}}_length())\n\tembeddedBox := &embedded.EmbeddedBox{}\n\terr := gob.NewDecoder(bytes.NewReader(bts)).Decode(embeddedBox)\n\tif err != nil {\n\t\tpanic(\"error decoding embedded box: \"+err.Error())\n\t}\n\tembeddedBox.Link()\n\tembedded.RegisterEmbeddedBox(embeddedBox.Name, embeddedBox)\n}`)\n\tif err != nil {\n\t\tpanic(\"could not parse template embeddedSysoHelper: \" + err.Error())\n\t}\n}\n\ntype embeddedSysoHelperData struct {\n\tPackage string\n\tSymname string\n}\n\nfunc operationEmbedSyso(pkg *build.Package) {\n\n\tregexpSynameReplacer := regexp.MustCompile(`[^a-z0-9_]`)\n\n\tboxMap := findBoxes(pkg)\n\n\t\/\/ notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?\n\tif len(boxMap) == 0 {\n\t\tfmt.Println(\"no calls to rice.FindBox() found\")\n\t\treturn\n\t}\n\n\tverbosef(\"\\n\")\n\n\tfor boxname := range boxMap {\n\t\t\/\/ find path and filename for this box\n\t\tboxPath := filepath.Join(pkg.Dir, boxname)\n\t\tboxFilename := strings.Replace(boxname, \"\/\", \"-\", -1)\n\t\tboxFilename = strings.Replace(boxFilename, \"..\", \"back\", -1)\n\n\t\t\/\/ verbose info\n\t\tverbosef(\"embedding box '%s'\\n\", boxname)\n\t\tverbosef(\"\\tto file %s\\n\", boxFilename)\n\n\t\t\/\/ create box datastructure (used by template)\n\t\tbox := &embedded.EmbeddedBox{\n\t\t\tName: boxname,\n\t\t\tTime: time.Now(),\n\t\t\tEmbedType: embedded.EmbedTypeSyso,\n\t\t\tFiles: make(map[string]*embedded.EmbeddedFile),\n\t\t\tDirs: make(map[string]*embedded.EmbeddedDir),\n\t\t}\n\n\t\t\/\/ fill box datastructure with file data\n\t\tfilepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error walking box: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfilename := strings.TrimPrefix(path, boxPath)\n\t\t\tfilename = 
strings.Replace(filename, \"\\\\\", \"\/\", -1)\n\t\t\tfilename = strings.TrimPrefix(filename, \"\/\")\n\t\t\tif info.IsDir() {\n\t\t\t\tembeddedDir := &embedded.EmbeddedDir{\n\t\t\t\t\tFilename: filename,\n\t\t\t\t\tDirModTime: info.ModTime(),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes dir: '%s'\\n\", embeddedDir.Filename)\n\t\t\t\tbox.Dirs[embeddedDir.Filename] = embeddedDir\n\n\t\t\t\t\/\/ add tree entry (skip for root, it'll create a recursion)\n\t\t\t\tif embeddedDir.Filename != \"\" {\n\t\t\t\t\tpathParts := strings.Split(embeddedDir.Filename, \"\/\")\n\t\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\t\tparentDir.ChildDirs = append(parentDir.ChildDirs, embeddedDir)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tembeddedFile := &embedded.EmbeddedFile{\n\t\t\t\t\tFilename: filename,\n\t\t\t\t\tFileModTime: info.ModTime(),\n\t\t\t\t\tContent: \"\",\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes file: '%s'\\n\", embeddedFile.Filename)\n\t\t\t\tcontentBytes, err := ioutil.ReadFile(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"error reading file content while walking box: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tembeddedFile.Content = string(contentBytes)\n\t\t\t\tbox.Files[embeddedFile.Filename] = embeddedFile\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\t\/\/ encode embedded box to gob file\n\t\tboxGobBuf := &bytes.Buffer{}\n\t\terr := gob.NewEncoder(boxGobBuf).Encode(box)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error encoding box to gob: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ write coff\n\t\tsymname := regexpSynameReplacer.ReplaceAllString(boxname, \"_\")\n\t\tcreateCoffSyso(boxname, symname, \"386\", boxGobBuf.Bytes())\n\t\tcreateCoffSyso(boxname, symname, \"amd64\", boxGobBuf.Bytes())\n\n\t\t\/\/ write go\n\t\tsysoHelperData := embeddedSysoHelperData{\n\t\t\tPackage: pkg.Name,\n\t\t\tSymname: symname,\n\t\t}\n\t\tfileSysoHelper, err := os.Create(boxFilename + \".rice-box.go\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error creating syso helper: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = tmplEmbeddedSysoHelper.Execute(fileSysoHelper, sysoHelperData)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error executing tmplEmbeddedSysoHelper: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc createCoffSyso(boxFilename string, symname string, arch string, data []byte) {\n\tboxCoff := coff.NewRDATA()\n\tswitch arch {\n\tcase \"386\":\n\tcase \"amd64\":\n\t\tboxCoff.FileHeader.Machine = 0x8664\n\tdefault:\n\t\tpanic(\"invalid arch\")\n\t}\n\tboxCoff.AddData(\"_bricebox_\"+symname, sizedBytes(data))\n\tboxCoff.AddData(\"_ericebox_\"+symname, io.NewSectionReader(strings.NewReader(\"\\000\\000\"), 0, 2)) \/\/ TODO: why? 
copied from rsrc, which copied it from as-generated\n\tboxCoff.Freeze()\n\terr := writeCoff(boxCoff, boxFilename+\"_\"+arch+\".rice-box.syso\")\n\tif err != nil {\n\t\tfmt.Printf(\"error writing %s coff\/.syso: %v\\n\", arch, err)\n\t\tos.Exit(1)\n\t}\n}\nAdd comment for generated codepackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"github.com\/GeertJohan\/go.rice\/embedded\"\n\t\"github.com\/akavel\/rsrc\/coff\"\n\t\"go\/build\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype sizedBytes []byte\n\nfunc (s sizedBytes) Size() int64 {\n\treturn int64(len(s))\n}\n\nvar tmplEmbeddedSysoHelper *template.Template\n\nfunc init() {\n\tvar err error\n\ttmplEmbeddedSysoHelper, err = template.New(\"embeddedSysoHelper\").Parse(`package {{.Package}}\n\/\/ ############# GENERATED CODE #####################\n\/\/ ## This file was generated by the rice tool.\n\/\/ ## Do not edit unless you know what you're doing.\n\/\/ ##################################################\n\n\/\/ extern char _bricebox_{{.Symname}}[], _ericebox_{{.Symname}};\n\/\/ int get_{{.Symname}}_length() {\n\/\/ \treturn &_ericebox_{{.Symname}} - _bricebox_{{.Symname}};\n\/\/ }\nimport \"C\"\nimport (\n\t\"bytes\"\n\t\"encoding\/gob\"\n\t\"github.com\/GeertJohan\/go.rice\/embedded\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\tptr := unsafe.Pointer(&C._bricebox_{{.Symname}})\n\tbts := C.GoBytes(ptr, C.get_{{.Symname}}_length())\n\tembeddedBox := &embedded.EmbeddedBox{}\n\terr := gob.NewDecoder(bytes.NewReader(bts)).Decode(embeddedBox)\n\tif err != nil {\n\t\tpanic(\"error decoding embedded box: \"+err.Error())\n\t}\n\tembeddedBox.Link()\n\tembedded.RegisterEmbeddedBox(embeddedBox.Name, embeddedBox)\n}`)\n\tif err != nil {\n\t\tpanic(\"could not parse template embeddedSysoHelper: \" + err.Error())\n\t}\n}\n\ntype embeddedSysoHelperData struct {\n\tPackage string\n\tSymname string\n}\n\nfunc operationEmbedSyso(pkg *build.Package) {\n\n\tregexpSynameReplacer := regexp.MustCompile(`[^a-z0-9_]`)\n\n\tboxMap := findBoxes(pkg)\n\n\t\/\/ notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ?\n\tif len(boxMap) == 0 {\n\t\tfmt.Println(\"no calls to rice.FindBox() found\")\n\t\treturn\n\t}\n\n\tverbosef(\"\\n\")\n\n\tfor boxname := range boxMap {\n\t\t\/\/ find path and filename for this box\n\t\tboxPath := filepath.Join(pkg.Dir, boxname)\n\t\tboxFilename := strings.Replace(boxname, \"\/\", \"-\", -1)\n\t\tboxFilename = strings.Replace(boxFilename, \"..\", \"back\", -1)\n\n\t\t\/\/ verbose info\n\t\tverbosef(\"embedding box '%s'\\n\", boxname)\n\t\tverbosef(\"\\tto file %s\\n\", boxFilename)\n\n\t\t\/\/ create box datastructure (used by template)\n\t\tbox := &embedded.EmbeddedBox{\n\t\t\tName: boxname,\n\t\t\tTime: time.Now(),\n\t\t\tEmbedType: embedded.EmbedTypeSyso,\n\t\t\tFiles: make(map[string]*embedded.EmbeddedFile),\n\t\t\tDirs: make(map[string]*embedded.EmbeddedDir),\n\t\t}\n\n\t\t\/\/ fill box datastructure with file data\n\t\tfilepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error walking box: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfilename := strings.TrimPrefix(path, boxPath)\n\t\t\tfilename = strings.Replace(filename, \"\\\\\", \"\/\", -1)\n\t\t\tfilename = strings.TrimPrefix(filename, \"\/\")\n\t\t\tif info.IsDir() {\n\t\t\t\tembeddedDir := &embedded.EmbeddedDir{\n\t\t\t\t\tFilename: 
filename,\n\t\t\t\t\tDirModTime: info.ModTime(),\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes dir: '%s'\\n\", embeddedDir.Filename)\n\t\t\t\tbox.Dirs[embeddedDir.Filename] = embeddedDir\n\n\t\t\t\t\/\/ add tree entry (skip for root, it'll create a recursion)\n\t\t\t\tif embeddedDir.Filename != \"\" {\n\t\t\t\t\tpathParts := strings.Split(embeddedDir.Filename, \"\/\")\n\t\t\t\t\tparentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], \"\/\")]\n\t\t\t\t\tparentDir.ChildDirs = append(parentDir.ChildDirs, embeddedDir)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tembeddedFile := &embedded.EmbeddedFile{\n\t\t\t\t\tFilename: filename,\n\t\t\t\t\tFileModTime: info.ModTime(),\n\t\t\t\t\tContent: \"\",\n\t\t\t\t}\n\t\t\t\tverbosef(\"\\tincludes file: '%s'\\n\", embeddedFile.Filename)\n\t\t\t\tcontentBytes, err := ioutil.ReadFile(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"error reading file content while walking box: %s\\n\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tembeddedFile.Content = string(contentBytes)\n\t\t\t\tbox.Files[embeddedFile.Filename] = embeddedFile\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\t\/\/ encode embedded box to gob file\n\t\tboxGobBuf := &bytes.Buffer{}\n\t\terr := gob.NewEncoder(boxGobBuf).Encode(box)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error encoding box to gob: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t\/\/ write coff\n\t\tsymname := regexpSynameReplacer.ReplaceAllString(boxname, \"_\")\n\t\tcreateCoffSyso(boxname, symname, \"386\", boxGobBuf.Bytes())\n\t\tcreateCoffSyso(boxname, symname, \"amd64\", boxGobBuf.Bytes())\n\n\t\t\/\/ write go\n\t\tsysoHelperData := embeddedSysoHelperData{\n\t\t\tPackage: pkg.Name,\n\t\t\tSymname: symname,\n\t\t}\n\t\tfileSysoHelper, err := os.Create(boxFilename + \".rice-box.go\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error creating syso helper: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = tmplEmbeddedSysoHelper.Execute(fileSysoHelper, sysoHelperData)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error executing tmplEmbeddedSysoHelper: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc createCoffSyso(boxFilename string, symname string, arch string, data []byte) {\n\tboxCoff := coff.NewRDATA()\n\tswitch arch {\n\tcase \"386\":\n\tcase \"amd64\":\n\t\tboxCoff.FileHeader.Machine = 0x8664\n\tdefault:\n\t\tpanic(\"invalid arch\")\n\t}\n\tboxCoff.AddData(\"_bricebox_\"+symname, sizedBytes(data))\n\tboxCoff.AddData(\"_ericebox_\"+symname, io.NewSectionReader(strings.NewReader(\"\\000\\000\"), 0, 2)) \/\/ TODO: why? 
copied from rsrc, which copied it from as-generated\n\tboxCoff.Freeze()\n\terr := writeCoff(boxCoff, boxFilename+\"_\"+arch+\".rice-box.syso\")\n\tif err != nil {\n\t\tfmt.Printf(\"error writing %s coff\/.syso: %v\\n\", arch, err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/hprose\/hprose-golang\/rpc\"\n)\n\n\/\/ Args ...\ntype Args struct {\n\tA, B int\n}\n\n\/\/ Quotient ...\ntype Quotient struct {\n\tQuo, Rem int\n}\n\n\/\/ Stub ...\ntype Stub struct {\n\t\/\/ Synchronous call\n\tMultiply func(args *Args) int\n\t\/\/ Asynchronous call\n\tDivide func(func(*Quotient, error), *Args)\n}\n\nfunc main() {\n\tclient := rpc.NewClient(\"http:\/\/127.0.0.1:8080\")\n\tvar stub *Stub\n\tclient.UseService(&stub)\n\tfmt.Println(stub.Multiply(&Args{8, 7}))\n\tstub.Divide(func(result *Quotient, err error) {\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"arith error:\", err)\n\t\t} else {\n\t\t\tfmt.Println(result.Quo, result.Rem)\n\t\t}\n\t}, &Args{8, 7})\n\ttime.Sleep(1 * time.Second)\n}\nImproved examplepackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/hprose\/hprose-golang\/rpc\"\n)\n\n\/\/ Args ...\ntype Args struct {\n\tA, B int\n}\n\n\/\/ Quotient ...\ntype Quotient struct {\n\tQuo, Rem int\n}\n\n\/\/ Stub ...\ntype Stub struct {\n\t\/\/ Synchronous call\n\tMultiply func(args *Args) int\n\t\/\/ Asynchronous call\n\tDivide func(func(*Quotient, error), *Args)\n}\n\nfunc main() {\n\tclient := rpc.NewClient(\"http:\/\/127.0.0.1:8080\")\n\tvar stub *Stub\n\tclient.UseService(&stub)\n\tfmt.Println(stub.Multiply(&Args{8, 7}))\n\tdone := make(chan struct{})\n\tstub.Divide(func(result *Quotient, err error) {\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"arith error:\", err)\n\t\t} else {\n\t\t\tfmt.Println(result.Quo, result.Rem)\n\t\t}\n\t\tdone <- struct{}{}\n\t}, &Args{8, 7})\n\t<-done\n}\n<|endoftext|>"} {"text":"package irmaserver\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n\t\"github.com\/privacybydesign\/irmago\/server\/irmarequestor\"\n)\n\ntype Configuration struct {\n\t*server.Configuration\n\tPort int\n}\n\nvar s *http.Server\n\n\/\/ Start the server. 
If successful then it will not return until Stop() is called.\nfunc Start(conf *Configuration) error {\n\tif err := irmarequestor.Initialize(conf.Configuration); err != nil {\n\t\treturn err\n\t}\n\n\trouter := chi.NewRouter()\n\n\t\/\/ Mount server for irmaclient\n\trouter.Mount(\"\/irma\/\", irmarequestor.HttpHandlerFunc(\"\/irma\/\"))\n\n\t\/\/ Server routes\n\trouter.Post(\"\/create\", handleCreate)\n\trouter.Get(\"\/status\/{token}\", handleStatus)\n\trouter.Get(\"\/result\/{token}\", handleResult)\n\n\t\/\/ Start server\n\ts = &http.Server{Addr: fmt.Sprintf(\":%d\", conf.Port), Handler: router}\n\terr := s.ListenAndServe()\n\tif err == http.ErrServerClosed {\n\t\treturn nil \/\/ Server was closed normally\n\t}\n\treturn err\n}\n\nfunc Stop() {\n\ts.Close()\n}\n\nfunc handleCreate(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\trequest, err := parseRequest(body)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\n\tqr, _, err := irmarequestor.StartSession(request, nil)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\n\tserver.WriteJson(w, qr)\n}\n\nfunc handleStatus(w http.ResponseWriter, r *http.Request) {\n\tres := irmarequestor.GetSessionResult(chi.URLParam(r, \"token\"))\n\tif res == nil {\n\t\tserver.WriteError(w, server.ErrorSessionUnknown, \"\")\n\t\treturn\n\t}\n\tserver.WriteJson(w, res.Status)\n}\n\nfunc handleResult(w http.ResponseWriter, r *http.Request) {\n\tres := irmarequestor.GetSessionResult(chi.URLParam(r, \"token\"))\n\tif res == nil {\n\t\tserver.WriteError(w, server.ErrorSessionUnknown, \"\")\n\t\treturn\n\t}\n\tserver.WriteJson(w, res)\n}\n\nfunc parseRequest(bts []byte) (request irma.SessionRequest, err error) {\n\trequest = &irma.DisclosureRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\trequest = &irma.SignatureRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\trequest = &irma.IssuanceRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\treturn nil, errors.New(\"Invalid session type\")\n}\nAllow irmaserver to be used as librarypackage irmaserver\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/go-chi\/chi\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/server\"\n\t\"github.com\/privacybydesign\/irmago\/server\/irmarequestor\"\n)\n\ntype Configuration struct {\n\t*server.Configuration\n\tPort int\n}\n\nvar s *http.Server\n\n\/\/ Start the server. 
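A minimal caller sketch (serverConf is an assumed, pre-filled *server.Configuration; the port is illustrative):\n\/\/\n\/\/\tconf := &Configuration{Configuration: serverConf, Port: 8088}\n\/\/\tgo func() {\n\/\/\t\tif err := Start(conf); err != nil {\n\/\/\t\t\tpanic(err)\n\/\/\t\t}\n\/\/\t}()\n\/\/\tdefer Stop()\n\/\/\n\/\/ 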
If successful then it will not return until Stop() is called.\nfunc Start(conf *Configuration) error {\n\thandler, err := Handler(conf.Configuration)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Start server\n\ts = &http.Server{Addr: fmt.Sprintf(\":%d\", conf.Port), Handler: handler}\n\terr = s.ListenAndServe()\n\tif err == http.ErrServerClosed {\n\t\treturn nil \/\/ Server was closed normally\n\t}\n\n\treturn err\n}\n\nfunc Stop() {\n\ts.Close()\n}\n\nfunc Handler(conf *server.Configuration) (http.Handler, error) {\n\tif err := irmarequestor.Initialize(conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\trouter := chi.NewRouter()\n\n\t\/\/ Mount server for irmaclient\n\trouter.Mount(\"\/irma\/\", irmarequestor.HttpHandlerFunc(\"\/irma\/\"))\n\n\t\/\/ Server routes\n\trouter.Post(\"\/create\", handleCreate)\n\trouter.Get(\"\/status\/{token}\", handleStatus)\n\trouter.Get(\"\/result\/{token}\", handleResult)\n\n\treturn router, nil\n}\n\nfunc handleCreate(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\trequest, err := parseRequest(body)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\n\tqr, _, err := irmarequestor.StartSession(request, nil)\n\tif err != nil {\n\t\tserver.WriteError(w, server.ErrorInvalidRequest, err.Error())\n\t\treturn\n\t}\n\n\tserver.WriteJson(w, qr)\n}\n\nfunc handleStatus(w http.ResponseWriter, r *http.Request) {\n\tres := irmarequestor.GetSessionResult(chi.URLParam(r, \"token\"))\n\tif res == nil {\n\t\tserver.WriteError(w, server.ErrorSessionUnknown, \"\")\n\t\treturn\n\t}\n\tserver.WriteJson(w, res.Status)\n}\n\nfunc handleResult(w http.ResponseWriter, r *http.Request) {\n\tres := irmarequestor.GetSessionResult(chi.URLParam(r, \"token\"))\n\tif res == nil {\n\t\tserver.WriteError(w, server.ErrorSessionUnknown, \"\")\n\t\treturn\n\t}\n\tserver.WriteJson(w, res)\n}\n\nfunc parseRequest(bts []byte) (request irma.SessionRequest, err error) {\n\trequest = &irma.DisclosureRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\trequest = &irma.SignatureRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\trequest = &irma.IssuanceRequest{}\n\tif err = irma.UnmarshalValidate(bts, request); err == nil {\n\t\treturn request, nil\n\t}\n\treturn nil, errors.New(\"Invalid session type\")\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2012 Google, Inc. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ that can be found in the LICENSE file in the root of the source\n\/\/ tree.\n\n\/\/ This binary provides sample code for using the gopacket TCP assembler and TCP\n\/\/ stream reader. 
It reads packets off the wire and reconstructs HTTP requests\n\/\/ it sees, logging them.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/examples\/util\"\n\t\"github.com\/google\/gopacket\/layers\"\n\t\"github.com\/google\/gopacket\/pcap\"\n\t\"github.com\/google\/gopacket\/tcpassembly\"\n\t\"github.com\/google\/gopacket\/tcpassembly\/tcpreader\"\n)\n\nvar iface = flag.String(\"i\", \"eth0\", \"Interface to get packets from\")\nvar fname = flag.String(\"r\", \"\", \"Filename to read from, overrides -i\")\nvar snaplen = flag.Int(\"s\", 1600, \"SnapLen for pcap packet capture\")\nvar filter = flag.String(\"f\", \"tcp and dst port 80\", \"BPF filter for pcap\")\nvar logAllPackets = flag.Bool(\"v\", false, \"Logs every packet in great detail\")\n\n\/\/ Build a simple HTTP request parser using tcpassembly.StreamFactory and tcpassembly.Stream interfaces\n\n\/\/ httpStreamFactory implements tcpassembly.StreamFactory\ntype httpStreamFactory struct{}\n\n\/\/ httpStream will handle the actual decoding of http requests.\ntype httpStream struct {\n\tnet, transport gopacket.Flow\n\tr tcpreader.ReaderStream\n}\n\nfunc (h *httpStreamFactory) New(net, transport gopacket.Flow) tcpassembly.Stream {\n\thstream := &httpStream{\n\t\tnet: net,\n\t\ttransport: transport,\n\t\tr: tcpreader.NewReaderStream(),\n\t}\n\tgo hstream.run() \/\/ Important... we must guarantee that data from the reader stream is read.\n\n\t\/\/ ReaderStream implements tcpassembly.Stream, so we can return a pointer to it.\n\treturn &hstream.r\n}\n\nfunc (h *httpStream) run() {\n\tbuf := bufio.NewReader(&h.r)\n\tfor {\n\t\tresp, err := http.ReadResponse(buf, nil)\n\t\tif err == io.EOF || err == io.ErrUnexpectedEOF {\n\t\t\t\/\/ We must read until we see an EOF... 
very important!\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\t\/\/log.Println(\"ERROR IN RESPONSE:\", h.net, \":\", err)\n\t\t} else {\n\n\t\t\tcontentType := resp.Header[\"Content-Type\"]\n\t\t\t\/\/contentEnc := resp.Header[\"Content-Encoding\"]\n\n\t\t\t\/\/log.Println(\"ENCODING:\", resp.TransferEncoding, \":\", contentEnc, \":\", resp.Uncompressed)\n\n\t\t\tif len(contentType) != 0 {\n\n\t\t\t\treader := resp.Body\n\t\t\t\t\/*\n\t\t\t\t\tif len(contentEnc) != 0 {\n\t\t\t\t\t\tif contentEnc[0] == \"gzip\" {\n\t\t\t\t\t\t\tr, qerr := gzip.NewReader(resp.Body)\n\t\t\t\t\t\t\tif qerr != nil {\n\t\t\t\t\t\t\t\tlog.Println(\"ERROR GZIP:\", qerr)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treader = r\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t*\/\n\n\t\t\t\tswitch contentType[0] {\n\t\t\t\t\/\/ TODO: ASCII, ANSI (Windows-1252)\n\t\t\t\tcase \"text\/html\", \"text\/html; charset=utf-8\", \"text\/html; charset=UTF-8\":\n\t\t\t\t\t\/\/ Default charset for HTML5\n\t\t\t\t\t\/\/fmt.Println(\"FOUND ONE:\", contentType)\n\n\t\t\t\t\tlog.Print(\"MATCHED:\", contentType[0])\n\n\t\t\t\t\tb, err := ioutil.ReadAll(reader)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(b)\n\t\t\t\t\t\/*\n\t\t\t\t\t\tbody, perr := html.Parse(resp.Body)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Println(\"PARSE ERROR:\", perr)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tdoc := goquery.NewDocumentFromNode(body)\n\t\t\t\t\t\t\tfmt.Println(\"DOC:\", doc.Find(\"h1\").Text())\n\t\t\t\t\t\t}\n\t\t\t\t\t*\/\n\t\t\t\tcase \"text\/html; charset=iso-8859-1\", \"text\/html; charset=ISO-8859-1\":\n\t\t\t\t\t\/\/ Default charset before HTML5\n\t\t\t\t\t\/\/ TODO: Do something with it, e.g. convert with iconv.\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/log.Println(\"UNUSED TYPE:\", contentType)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tbytes, err := tcpreader.DiscardBytesToFirstError(resp.Body)\n\n\t\t\t\tif err == io.EOF || err == io.ErrUnexpectedEOF {\n\t\t\t\t\tbreak\n\n\t\t\t\t} else if err != nil && bytes == 0 {\n\t\t\t\t\tlog.Println(\"ERROR BUT ZERO:\", h.net, \":\", err, \":\", bytes)\n\t\t\t\t\tbreak\n\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tlog.Println(\"ERROR NOT ZERO:\", h.net, \":\", err, \":\", bytes)\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\t\/\/log.Println(h.net, \":\", contentType, resp.Status)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tdefer util.Run()()\n\tvar handle *pcap.Handle\n\tvar err error\n\n\t\/\/ Set up pcap packet capture\n\tif *fname != \"\" {\n\t\tlog.Printf(\"Reading from pcap dump %q\", *fname)\n\t\thandle, err = pcap.OpenOffline(*fname)\n\t} else {\n\t\tlog.Printf(\"Starting capture on interface %q\", *iface)\n\t\thandle, err = pcap.OpenLive(*iface, int32(*snaplen), true, pcap.BlockForever)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := handle.SetBPFFilter(*filter); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Set up assembly\n\tstreamFactory := &httpStreamFactory{}\n\tstreamPool := tcpassembly.NewStreamPool(streamFactory)\n\tassembler := tcpassembly.NewAssembler(streamPool)\n\n\tlog.Println(\"reading in packets\")\n\t\/\/ Read in packets, pass to assembler.\n\tpacketSource := gopacket.NewPacketSource(handle, handle.LinkType())\n\tpackets := packetSource.Packets()\n\tticker := time.Tick(time.Minute)\n\tfor {\n\t\tselect {\n\t\tcase packet := <-packets:\n\t\t\t\/\/ A nil packet indicates the end of a pcap file.\n\t\t\tif packet == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif *logAllPackets {\n\t\t\t\tlog.Println(packet)\n\t\t\t}\n\t\t\tif 
packet.NetworkLayer() == nil || packet.TransportLayer() == nil || packet.TransportLayer().LayerType() != layers.LayerTypeTCP {\n\t\t\t\tlog.Println(\"Unusable packet\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttcp := packet.TransportLayer().(*layers.TCP)\n\t\t\tassembler.AssembleWithTimestamp(packet.NetworkLayer().NetworkFlow(), tcp, packet.Metadata().Timestamp)\n\n\t\tcase <-ticker:\n\t\t\t\/\/ Every minute, flush connections that haven't seen activity in the past 2 minutes.\n\t\t\tassembler.FlushOlderThan(time.Now().Add(time.Minute * -2))\n\t\t}\n\t}\n}\nPrune back to try and trace the end of file issues.\/\/ Copyright 2012 Google, Inc. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style license\n\/\/ that can be found in the LICENSE file in the root of the source\n\/\/ tree.\n\n\/\/ This binary provides sample code for using the gopacket TCP assembler and TCP\n\/\/ stream reader. It reads packets off the wire and reconstructs HTTP requests\n\/\/ it sees, logging them.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/examples\/util\"\n\t\"github.com\/google\/gopacket\/layers\"\n\t\"github.com\/google\/gopacket\/pcap\"\n\t\"github.com\/google\/gopacket\/tcpassembly\"\n\t\"github.com\/google\/gopacket\/tcpassembly\/tcpreader\"\n)\n\nvar iface = flag.String(\"i\", \"eth0\", \"Interface to get packets from\")\nvar fname = flag.String(\"r\", \"\", \"Filename to read from, overrides -i\")\nvar snaplen = flag.Int(\"s\", 1600, \"SnapLen for pcap packet capture\")\nvar filter = flag.String(\"f\", \"tcp and dst port 80\", \"BPF filter for pcap\")\nvar logAllPackets = flag.Bool(\"v\", false, \"Logs every packet in great detail\")\n\n\/\/ Build a simple HTTP request parser using tcpassembly.StreamFactory and tcpassembly.Stream interfaces\n\n\/\/ httpStreamFactory implements tcpassembly.StreamFactory\ntype httpStreamFactory struct{}\n\n\/\/ httpStream will handle the actual decoding of http requests.\ntype httpStream struct {\n\tnet, transport gopacket.Flow\n\tr tcpreader.ReaderStream\n}\n\nfunc (h *httpStreamFactory) New(net, transport gopacket.Flow) tcpassembly.Stream {\n\thstream := &httpStream{\n\t\tnet: net,\n\t\ttransport: transport,\n\t\tr: tcpreader.NewReaderStream(),\n\t}\n\tgo hstream.run() \/\/ Important... we must guarantee that data from the reader stream is read.\n\n\t\/\/ ReaderStream implements tcpassembly.Stream, so we can return a pointer to it.\n\treturn &hstream.r\n}\n\nfunc (h *httpStream) run() {\n\tbuf := bufio.NewReader(&h.r)\n\tfor {\n\t\tresp, err := http.ReadResponse(buf, nil)\n\t\tif err == io.EOF {\n\t\t\t\/\/ We must read until we see an EOF... 
very important!\n\t\t\treturn\n\t\t} else if err == io.ErrUnexpectedEOF {\n\t\t\t\/\/ TODO: need to establish if we get these in the header.\n\t\t\t\/\/log.Println(\"UEOF IN RESP:\", h.net, \":\", err)\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tlog.Println(\"ERROR IN RESP:\", h.net, \":\", err)\n\t\t\t\/\/ TODO: What else?\n\t\t} else {\n\n\t\t\tcontentType := resp.Header[\"Content-Type\"]\n\t\t\t\/\/contentEnc := resp.Header[\"Content-Encoding\"]\n\n\t\t\t\/\/log.Println(\"ENCODING:\", resp.TransferEncoding, \":\", contentEnc, \":\", resp.Uncompressed)\n\n\t\t\tif len(contentType) != 0 {\n\n\t\t\t\treader := resp.Body\n\t\t\t\t\/*\n\t\t\t\t\tif len(contentEnc) != 0 {\n\t\t\t\t\t\tif contentEnc[0] == \"gzip\" {\n\t\t\t\t\t\t\tr, qerr := gzip.NewReader(resp.Body)\n\t\t\t\t\t\t\tif qerr != nil {\n\t\t\t\t\t\t\t\tlog.Println(\"ERROR GZIP:\", qerr)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treader = r\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t*\/\n\n\t\t\t\tswitch contentType[0] {\n\t\t\t\t\/\/ TODO: ASCII, ANSI (Windows-1252)\n\t\t\t\tcase \"text\/html\", \"text\/html; charset=utf-8\", \"text\/html; charset=UTF-8\":\n\t\t\t\t\t\/\/ Default charset for HTML5\n\t\t\t\t\tlog.Print(\"MATCHED:\", contentType[0])\n\n\t\t\t\t\tb, err := ioutil.ReadAll(reader)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(string(b))\n\t\t\t\t\t\/*\n\t\t\t\t\t\tbody, perr := html.Parse(resp.Body)\n\t\t\t\t\t\tif perr != nil {\n\t\t\t\t\t\t\tlog.Println(\"PARSE ERROR:\", perr)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tdoc := goquery.NewDocumentFromNode(body)\n\t\t\t\t\t\t\tfmt.Println(\"DOC:\", doc.Find(\"h1\").Text())\n\t\t\t\t\t\t}\n\t\t\t\t\t*\/\n\t\t\t\tcase \"text\/html; charset=iso-8859-1\", \"text\/html; charset=ISO-8859-1\":\n\t\t\t\t\t\/\/ Default charset for HTML 2 to 4\n\t\t\t\t\t\/\/ TODO: Do something with it, e.g. 
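transcode to UTF-8. A minimal\n\t\t\t\t\t\/\/ commented sketch, with assumed imports golang.org\/x\/text\/encoding\/charmap\n\t\t\t\t\t\/\/ and golang.org\/x\/text\/transform (neither is imported in this example):\n\t\t\t\t\t\/\/\n\t\t\t\t\t\/\/   dec := charmap.ISO8859_1.NewDecoder()\n\t\t\t\t\t\/\/   utf8Body, derr := ioutil.ReadAll(transform.NewReader(reader, dec))\n\t\t\t\t\t\/\/   if derr == nil {\n\t\t\t\t\t\/\/   \tfmt.Println(string(utf8Body))\n\t\t\t\t\t\/\/   }\n\t\t\t\t\t\/\/\n\t\t\t\t\t\/\/ Another option is to 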
convert with iconv.\n\t\t\t\t\tlog.Print(\"MATCHED:\", contentType[0])\n\t\t\t\t\tfallthrough\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/log.Println(\"UNUSED TYPE:\", contentType)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tbytes, err := tcpreader.DiscardBytesToFirstError(resp.Body)\n\n\t\t\t\tif err == io.EOF || err == io.ErrUnexpectedEOF {\n\t\t\t\t\tbreak\n\n\t\t\t\t} else if err != nil && bytes == 0 {\n\t\t\t\t\tlog.Println(\"ERROR BUT ZERO:\", h.net, \":\", err, \":\", bytes)\n\t\t\t\t\tbreak\n\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tlog.Println(\"ERROR NOT ZERO:\", h.net, \":\", err, \":\", bytes)\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\t\/\/log.Println(h.net, \":\", contentType, resp.Status)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tdefer util.Run()()\n\tvar handle *pcap.Handle\n\tvar err error\n\n\t\/\/ Set up pcap packet capture\n\tif *fname != \"\" {\n\t\tlog.Printf(\"Reading from pcap dump %q\", *fname)\n\t\thandle, err = pcap.OpenOffline(*fname)\n\t} else {\n\t\tlog.Printf(\"Starting capture on interface %q\", *iface)\n\t\t\/\/ TODO: Not sure about BlockForever.\n\t\thandle, err = pcap.OpenLive(*iface, int32(*snaplen), true, pcap.BlockForever)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := handle.SetBPFFilter(*filter); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Set up assembly\n\tstreamFactory := &httpStreamFactory{}\n\tstreamPool := tcpassembly.NewStreamPool(streamFactory)\n\tassembler := tcpassembly.NewAssembler(streamPool)\n\n\tlog.Println(\"reading in packets\")\n\t\/\/ Read in packets, pass to assembler.\n\tpacketSource := gopacket.NewPacketSource(handle, handle.LinkType())\n\tpackets := packetSource.Packets()\n\tticker := time.Tick(time.Second * 10)\n\tfor {\n\t\tselect {\n\t\tcase packet := <-packets:\n\t\t\t\/\/ A nil packet indicates the end of a pcap file.\n\t\t\tif packet == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif *logAllPackets {\n\t\t\t\tlog.Println(packet)\n\t\t\t}\n\t\t\tif packet.NetworkLayer() == nil || packet.TransportLayer() == nil || packet.TransportLayer().LayerType() != layers.LayerTypeTCP {\n\t\t\t\tlog.Println(\"Unusable packet\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttcp := packet.TransportLayer().(*layers.TCP)\n\t\t\tassembler.AssembleWithTimestamp(packet.NetworkLayer().NetworkFlow(), tcp, packet.Metadata().Timestamp)\n\n\t\tcase <-ticker:\n\t\t\t\/\/ Was: Every minute, flush connections that haven't seen activity in the past 2 minutes.\n\t\t\tassembler.FlushOlderThan(time.Now().Add(time.Second * -20))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package routing\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/victorspringer\/trapAdvisor\/authenticating\"\n\t\"github.com\/victorspringer\/trapAdvisor\/handling\"\n\t\"github.com\/victorspringer\/trapAdvisor\/persistence\"\n)\n\ntype route struct {\n\tMethod string\n\tPattern string\n\tName string\n\tHandlerFunc http.HandlerFunc\n}\n\n\/\/ Router initializer.\nfunc Router() *mux.Router {\n\trouter := mux.NewRouter().StrictSlash(true)\n\n\taSvc := authenticating.NewService()\n\n\thSvc := handling.NewService(\n\t\tpersistence.NewTravellerRepository(),\n\t\tpersistence.NewFriendshipRepository(),\n\t\tpersistence.NewTripRepository(),\n\t\tpersistence.NewTouristAttractionRepository(),\n\t)\n\n\troutes := []route{\n\t\troute{\"GET\", \"\/health\", \"Health\", hSvc.Health},\n\t\troute{\"GET\", \"\/login\", \"Login\", aSvc.HandleFacebookLogin},\n\t\troute{\"GET\", \"\/auth_callback\", \"AuthCallback\", 
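\/\/ Facebook redirects back here after login.\n\t\t\t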
aSvc.HandleFacebookCallback},\n\t\troute{\"GET\", \"\/logout\", \"Logout\", aSvc.HandleFacebookLogout},\n\n\t\troute{\"POST\", \"\/v1\/trip\/store\", \"StoreTrip\", aSvc.AuthMiddleware(hSvc.StoreTrip)},\n\t\troute{\"POST\", \"\/v1\/ta\/store\", \"StoreTouristAttraction\", aSvc.AuthMiddleware(hSvc.StoreTouristAttraction)},\n\n\t\troute{\"GET\", \"\/v1\/traveller\/find\/{id}\", \"FindTraveller\", aSvc.AuthMiddleware(hSvc.FindTraveller)},\n\t\troute{\"GET\", \"\/v1\/friendship\/find\/traveller\/{id}\", \"FindFriendshipByTravellerID\", aSvc.AuthMiddleware(hSvc.FindFriendshipByTravellerID)},\n\t\troute{\"GET\", \"\/v1\/trip\/find\/{id}\", \"FindTrip\", aSvc.AuthMiddleware(hSvc.FindTrip)},\n\t\troute{\"GET\", \"\/v1\/trip\/find\/traveller\/{id}\", \"FindTripByTravellerID\", aSvc.AuthMiddleware(hSvc.FindTripByTravellerID)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/{id}\", \"FindTouristAttraction\", aSvc.AuthMiddleware(hSvc.FindTouristAttraction)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/trip\/{id}\", \"FindTouristAttractionByTripID\", aSvc.AuthMiddleware(hSvc.FindTouristAttractionByTripID)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/name_part\/{namePart}\", \"FindTouristAttractionByNamePart\", aSvc.AuthMiddleware(hSvc.FindTouristAttractionByNamePart)},\n\t\troute{\"GET\", \"\/v1\/ta\/most_visited\", \"FindMostVisitedTouristAttractions\", aSvc.AuthMiddleware(hSvc.FindMostVisitedTouristAttractions)},\n\t\troute{\"GET\", \"\/v1\/ta\/best_rated\", \"FindBestRatedTouristAttractions\", aSvc.AuthMiddleware(hSvc.FindBestRatedTouristAttractions)},\n\t}\n\n\tfor _, route := range routes {\n\t\tvar handler http.Handler\n\n\t\thandler = route.HandlerFunc\n\t\thandler = logger(handler, route.Name)\n\n\t\trouter.Methods(route.Method).Path(route.Pattern).Name(route.Name).Handler(handler)\n\t}\n\n\treturn router\n}\n\nfunc logger(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tinner.ServeHTTP(w, r)\n\t\tlog.Printf(\"%v\\t%v\\t%v\\t%v\", r.Method, r.RequestURI, name, time.Since(start))\n\t})\n}\nallows corspackage routing\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/rs\/cors\"\n\t\"github.com\/victorspringer\/trapAdvisor\/authenticating\"\n\t\"github.com\/victorspringer\/trapAdvisor\/handling\"\n\t\"github.com\/victorspringer\/trapAdvisor\/persistence\"\n)\n\ntype route struct {\n\tMethod string\n\tPattern string\n\tName string\n\tHandlerFunc http.HandlerFunc\n}\n\n\/\/ Router initializer.\nfunc Router() *mux.Router {\n\trouter := mux.NewRouter().StrictSlash(true)\n\n\taSvc := authenticating.NewService()\n\n\thSvc := handling.NewService(\n\t\tpersistence.NewTravellerRepository(),\n\t\tpersistence.NewFriendshipRepository(),\n\t\tpersistence.NewTripRepository(),\n\t\tpersistence.NewTouristAttractionRepository(),\n\t)\n\n\troutes := []route{\n\t\troute{\"GET\", \"\/health\", \"Health\", hSvc.Health},\n\t\troute{\"GET\", \"\/login\", \"Login\", aSvc.HandleFacebookLogin},\n\t\troute{\"GET\", \"\/auth_callback\", \"AuthCallback\", aSvc.HandleFacebookCallback},\n\t\troute{\"GET\", \"\/logout\", \"Logout\", aSvc.HandleFacebookLogout},\n\n\t\troute{\"POST\", \"\/v1\/trip\/store\", \"StoreTrip\", aSvc.AuthMiddleware(hSvc.StoreTrip)},\n\t\troute{\"POST\", \"\/v1\/ta\/store\", \"StoreTouristAttraction\", aSvc.AuthMiddleware(hSvc.StoreTouristAttraction)},\n\n\t\troute{\"GET\", \"\/v1\/traveller\/find\/{id}\", \"FindTraveller\", 
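\/\/ wrapped with AuthMiddleware: requires a logged-in traveller\n\t\t\t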
aSvc.AuthMiddleware(hSvc.FindTraveller)},\n\t\troute{\"GET\", \"\/v1\/friendship\/find\/traveller\/{id}\", \"FindFriendshipByTravellerID\", aSvc.AuthMiddleware(hSvc.FindFriendshipByTravellerID)},\n\t\troute{\"GET\", \"\/v1\/trip\/find\/{id}\", \"FindTrip\", aSvc.AuthMiddleware(hSvc.FindTrip)},\n\t\troute{\"GET\", \"\/v1\/trip\/find\/traveller\/{id}\", \"FindTripByTravellerID\", aSvc.AuthMiddleware(hSvc.FindTripByTravellerID)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/{id}\", \"FindTouristAttraction\", aSvc.AuthMiddleware(hSvc.FindTouristAttraction)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/trip\/{id}\", \"FindTouristAttractionByTripID\", aSvc.AuthMiddleware(hSvc.FindTouristAttractionByTripID)},\n\t\troute{\"GET\", \"\/v1\/ta\/find\/name_part\/{namePart}\", \"FindTouristAttractionByNamePart\", aSvc.AuthMiddleware(hSvc.FindTouristAttractionByNamePart)},\n\t\troute{\"GET\", \"\/v1\/ta\/most_visited\", \"FindMostVisitedTouristAttractions\", aSvc.AuthMiddleware(hSvc.FindMostVisitedTouristAttractions)},\n\t\troute{\"GET\", \"\/v1\/ta\/best_rated\", \"FindBestRatedTouristAttractions\", aSvc.AuthMiddleware(hSvc.FindBestRatedTouristAttractions)},\n\t}\n\n\tfor _, route := range routes {\n\t\tvar handler http.Handler\n\n\t\thandler = route.HandlerFunc\n\t\thandler = logger(handler, route.Name)\n\t\thandler = cors.Default().Handler(handler)\n\n\t\trouter.Methods(route.Method).Path(route.Pattern).Name(route.Name).Handler(handler)\n\t}\n\n\treturn router\n}\n\nfunc logger(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tinner.ServeHTTP(w, r)\n\t\tlog.Printf(\"%v\\t%v\\t%v\\t%v\", r.Method, r.RequestURI, name, time.Since(start))\n\t})\n}\n<|endoftext|>"} {"text":"package msg\n\nimport (\n\t\"fmt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sns\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/viant\/endly\/system\/cloud\/ec2\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/cred\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype awsPubSub struct {\n\tsession *session.Session\n\tsqs *sqs.SQS\n\tsns *sns.SNS\n\ttimeout time.Duration\n}\n\nfunc (c *awsPubSub) sendMessage(dest *Resource, message *Message) (Result, error) {\n\tqueueURL, err := c.getQueueURL(dest.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sqs.SendMessageInput{\n\t\tDelaySeconds: aws.Int64(1),\n\t\tMessageAttributes: map[string]*sqs.MessageAttributeValue{},\n\t\tQueueUrl: &queueURL,\n\t}\n\n\tif len(message.Attributes) > 0 {\n\t\tfor k, v := range message.Attributes {\n\t\t\tinput.MessageAttributes[k] = &sqs.MessageAttributeValue{\n\t\t\t\tDataType: aws.String(\"String\"),\n\t\t\t\tStringValue: aws.String(v),\n\t\t\t}\n\t\t}\n\t}\n\tvar body = toolbox.AsString(message.Data)\n\tinput.MessageBody = aws.String(body)\n\tresult, err := c.sqs.SendMessage(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *result.MessageId, nil\n}\n\nfunc (c *awsPubSub) publishMessage(dest *Resource, message *Message) (Result, error) {\n\ttopicARN, err := c.getTopicARN(dest.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sns.PublishInput{\n\t\tMessageAttributes: map[string]*sns.MessageAttributeValue{},\n\t\tTopicArn: aws.String(topicARN),\n\t}\n\tif len(message.Attributes) > 0 {\n\t\tfor k, v := range message.Attributes {\n\t\t\tinput.MessageAttributes[k] = &sns.MessageAttributeValue{\n\t\t\t\tDataType: 
aws.String(\"String\"),\n\t\t\t\tStringValue: aws.String(v),\n\t\t\t}\n\t\t}\n\t}\n\tvar body = toolbox.AsString(message.Data)\n\tinput.Message = aws.String(body)\n\tinput.Subject = aws.String(message.Subject)\n\toutput, err := c.sns.Publish(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *output.MessageId, nil\n}\n\nfunc (c *awsPubSub) Push(dest *Resource, message *Message) (Result, error) {\n\tswitch dest.Type {\n\tcase ResourceTypeTopic:\n\t\treturn c.publishMessage(dest, message)\n\tcase ResourceTypeQueue:\n\t\treturn c.sendMessage(dest, message)\n\n\t}\n\treturn nil, fmt.Errorf(\"unsupported resource type: %v\", dest.Type)\n}\n\nfunc (c *awsPubSub) PullN(source *Resource, count int, nack bool) ([]*Message, error) {\n\tqueueURL, err := c.getQueueURL(source.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sqs.ReceiveMessageInput{\n\t\tQueueUrl: aws.String(queueURL),\n\t\tAttributeNames: aws.StringSlice([]string{\n\t\t\t\"All\",\n\t\t}),\n\t\tMaxNumberOfMessages: aws.Int64(int64(count)),\n\t\tMessageAttributeNames: aws.StringSlice([]string{\n\t\t\t\"All\",\n\t\t}),\n\t\t\/\/ Duration is in nanoseconds; divide by time.Second to get whole seconds.\n\t\tWaitTimeSeconds: aws.Int64(int64(c.timeout \/ time.Second)),\n\t}\n\t\/\/ Receive a message from the SQS queue with long polling enabled.\n\toutput, err := c.sqs.ReceiveMessage(input)\n\tvar result = make([]*Message, 0)\n\tif err != nil || len(output.Messages) == 0 {\n\t\treturn result, err\n\t}\n\tfor _, msg := range output.Messages {\n\t\tmessage := &Message{\n\t\t\tID: *msg.MessageId,\n\t\t\tAttributes: map[string]string{},\n\t\t}\n\t\tif msg.Body != nil {\n\t\t\tmessage.Data = *msg.Body\n\t\t}\n\t\tif len(msg.MessageAttributes) > 0 {\n\t\t\tfor k, v := range msg.MessageAttributes {\n\t\t\t\tval := \"\"\n\t\t\t\tif v != nil {\n\t\t\t\t\tval = *v.StringValue\n\t\t\t\t}\n\t\t\t\tmessage.Attributes[k] = val\n\t\t\t}\n\t\t}\n\t\tresult = append(result, message)\n\t}\n\treturn result, nil\n}\n\nfunc (c *awsPubSub) createSubscription(topicURL, queueURL string) (*Resource, error) {\n\tinput := &sns.SubscribeInput{\n\t\tEndpoint: aws.String(queueURL),\n\t\tProtocol: aws.String(\"sqs\"),\n\t\tTopicArn: aws.String(topicURL),\n\t\tReturnSubscriptionArn: aws.Bool(true),\n\t}\n\toutput, err := c.sns.Subscribe(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Resource{URL: *output.SubscriptionArn}, nil\n}\n\nfunc (c *awsPubSub) createQueue(resource *ResourceSetup) (*Resource, error) {\n\tvar name = resource.Name\n\n\tif resource.Recreate {\n\t\tif _, err := c.getQueueURL(resource.Name); err == nil {\n\t\t\tif err = c.deleteQueue(&resource.Resource); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to delete queue: %v, %v\", name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tinput := &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(name),\n\t\tAttributes: map[string]*string{},\n\t}\n\tif resource.Config != nil && len(resource.Config.Attributes) > 0 {\n\t\tfor k, v := range resource.Config.Attributes {\n\t\t\tinput.Attributes[k] = aws.String(v)\n\t\t}\n\t}\n\tresult, err := c.sqs.CreateQueue(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resultResource = &Resource{URL: *result.QueueUrl, Name: name}\n\tif resource.Config.Topic != nil {\n\t\ttopicURL, err := c.getTopicARN(resource.Config.Topic.URL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = c.createSubscription(topicURL, *result.QueueUrl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn resultResource, nil\n}\n\nfunc (c *awsPubSub) getTopicARN(topicURL string) (string, error) {\n\tinput := &sns.ListTopicsInput{}\n\tfor { \/\/TODO look into 
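a\n\t\t\/\/ cheaper lookup than paging ListTopics. One possibility (a sketch,\n\t\t\/\/ assuming the account ID and region were available here, which they\n\t\t\/\/ are not today) is to build the ARN directly:\n\t\t\/\/\n\t\t\/\/   arn := fmt.Sprintf(\"arn:aws:sns:%v:%v:%v\", region, accountID, topicURL)\n\t\t\/\/\n\t\t\/\/ Until then we scan the listing; TODO: find a 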
better way to get topic URL\n\t\toutput, err := c.sns.ListTopics(input)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, topic := range output.Topics {\n\t\t\tparts := strings.Split(*topic.TopicArn, \":\")\n\t\t\tcandidate := parts[len(parts)-1]\n\t\t\tif candidate == topicURL {\n\t\t\t\treturn *topic.TopicArn, nil\n\t\t\t}\n\t\t}\n\t\tinput.NextToken = output.NextToken\n\t\tif output.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to lookup topic URL %v\", topicURL)\n}\n\nfunc (c *awsPubSub) getQueueURL(queueName string) (string, error) {\n\tresult, err := c.sqs.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: aws.String(queueName),\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to lookup queue URL %v\", queueName)\n\t}\n\treturn *result.QueueUrl, nil\n}\n\nfunc (c *awsPubSub) createTopic(resource *ResourceSetup) (*Resource, error) {\n\tvar name = resource.Name\n\n\tif resource.Recreate {\n\t\tif arn, _ := c.getTopicARN(resource.Name); arn != \"\" {\n\t\t\tif err := c.deleteTopic(&resource.Resource); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to delete topic: %v, %v\", name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tinput := &sns.CreateTopicInput{\n\t\tName: aws.String(name),\n\t}\n\tresult, err := c.sns.CreateTopic(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resultResource = &Resource{URL: *result.TopicArn, Name: resource.Name}\n\treturn resultResource, nil\n}\n\nfunc (c *awsPubSub) Create(resource *ResourceSetup) (*Resource, error) {\n\tswitch resource.Type {\n\tcase ResourceTypeTopic:\n\t\treturn c.createTopic(resource)\n\tcase ResourceTypeQueue:\n\t\treturn c.createQueue(resource)\n\t}\n\treturn nil, fmt.Errorf(\"unsupported resource type: %v\", resource.Type)\n}\n\nfunc (c *awsPubSub) deleteQueue(resource *Resource) error {\n\tqueueURL, err := c.getQueueURL(resource.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.sqs.DeleteQueue(&sqs.DeleteQueueInput{\n\t\tQueueUrl: aws.String(queueURL),\n\t})\n\treturn err\n}\n\nfunc (c *awsPubSub) deleteTopic(resource *Resource) error {\n\tqueueURL, err := c.getTopicARN(resource.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.sns.DeleteTopic(&sns.DeleteTopicInput{\n\t\tTopicArn: aws.String(queueURL),\n\t})\n\treturn err\n}\n\nfunc (c *awsPubSub) Delete(resource *Resource) error {\n\tswitch resource.Type {\n\tcase ResourceTypeQueue:\n\t\treturn c.deleteQueue(resource)\n\tcase ResourceTypeTopic:\n\t\treturn c.deleteTopic(resource)\n\t}\n\treturn fmt.Errorf(\"unsupported resource type: %v\", resource.Type)\n}\n\nfunc (c *awsPubSub) Close() error {\n\treturn nil\n}\n\nfunc newAwsSqsClient(credConfig *cred.Config, timeout time.Duration) (Client, error) {\n\tconfig, err := ec2.GetAWSCredentialConfig(credConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar client = &awsPubSub{\n\t\ttimeout: timeout,\n\t}\n\tif client.session, err = session.NewSession(config); err != nil {\n\t\treturn nil, err\n\t}\n\tclient.sqs = sqs.New(client.session)\n\tclient.sns = sns.New(client.session)\n\treturn client, nil\n}\nupdated deppackage msg\n\nimport (\n\t\"fmt\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sns\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\teaws \"github.com\/viant\/endly\/system\/cloud\/aws\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/cred\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype awsPubSub struct {\n\tsession 
*session.Session\n\tsqs *sqs.SQS\n\tsns *sns.SNS\n\ttimeout time.Duration\n}\n\nfunc (c *awsPubSub) sendMessage(dest *Resource, message *Message) (Result, error) {\n\tqueueURL, err := c.getQueueURL(dest.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sqs.SendMessageInput{\n\t\tDelaySeconds: aws.Int64(1),\n\t\tMessageAttributes: map[string]*sqs.MessageAttributeValue{},\n\t\tQueueUrl: &queueURL,\n\t}\n\n\tif len(message.Attributes) > 0 {\n\t\tfor k, v := range message.Attributes {\n\t\t\tinput.MessageAttributes[k] = &sqs.MessageAttributeValue{\n\t\t\t\tDataType: aws.String(\"String\"),\n\t\t\t\tStringValue: aws.String(v),\n\t\t\t}\n\t\t}\n\t}\n\tvar body = toolbox.AsString(message.Data)\n\tinput.MessageBody = aws.String(body)\n\tresult, err := c.sqs.SendMessage(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *result.MessageId, nil\n}\n\nfunc (c *awsPubSub) publishMessage(dest *Resource, message *Message) (Result, error) {\n\ttopicARN, err := c.getTopicARN(dest.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sns.PublishInput{\n\t\tMessageAttributes: map[string]*sns.MessageAttributeValue{},\n\t\tTopicArn: aws.String(topicARN),\n\t}\n\tif len(message.Attributes) > 0 {\n\t\tfor k, v := range message.Attributes {\n\t\t\tinput.MessageAttributes[k] = &sns.MessageAttributeValue{\n\t\t\t\tDataType: aws.String(\"String\"),\n\t\t\t\tStringValue: aws.String(v),\n\t\t\t}\n\t\t}\n\t}\n\tvar body = toolbox.AsString(message.Data)\n\tinput.Message = aws.String(body)\n\tinput.Subject = aws.String(message.Subject)\n\toutput, err := c.sns.Publish(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *output.MessageId, nil\n}\n\nfunc (c *awsPubSub) Push(dest *Resource, message *Message) (Result, error) {\n\tswitch dest.Type {\n\tcase ResourceTypeTopic:\n\t\treturn c.publishMessage(dest, message)\n\tcase ResourceTypeQueue:\n\t\treturn c.sendMessage(dest, message)\n\n\t}\n\treturn nil, fmt.Errorf(\"unsupported resource type: %v\", dest.Type)\n}\n\nfunc (c *awsPubSub) PullN(source *Resource, count int, nack bool) ([]*Message, error) {\n\tqueueURL, err := c.getQueueURL(source.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinput := &sqs.ReceiveMessageInput{\n\t\tQueueUrl: aws.String(queueURL),\n\t\tAttributeNames: aws.StringSlice([]string{\n\t\t\t\"All\",\n\t\t}),\n\t\tMaxNumberOfMessages: aws.Int64(int64(count)),\n\t\tMessageAttributeNames: aws.StringSlice([]string{\n\t\t\t\"All\",\n\t\t}),\n\t\t\/\/ Duration is in nanoseconds; divide by time.Second to get whole seconds.\n\t\tWaitTimeSeconds: aws.Int64(int64(c.timeout \/ time.Second)),\n\t}\n\t\/\/ Receive a message from the SQS queue with long polling enabled.\n\toutput, err := c.sqs.ReceiveMessage(input)\n\tvar result = make([]*Message, 0)\n\tif err != nil || len(output.Messages) == 0 {\n\t\treturn result, err\n\t}\n\tfor _, msg := range output.Messages {\n\t\tmessage := &Message{\n\t\t\tID: *msg.MessageId,\n\t\t\tAttributes: map[string]string{},\n\t\t}\n\t\tif msg.Body != nil {\n\t\t\tmessage.Data = *msg.Body\n\t\t}\n\t\tif len(msg.MessageAttributes) > 0 {\n\t\t\tfor k, v := range msg.MessageAttributes {\n\t\t\t\tval := \"\"\n\t\t\t\tif v != nil {\n\t\t\t\t\tval = *v.StringValue\n\t\t\t\t}\n\t\t\t\tmessage.Attributes[k] = val\n\t\t\t}\n\t\t}\n\t\tresult = append(result, message)\n\t}\n\treturn result, nil\n}\n\nfunc (c *awsPubSub) createSubscription(topicURL, queueURL string) (*Resource, error) {\n\tinput := &sns.SubscribeInput{\n\t\tEndpoint: aws.String(queueURL),\n\t\tProtocol: aws.String(\"sqs\"),\n\t\tTopicArn: aws.String(topicURL),\n\t\tReturnSubscriptionArn: aws.Bool(true),\n\t}\n\toutput, err := 
c.sns.Subscribe(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Resource{URL: *output.SubscriptionArn}, nil\n}\n\nfunc (c *awsPubSub) createQueue(resource *ResourceSetup) (*Resource, error) {\n\tvar name = resource.Name\n\n\tif resource.Recreate {\n\t\tif _, err := c.getQueueURL(resource.Name); err == nil {\n\t\t\tif err = c.deleteQueue(&resource.Resource); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to delete queue: %v, %v\", name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tinput := &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(name),\n\t\tAttributes: map[string]*string{},\n\t}\n\tif resource.Config != nil && len(resource.Config.Attributes) > 0 {\n\t\tfor k, v := range resource.Config.Attributes {\n\t\t\tinput.Attributes[k] = aws.String(v)\n\t\t}\n\t}\n\tresult, err := c.sqs.CreateQueue(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resultResource = &Resource{URL: *result.QueueUrl, Name: name}\n\tif resource.Config.Topic != nil {\n\t\ttopicURL, err := c.getTopicARN(resource.Config.Topic.URL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = c.createSubscription(topicURL, *result.QueueUrl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn resultResource, nil\n}\n\nfunc (c *awsPubSub) getTopicARN(topicURL string) (string, error) {\n\tinput := &sns.ListTopicsInput{}\n\tfor { \/\/TODO look into better way to get topic URL\n\t\toutput, err := c.sns.ListTopics(input)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor _, topic := range output.Topics {\n\t\t\tparts := strings.Split(*topic.TopicArn, \":\")\n\t\t\tcandidate := parts[len(parts)-1]\n\t\t\tif candidate == topicURL {\n\t\t\t\treturn *topic.TopicArn, nil\n\t\t\t}\n\t\t}\n\t\tinput.NextToken = output.NextToken\n\t\tif output.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to lookup topic URL %v\", topicURL)\n}\n\nfunc (c *awsPubSub) getQueueURL(queueName string) (string, error) {\n\tresult, err := c.sqs.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: aws.String(queueName),\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to lookup queue URL %v\", queueName)\n\t}\n\treturn *result.QueueUrl, nil\n}\n\nfunc (c *awsPubSub) createTopic(resource *ResourceSetup) (*Resource, error) {\n\tvar name = resource.Name\n\n\tif resource.Recreate {\n\t\tif arn, _ := c.getTopicARN(resource.Name); arn != \"\" {\n\t\t\tif err := c.deleteTopic(&resource.Resource); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to delete topic: %v, %v\", name, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tinput := &sns.CreateTopicInput{\n\t\tName: aws.String(name),\n\t}\n\tresult, err := c.sns.CreateTopic(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resultResource = &Resource{URL: *result.TopicArn, Name: resource.Name}\n\treturn resultResource, nil\n}\n\nfunc (c *awsPubSub) Create(resource *ResourceSetup) (*Resource, error) {\n\tswitch resource.Type {\n\tcase ResourceTypeTopic:\n\t\treturn c.createTopic(resource)\n\tcase ResourceTypeQueue:\n\t\treturn c.createQueue(resource)\n\t}\n\treturn nil, fmt.Errorf(\"unsupported resource type: %v\", resource.Type)\n}\n\nfunc (c *awsPubSub) deleteQueue(resource *Resource) error {\n\tqueueURL, err := c.getQueueURL(resource.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.sqs.DeleteQueue(&sqs.DeleteQueueInput{\n\t\tQueueUrl: aws.String(queueURL),\n\t})\n\treturn err\n}\n\nfunc (c *awsPubSub) deleteTopic(resource *Resource) error {\n\tqueueURL, err := c.getTopicARN(resource.Name)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\t_, err = c.sns.DeleteTopic(&sns.DeleteTopicInput{\n\t\tTopicArn: aws.String(queueURL),\n\t})\n\treturn err\n}\n\nfunc (c *awsPubSub) Delete(resource *Resource) error {\n\tswitch resource.Type {\n\tcase ResourceTypeQueue:\n\t\treturn c.deleteQueue(resource)\n\tcase ResourceTypeTopic:\n\t\treturn c.deleteTopic(resource)\n\t}\n\treturn fmt.Errorf(\"unsupported resource type: %v\", resource.Type)\n}\n\nfunc (c *awsPubSub) Close() error {\n\treturn nil\n}\n\nfunc newAwsSqsClient(credConfig *cred.Config, timeout time.Duration) (Client, error) {\n\tconfig, err := eaws.GetAWSCredentialConfig(credConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar client = &awsPubSub{\n\t\ttimeout: timeout,\n\t}\n\tif client.session, err = session.NewSession(config); err != nil {\n\t\treturn nil, err\n\t}\n\tclient.sqs = sqs.New(client.session)\n\tclient.sns = sns.New(client.session)\n\treturn client, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage integration_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/canned\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestMountHelper(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype MountHelperTest struct {\n\t\/\/ Path to the mount(8) helper binary.\n\thelperPath string\n\n\t\/\/ A temporary directory into which a file system may be mounted. 
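It is\n\t\/\/ created fresh for each test by SetUp (via ioutil.TempDir) and 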
Removed in\n\t\/\/ TearDown.\n\tdir string\n}\n\nvar _ SetUpInterface = &MountHelperTest{}\nvar _ TearDownInterface = &MountHelperTest{}\n\nfunc init() { RegisterTestSuite(&MountHelperTest{}) }\n\nfunc (t *MountHelperTest) SetUp(_ *TestInfo) {\n\tvar err error\n\n\t\/\/ Set up the appropriate helper path.\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount_gcsfuse\")\n\n\tcase \"linux\":\n\t\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount.gcsfuse\")\n\n\tdefault:\n\t\tAddFailure(\"Don't know how to deal with OS: %q\", runtime.GOOS)\n\t\tAbortTest()\n\t}\n\n\t\/\/ Set up the temporary directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"mount_helper_test\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *MountHelperTest) TearDown() {\n\terr := os.Remove(t.dir)\n\tAssertEq(nil, err)\n}\n\nfunc (t *MountHelperTest) mountHelperCommand(args []string) (cmd *exec.Cmd) {\n\tcmd = exec.Command(t.helperPath)\n\tcmd.Args = append(cmd.Args, args...)\n\tcmd.Env = []string{\n\t\tfmt.Sprintf(\"PATH=%s\", path.Join(gBuildDir, \"bin\")),\n\t}\n\n\treturn\n}\n\nfunc (t *MountHelperTest) mount(args []string) (err error) {\n\tcmd := t.mountHelperCommand(args)\n\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"CombinedOutput: %v\\nOutput:\\n%s\", err, output)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *MountHelperTest) BadUsage() {\n\ttestCases := []struct {\n\t\targs []string\n\t\texpectedOutput string\n\t}{\n\t\t\/\/ Too few args\n\t\t0: {\n\t\t\t[]string{canned.FakeBucketName},\n\t\t\t\"two positional arguments\",\n\t\t},\n\n\t\t\/\/ Too many args\n\t\t1: {\n\t\t\t[]string{canned.FakeBucketName, \"a\", \"b\"},\n\t\t\t\"Unexpected arg 3\",\n\t\t},\n\n\t\t\/\/ Trailing -o\n\t\t2: {\n\t\t\t[]string{canned.FakeBucketName, \"a\", \"-o\"},\n\t\t\t\"Unexpected -o\",\n\t\t},\n\t}\n\n\t\/\/ Run each test case.\n\tfor i, tc := range testCases {\n\t\tcmd := t.mountHelperCommand(tc.args)\n\n\t\toutput, err := cmd.CombinedOutput()\n\t\tExpectThat(err, Error(HasSubstr(\"exit status\")), \"case %d\", i)\n\t\tExpectThat(string(output), MatchesRegexp(tc.expectedOutput), \"case %d\", i)\n\t}\n}\n\nfunc (t *MountHelperTest) SuccessfulMount() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount.\n\targs := []string{canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) RelativeMountPoint() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount with a relative mount point.\n\tcmd := t.mountHelperCommand([]string{\n\t\tcanned.FakeBucketName,\n\t\tpath.Base(t.dir),\n\t})\n\n\tcmd.Dir = path.Dir(t.dir)\n\n\toutput, err := cmd.CombinedOutput()\n\tAssertEq(nil, err, \"output:\\n%s\", output)\n\n\tdefer unmount(t.dir)\n\n\t\/\/ The file system should be available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), 
fi.Size())\n}\n\nfunc (t *MountHelperTest) ReadOnlyMode() {\n\tvar err error\n\n\t\/\/ Mount.\n\targs := []string{\"-o\", \"ro\", canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Writing to the file system should fail.\n\terr = ioutil.WriteFile(path.Join(t.dir, \"blah\"), []byte{}, 0400)\n\tExpectThat(err, Error(HasSubstr(\"read-only\")))\n}\n\nfunc (t *MountHelperTest) ExtraneousOptions() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount with extra junk that shouldn't be passed on.\n\targs := []string{\n\t\t\"-o\", \"noauto,nouser,auto,user\",\n\t\tcanned.FakeBucketName,\n\t\tt.dir,\n\t}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) LinuxArgumentOrder() {\n\tvar err error\n\n\t\/\/ Linux places the options at the end.\n\targs := []string{canned.FakeBucketName, t.dir, \"-o\", \"ro\"}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Writing to the file system should fail.\n\terr = ioutil.WriteFile(path.Join(t.dir, \"blah\"), []byte{}, 0400)\n\tExpectThat(err, Error(HasSubstr(\"read-only\")))\n}\n\nfunc (t *MountHelperTest) FuseSubtype() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ This test isn't relevant except on Linux.\n\tif runtime.GOOS != \"linux\" {\n\t\treturn\n\t}\n\n\t\/\/ Mount using the tool that would be invoked by ~mount -t fuse.gcsfuse`.\n\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount.fuse.gcsfuse\")\n\targs := []string{canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\nAdd integration tests for #151.\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage integration_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/canned\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. 
\"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestMountHelper(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype MountHelperTest struct {\n\t\/\/ Path to the mount(8) helper binary.\n\thelperPath string\n\n\t\/\/ A temporary directory into which a file system may be mounted. Removed in\n\t\/\/ TearDown.\n\tdir string\n}\n\nvar _ SetUpInterface = &MountHelperTest{}\nvar _ TearDownInterface = &MountHelperTest{}\n\nfunc init() { RegisterTestSuite(&MountHelperTest{}) }\n\nfunc (t *MountHelperTest) SetUp(_ *TestInfo) {\n\tvar err error\n\n\t\/\/ Set up the appropriate helper path.\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount_gcsfuse\")\n\n\tcase \"linux\":\n\t\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount.gcsfuse\")\n\n\tdefault:\n\t\tAddFailure(\"Don't know how to deal with OS: %q\", runtime.GOOS)\n\t\tAbortTest()\n\t}\n\n\t\/\/ Set up the temporary directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"mount_helper_test\")\n\tAssertEq(nil, err)\n}\n\nfunc (t *MountHelperTest) TearDown() {\n\terr := os.Remove(t.dir)\n\tAssertEq(nil, err)\n}\n\nfunc (t *MountHelperTest) mountHelperCommand(args []string) (cmd *exec.Cmd) {\n\tcmd = exec.Command(t.helperPath)\n\tcmd.Args = append(cmd.Args, args...)\n\tcmd.Env = []string{\n\t\tfmt.Sprintf(\"PATH=%s\", path.Join(gBuildDir, \"bin\")),\n\t}\n\n\treturn\n}\n\nfunc (t *MountHelperTest) mount(args []string) (err error) {\n\tcmd := t.mountHelperCommand(args)\n\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"CombinedOutput: %v\\nOutput:\\n%s\", err, output)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *MountHelperTest) BadUsage() {\n\ttestCases := []struct {\n\t\targs []string\n\t\texpectedOutput string\n\t}{\n\t\t\/\/ Too few args\n\t\t0: {\n\t\t\t[]string{canned.FakeBucketName},\n\t\t\t\"two positional arguments\",\n\t\t},\n\n\t\t\/\/ Too many args\n\t\t1: {\n\t\t\t[]string{canned.FakeBucketName, \"a\", \"b\"},\n\t\t\t\"Unexpected arg 3\",\n\t\t},\n\n\t\t\/\/ Trailing -o\n\t\t2: {\n\t\t\t[]string{canned.FakeBucketName, \"a\", \"-o\"},\n\t\t\t\"Unexpected -o\",\n\t\t},\n\t}\n\n\t\/\/ Run each test case.\n\tfor i, tc := range testCases {\n\t\tcmd := t.mountHelperCommand(tc.args)\n\n\t\toutput, err := cmd.CombinedOutput()\n\t\tExpectThat(err, Error(HasSubstr(\"exit status\")), \"case %d\", i)\n\t\tExpectThat(string(output), MatchesRegexp(tc.expectedOutput), \"case %d\", i)\n\t}\n}\n\nfunc (t *MountHelperTest) SuccessfulMount() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount.\n\targs := []string{canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) 
RelativeMountPoint() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount with a relative mount point.\n\tcmd := t.mountHelperCommand([]string{\n\t\tcanned.FakeBucketName,\n\t\tpath.Base(t.dir),\n\t})\n\n\tcmd.Dir = path.Dir(t.dir)\n\n\toutput, err := cmd.CombinedOutput()\n\tAssertEq(nil, err, \"output:\\n%s\", output)\n\n\tdefer unmount(t.dir)\n\n\t\/\/ The file system should be available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) ReadOnlyMode() {\n\tvar err error\n\n\t\/\/ Mount.\n\targs := []string{\"-o\", \"ro\", canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Writing to the file system should fail.\n\terr = ioutil.WriteFile(path.Join(t.dir, \"blah\"), []byte{}, 0400)\n\tExpectThat(err, Error(HasSubstr(\"read-only\")))\n}\n\nfunc (t *MountHelperTest) ExtraneousOptions() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount with extra junk that shouldn't be passed on.\n\targs := []string{\n\t\t\"-o\", \"noauto,nouser,auto,user\",\n\t\tcanned.FakeBucketName,\n\t\tt.dir,\n\t}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) LinuxArgumentOrder() {\n\tvar err error\n\n\t\/\/ Linux places the options at the end.\n\targs := []string{canned.FakeBucketName, t.dir, \"-o\", \"ro\"}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Writing to the file system should fail.\n\terr = ioutil.WriteFile(path.Join(t.dir, \"blah\"), []byte{}, 0400)\n\tExpectThat(err, Error(HasSubstr(\"read-only\")))\n}\n\nfunc (t *MountHelperTest) FuseSubtype() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ This test isn't relevant except on Linux.\n\tif runtime.GOOS != \"linux\" {\n\t\treturn\n\t}\n\n\t\/\/ Mount using the tool that would be invoked by `mount -t fuse.gcsfuse`.\n\tt.helperPath = path.Join(gBuildDir, \"sbin\/mount.fuse.gcsfuse\")\n\targs := []string{canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Check that the file system is available.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0644), fi.Mode())\n\tExpectEq(len(canned.TopLevelFile_Contents), fi.Size())\n}\n\nfunc (t *MountHelperTest) ModeOptions() {\n\tvar err error\n\tvar fi os.FileInfo\n\n\t\/\/ Mount.\n\targs := []string{\n\t\t\"-o\", \"dir_mode=754\",\n\t\t\"-o\", \"file_mode=612\",\n\t\tcanned.FakeBucketName, t.dir,\n\t}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ Stat the directory.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelDir))\n\tAssertEq(nil, err)\n\tExpectEq(os.FileMode(0754)|os.ModeDir, fi.Mode())\n\n\t\/\/ Stat the file.\n\tfi, err = os.Lstat(path.Join(t.dir, canned.TopLevelFile))\n\tAssertEq(nil, err)\n\t\/\/ 0612 is octal, matching file_mode=612 above; plain files carry no ModeDir bit.\n\tExpectEq(os.FileMode(0612), fi.Mode())\n}\n\nfunc (t *MountHelperTest) ImplicitDirs() {\n\tvar err error\n\n\t\/\/ Mount.\n\targs := []string{\"-o\", \"implicit_dirs\", canned.FakeBucketName, t.dir}\n\n\terr = t.mount(args)\n\tAssertEq(nil, err)\n\tdefer unmount(t.dir)\n\n\t\/\/ The implicit directory 
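(one that exists only as a prefix of\n\t\/\/ other objects' names, with no placeholder object of its own) 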
should be visible.\n\tfi, err := os.Lstat(path.Join(t.dir, path.Dir(canned.ImplicitDirFile)))\n\tAssertEq(nil, err)\n\tExpectTrue(fi.IsDir())\n}\n<|endoftext|>"} {"text":"\/\/ Package mock contains mock implementations of different task interfaces.\npackage mock\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdata\/platform\"\n\t\"github.com\/influxdata\/platform\/task\/backend\"\n\tscheduler \"github.com\/influxdata\/platform\/task\/backend\"\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ Scheduler is a mock implementation of a task scheduler.\ntype Scheduler struct {\n\tsync.Mutex\n\n\tlastTick int64\n\n\tclaims map[string]*Task\n\tmeta map[string]backend.StoreTaskMeta\n\n\tcreateChan chan *Task\n\treleaseChan chan *Task\n\n\tclaimError error\n\treleaseError error\n}\n\n\/\/ Task is a mock implementation of a task.\ntype Task struct {\n\tScript string\n\tStartExecution int64\n\tConcurrencyLimit uint8\n}\n\nfunc NewScheduler() *Scheduler {\n\treturn &Scheduler{\n\t\tclaims: map[string]*Task{},\n\t\tmeta: map[string]backend.StoreTaskMeta{},\n\t}\n}\n\nfunc (s *Scheduler) Tick(now int64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.lastTick = now\n}\n\nfunc (s *Scheduler) WithLogger(l *zap.Logger) {}\n\nfunc (s *Scheduler) Start(context.Context) {}\n\nfunc (s *Scheduler) Stop() {}\n\nfunc (s *Scheduler) ClaimTask(task *backend.StoreTask, meta *backend.StoreTaskMeta) error {\n\tif s.claimError != nil {\n\t\treturn s.claimError\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\t_, ok := s.claims[task.ID.String()]\n\tif ok {\n\t\treturn errors.New(\"task already in list\")\n\t}\n\ts.meta[task.ID.String()] = *meta\n\n\tt := &Task{Script: task.Script, StartExecution: meta.LatestCompleted, ConcurrencyLimit: uint8(meta.MaxConcurrency)}\n\n\ts.claims[task.ID.String()] = t\n\n\tif s.createChan != nil {\n\t\ts.createChan <- t\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scheduler) ReleaseTask(taskID platform.ID) error {\n\tif s.releaseError != nil {\n\t\treturn s.releaseError\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tt, ok := s.claims[taskID.String()]\n\tif !ok {\n\t\treturn errors.New(\"task not in list\")\n\t}\n\tif s.releaseChan != nil {\n\t\ts.releaseChan <- t\n\t}\n\n\tdelete(s.claims, taskID.String())\n\tdelete(s.meta, taskID.String())\n\n\treturn nil\n}\n\nfunc (s *Scheduler) TaskFor(id platform.ID) *Task {\n\treturn s.claims[id.String()]\n}\n\nfunc (s *Scheduler) TaskCreateChan() <-chan *Task {\n\ts.createChan = make(chan *Task, 10)\n\treturn s.createChan\n}\nfunc (s *Scheduler) TaskReleaseChan() <-chan *Task {\n\ts.releaseChan = make(chan *Task, 10)\n\treturn s.releaseChan\n}\n\n\/\/ ClaimError sets an error to be returned by s.ClaimTask, if err is not nil.\nfunc (s *Scheduler) ClaimError(err error) {\n\ts.claimError = err\n}\n\n\/\/ ReleaseError sets an error to be returned by s.ReleaseTask, if err is not nil.\nfunc (s *Scheduler) ReleaseError(err error) {\n\ts.releaseError = err\n}\n\n\/\/ DesiredState is a mock implementation of DesiredState (used by NewScheduler).\ntype DesiredState struct {\n\tmu sync.Mutex\n\t\/\/ Map of stringified task ID to last ID used for run.\n\trunIDs map[string]uint64\n\n\t\/\/ Map of stringified, concatenated task and platform ID, to runs that have been created.\n\tcreated map[string]backend.QueuedRun\n\n\t\/\/ Map of stringified task ID to task meta.\n\tmeta map[string]backend.StoreTaskMeta\n}\n\nvar _ backend.DesiredState = (*DesiredState)(nil)\n\nfunc NewDesiredState() *DesiredState {\n\treturn 
&DesiredState{\n\t\trunIDs: make(map[string]uint64),\n\t\tcreated: make(map[string]backend.QueuedRun),\n\t\tmeta: make(map[string]backend.StoreTaskMeta),\n\t}\n}\n\n\/\/ SetTaskMeta sets the task meta for the given task ID.\n\/\/ SetTaskMeta must be called before CreateNextRun, for a given task ID.\nfunc (d *DesiredState) SetTaskMeta(taskID platform.ID, meta backend.StoreTaskMeta) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\td.meta[taskID.String()] = meta\n}\n\n\/\/ CreateNextRun creates the next run for the given task.\n\/\/ Refer to the documentation for SetTaskPeriod to understand how the times are determined.\nfunc (d *DesiredState) CreateNextRun(_ context.Context, taskID platform.ID, now int64) (backend.RunCreation, error) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\ttid := taskID.String()\n\n\tmeta, ok := d.meta[tid]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"meta not set for task with ID %s\", tid))\n\t}\n\n\tmakeID := func() (platform.ID, error) {\n\t\td.runIDs[tid]++\n\t\trunID := make([]byte, 4)\n\t\tbinary.BigEndian.PutUint32(runID, d.runIDs[tid])\n\t\treturn platform.ID(runID), nil\n\t}\n\n\trc, err := meta.CreateNextRun(now, makeID)\n\tif err != nil {\n\t\treturn backend.RunCreation{}, err\n\t}\n\td.meta[tid] = meta\n\trc.Created.TaskID = append([]byte(nil), taskID...)\n\td.created[tid+rc.Created.RunID.String()] = rc.Created\n\treturn rc, nil\n}\n\nfunc (d *DesiredState) FinishRun(_ context.Context, taskID, runID platform.ID) error {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\ttid := taskID.String()\n\trid := runID.String()\n\tm := d.meta[tid]\n\tif !m.FinishRun(runID) {\n\t\tvar knownIDs []string\n\t\tfor _, r := range m.CurrentlyRunning {\n\t\t\tknownIDs = append(knownIDs, platform.ID(r.RunID).String())\n\t\t}\n\t\treturn fmt.Errorf(\"unknown run ID %s; known run IDs: %s\", rid, strings.Join(knownIDs, \", \"))\n\t}\n\td.meta[tid] = m\n\tdelete(d.created, tid+rid)\n\treturn nil\n}\n\nfunc (d *DesiredState) CreatedFor(taskID platform.ID) []backend.QueuedRun {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\tvar qrs []backend.QueuedRun\n\tfor _, qr := range d.created {\n\t\tif qr.TaskID == taskID {\n\t\t\tqrs = append(qrs, qr)\n\t\t}\n\t}\n\n\treturn qrs\n}\n\n\/\/ PollForNumberCreated blocks for a small amount of time waiting for exactly the given count of created runs for the given task ID.\n\/\/ If the expected number isn't found in time, it returns an error.\n\/\/\n\/\/ Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.\nfunc (d *DesiredState) PollForNumberCreated(taskID platform.ID, count int) ([]scheduler.QueuedRun, error) {\n\tconst numAttempts = 50\n\tactualCount := 0\n\tvar created []scheduler.QueuedRun\n\tfor i := 0; i < numAttempts; i++ {\n\t\ttime.Sleep(2 * time.Millisecond) \/\/ we sleep even on first so it becomes more likely that we catch when too many are produced.\n\t\tcreated = d.CreatedFor(taskID)\n\t\tactualCount = len(created)\n\t\tif actualCount == count {\n\t\t\treturn created, nil\n\t\t}\n\t}\n\treturn created, fmt.Errorf(\"did not see count of %d created task(s) for ID %s in time, instead saw %d\", count, taskID.String(), actualCount) \/\/ we return created anyways, to make it easier to debug\n}\n\ntype Executor struct {\n\tmu sync.Mutex\n\n\t\/\/ Map of stringified, concatenated task and run ID, to runs that have begun execution but have not finished.\n\trunning map[string]*RunPromise\n\n\t\/\/ Map of stringified, concatenated task and run ID, to results of runs that have executed and completed.\n\tfinished 
map[string]backend.RunResult\n}\n\nvar _ backend.Executor = (*Executor)(nil)\n\nfunc NewExecutor() *Executor {\n\treturn &Executor{\n\t\trunning: make(map[string]*RunPromise),\n\t\tfinished: make(map[string]backend.RunResult),\n\t}\n}\n\nfunc (e *Executor) Execute(_ context.Context, run backend.QueuedRun) (backend.RunPromise, error) {\n\trp := NewRunPromise(run)\n\n\tid := run.TaskID.String() + run.RunID.String()\n\te.mu.Lock()\n\te.running[id] = rp\n\te.mu.Unlock()\n\tgo func() {\n\t\tres, _ := rp.Wait()\n\t\te.mu.Lock()\n\t\tdelete(e.running, id)\n\t\te.finished[id] = res\n\t\te.mu.Unlock()\n\t}()\n\treturn rp, nil\n}\n\nfunc (e *Executor) WithLogger(l *zap.Logger) {}\n\n\/\/ RunningFor returns the run promises for the given task.\nfunc (e *Executor) RunningFor(taskID platform.ID) []*RunPromise {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tvar rps []*RunPromise\n\tfor _, rp := range e.running {\n\t\tif rp.Run().TaskID == taskID {\n\t\t\trps = append(rps, rp)\n\t\t}\n\t}\n\n\treturn rps\n}\n\n\/\/ PollForNumberRunning blocks for a small amount of time waiting for exactly the given count of active runs for the given task ID.\n\/\/ If the expected number isn't found in time, it returns an error.\n\/\/\n\/\/ Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.\nfunc (e *Executor) PollForNumberRunning(taskID platform.ID, count int) ([]*RunPromise, error) {\n\tconst numAttempts = 20\n\tvar running []*RunPromise\n\tfor i := 0; i < numAttempts; i++ {\n\t\tif i > 0 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\trunning = e.RunningFor(taskID)\n\t\tif len(running) == count {\n\t\t\treturn running, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"did not see count of %d running task(s) for ID %s in time; last count was %d\", count, taskID.String(), len(running))\n}\n\n\/\/ RunPromise is a mock RunPromise.\ntype RunPromise struct {\n\tqr backend.QueuedRun\n\n\tsetResultOnce sync.Once\n\n\tmu sync.Mutex\n\tres backend.RunResult\n\terr error\n}\n\nvar _ backend.RunPromise = (*RunPromise)(nil)\n\nfunc NewRunPromise(qr backend.QueuedRun) *RunPromise {\n\tp := &RunPromise{\n\t\tqr: qr,\n\t}\n\tp.mu.Lock() \/\/ Locked so calls to Wait will block until setResultOnce is called.\n\treturn p\n}\n\nfunc (p *RunPromise) Run() backend.QueuedRun {\n\treturn p.qr\n}\n\nfunc (p *RunPromise) Wait() (backend.RunResult, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\treturn p.res, p.err\n}\n\nfunc (p *RunPromise) Cancel() {\n\tp.Finish(nil, backend.ErrRunCanceled)\n}\n\n\/\/ Finish unblocks any call to Wait, to return r and err.\n\/\/ Only the first call to Finish has any effect.\nfunc (p *RunPromise) Finish(r backend.RunResult, err error) {\n\tp.setResultOnce.Do(func() {\n\t\tp.res, p.err = r, err\n\t\tp.mu.Unlock()\n\t})\n}\n\n\/\/ RunResult is a mock implementation of RunResult.\ntype RunResult struct {\n\terr error\n\tisRetryable bool\n}\n\nvar _ backend.RunResult = (*RunResult)(nil)\n\nfunc NewRunResult(err error, isRetryable bool) *RunResult {\n\treturn &RunResult{err: err, isRetryable: isRetryable}\n}\n\nfunc (rr *RunResult) Err() error {\n\treturn rr.err\n}\n\nfunc (rr *RunResult) IsRetryable() bool {\n\treturn rr.isRetryable\n}\nfix(task\/mock): porting to uint64 IDs\/\/ Package mock contains mock implementations of different task interfaces.\npackage mock\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdata\/platform\"\n\t\"github.com\/influxdata\/platform\/task\/backend\"\n\tscheduler 
\"github.com\/influxdata\/platform\/task\/backend\"\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ Scheduler is a mock implementation of a task scheduler.\ntype Scheduler struct {\n\tsync.Mutex\n\n\tlastTick int64\n\n\tclaims map[string]*Task\n\tmeta map[string]backend.StoreTaskMeta\n\n\tcreateChan chan *Task\n\treleaseChan chan *Task\n\n\tclaimError error\n\treleaseError error\n}\n\n\/\/ Task is a mock implementation of a task.\ntype Task struct {\n\tScript string\n\tStartExecution int64\n\tConcurrencyLimit uint8\n}\n\nfunc NewScheduler() *Scheduler {\n\treturn &Scheduler{\n\t\tclaims: map[string]*Task{},\n\t\tmeta: map[string]backend.StoreTaskMeta{},\n\t}\n}\n\nfunc (s *Scheduler) Tick(now int64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.lastTick = now\n}\n\nfunc (s *Scheduler) WithLogger(l *zap.Logger) {}\n\nfunc (s *Scheduler) Start(context.Context) {}\n\nfunc (s *Scheduler) Stop() {}\n\n
func (s *Scheduler) ClaimTask(task *backend.StoreTask, meta *backend.StoreTaskMeta) error {\n\tif s.claimError != nil {\n\t\treturn s.claimError\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\t_, ok := s.claims[task.ID.String()]\n\tif ok {\n\t\treturn errors.New(\"task already in list\")\n\t}\n\ts.meta[task.ID.String()] = *meta\n\n\tt := &Task{Script: task.Script, StartExecution: meta.LatestCompleted, ConcurrencyLimit: uint8(meta.MaxConcurrency)}\n\n\ts.claims[task.ID.String()] = t\n\n\tif s.createChan != nil {\n\t\ts.createChan <- t\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scheduler) ReleaseTask(taskID platform.ID) error {\n\tif s.releaseError != nil {\n\t\treturn s.releaseError\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tt, ok := s.claims[taskID.String()]\n\tif !ok {\n\t\treturn errors.New(\"task not in list\")\n\t}\n\tif s.releaseChan != nil {\n\t\ts.releaseChan <- t\n\t}\n\n\tdelete(s.claims, taskID.String())\n\tdelete(s.meta, taskID.String())\n\n\treturn nil\n}\n\nfunc (s *Scheduler) TaskFor(id platform.ID) *Task {\n\treturn s.claims[id.String()]\n}\n\nfunc (s *Scheduler) TaskCreateChan() <-chan *Task {\n\ts.createChan = make(chan *Task, 10)\n\treturn s.createChan\n}\nfunc (s *Scheduler) TaskReleaseChan() <-chan *Task {\n\ts.releaseChan = make(chan *Task, 10)\n\treturn s.releaseChan\n}\n\n
\/\/ ClaimError sets an error to be returned by s.ClaimTask, if err is not nil.\nfunc (s *Scheduler) ClaimError(err error) {\n\ts.claimError = err\n}\n\n\/\/ ReleaseError sets an error to be returned by s.ReleaseTask, if err is not nil.\nfunc (s *Scheduler) ReleaseError(err error) {\n\ts.releaseError = err\n}\n\n\/\/ DesiredState is a mock implementation of DesiredState (used by NewScheduler).\ntype DesiredState struct {\n\tmu sync.Mutex\n\t\/\/ Map of stringified task ID to last ID used for run.\n\trunIDs map[string]uint64\n\n\t\/\/ Map of stringified, concatenated task and run ID, to runs that have been created.\n\tcreated map[string]backend.QueuedRun\n\n\t\/\/ Map of stringified task ID to task meta.\n\tmeta map[string]backend.StoreTaskMeta\n}\n\nvar _ backend.DesiredState = (*DesiredState)(nil)\n\nfunc NewDesiredState() *DesiredState {\n\treturn &DesiredState{\n\t\trunIDs: make(map[string]uint64),\n\t\tcreated: make(map[string]backend.QueuedRun),\n\t\tmeta: make(map[string]backend.StoreTaskMeta),\n\t}\n}\n\n\/\/ SetTaskMeta sets the task meta for the given task ID.\n\/\/ SetTaskMeta must be called before CreateNextRun, for a given task ID.\nfunc (d *DesiredState) SetTaskMeta(taskID platform.ID, meta backend.StoreTaskMeta) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\td.meta[taskID.String()] = meta\n}\n\n\/\/ CreateNextRun creates the next run for the given task.\n
\/\/ Refer to the documentation for SetTaskMeta to understand how the times are determined.\nfunc (d *DesiredState) CreateNextRun(_ context.Context, taskID platform.ID, now int64) (backend.RunCreation, error) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\ttid := taskID.String()\n\n\tmeta, ok := d.meta[tid]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"meta not set for task with ID %s\", tid))\n\t}\n\n\tmakeID := func() (platform.ID, error) {\n\t\td.runIDs[tid]++\n\t\trunID := platform.ID(d.runIDs[tid])\n\t\treturn runID, nil\n\t}\n\n\trc, err := meta.CreateNextRun(now, makeID)\n\tif err != nil {\n\t\treturn backend.RunCreation{}, err\n\t}\n\td.meta[tid] = meta\n\trc.Created.TaskID = taskID\n\td.created[tid+rc.Created.RunID.String()] = rc.Created\n\treturn rc, nil\n}\n\nfunc (d *DesiredState) FinishRun(_ context.Context, taskID, runID platform.ID) error {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\ttid := taskID.String()\n\trid := runID.String()\n\tm := d.meta[tid]\n\tif !m.FinishRun(runID) {\n\t\tvar knownIDs []string\n\t\tfor _, r := range m.CurrentlyRunning {\n\t\t\tknownIDs = append(knownIDs, platform.ID(r.RunID).String())\n\t\t}\n\t\treturn fmt.Errorf(\"unknown run ID %s; known run IDs: %s\", rid, strings.Join(knownIDs, \", \"))\n\t}\n\td.meta[tid] = m\n\tdelete(d.created, tid+rid)\n\treturn nil\n}\n\nfunc (d *DesiredState) CreatedFor(taskID platform.ID) []backend.QueuedRun {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\tvar qrs []backend.QueuedRun\n\tfor _, qr := range d.created {\n\t\tif qr.TaskID == taskID {\n\t\t\tqrs = append(qrs, qr)\n\t\t}\n\t}\n\n\treturn qrs\n}\n\n
\/\/ PollForNumberCreated blocks for a small amount of time waiting for exactly the given count of created runs for the given task ID.\n\/\/ If the expected number isn't found in time, it returns an error.\n\/\/\n\/\/ Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.\nfunc (d *DesiredState) PollForNumberCreated(taskID platform.ID, count int) ([]scheduler.QueuedRun, error) {\n\tconst numAttempts = 50\n\tactualCount := 0\n\tvar created []scheduler.QueuedRun\n\tfor i := 0; i < numAttempts; i++ {\n\t\ttime.Sleep(2 * time.Millisecond) \/\/ we sleep even on first so it becomes more likely that we catch when too many are produced.\n\t\tcreated = d.CreatedFor(taskID)\n\t\tactualCount = len(created)\n\t\tif actualCount == count {\n\t\t\treturn created, nil\n\t\t}\n\t}\n\treturn created, fmt.Errorf(\"did not see count of %d created task(s) for ID %s in time, instead saw %d\", count, taskID.String(), actualCount) \/\/ we return created anyways, to make it easier to debug\n}\n\n
type Executor struct {\n\tmu sync.Mutex\n\n\t\/\/ Map of stringified, concatenated task and run ID, to runs that have begun execution but have not finished.\n\trunning map[string]*RunPromise\n\n\t\/\/ Map of stringified, concatenated task and run ID, to results of runs that have executed and completed.\n\tfinished map[string]backend.RunResult\n}\n\nvar _ backend.Executor = (*Executor)(nil)\n\nfunc NewExecutor() *Executor {\n\treturn &Executor{\n\t\trunning: make(map[string]*RunPromise),\n\t\tfinished: make(map[string]backend.RunResult),\n\t}\n}\n\nfunc (e *Executor) Execute(_ context.Context, run backend.QueuedRun) (backend.RunPromise, error) {\n\trp := NewRunPromise(run)\n\n\tid := run.TaskID.String() + run.RunID.String()\n\te.mu.Lock()\n\te.running[id] = rp\n\te.mu.Unlock()\n\tgo func() {\n\t\tres, _ := rp.Wait()\n\t\te.mu.Lock()\n\t\tdelete(e.running, 
id)\n\t\te.finished[id] = res\n\t\te.mu.Unlock()\n\t}()\n\treturn rp, nil\n}\n\nfunc (e *Executor) WithLogger(l *zap.Logger) {}\n\n\/\/ RunningFor returns the run promises for the given task.\nfunc (e *Executor) RunningFor(taskID platform.ID) []*RunPromise {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tvar rps []*RunPromise\n\tfor _, rp := range e.running {\n\t\tif rp.Run().TaskID == taskID {\n\t\t\trps = append(rps, rp)\n\t\t}\n\t}\n\n\treturn rps\n}\n\n\/\/ PollForNumberRunning blocks for a small amount of time waiting for exactly the given count of active runs for the given task ID.\n\/\/ If the expected number isn't found in time, it returns an error.\n\/\/\n\/\/ Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.\nfunc (e *Executor) PollForNumberRunning(taskID platform.ID, count int) ([]*RunPromise, error) {\n\tconst numAttempts = 20\n\tvar running []*RunPromise\n\tfor i := 0; i < numAttempts; i++ {\n\t\tif i > 0 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\trunning = e.RunningFor(taskID)\n\t\tif len(running) == count {\n\t\t\treturn running, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"did not see count of %d running task(s) for ID %s in time; last count was %d\", count, taskID.String(), len(running))\n}\n\n\/\/ RunPromise is a mock RunPromise.\ntype RunPromise struct {\n\tqr backend.QueuedRun\n\n\tsetResultOnce sync.Once\n\n\tmu sync.Mutex\n\tres backend.RunResult\n\terr error\n}\n\nvar _ backend.RunPromise = (*RunPromise)(nil)\n\nfunc NewRunPromise(qr backend.QueuedRun) *RunPromise {\n\tp := &RunPromise{\n\t\tqr: qr,\n\t}\n\tp.mu.Lock() \/\/ Locked so calls to Wait will block until setResultOnce is called.\n\treturn p\n}\n\nfunc (p *RunPromise) Run() backend.QueuedRun {\n\treturn p.qr\n}\n\nfunc (p *RunPromise) Wait() (backend.RunResult, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\treturn p.res, p.err\n}\n\nfunc (p *RunPromise) Cancel() {\n\tp.Finish(nil, backend.ErrRunCanceled)\n}\n\n\/\/ Finish unblocks any call to Wait, to return r and err.\n\/\/ Only the first call to Finish has any effect.\nfunc (p *RunPromise) Finish(r backend.RunResult, err error) {\n\tp.setResultOnce.Do(func() {\n\t\tp.res, p.err = r, err\n\t\tp.mu.Unlock()\n\t})\n}\n\n\/\/ RunResult is a mock implementation of RunResult.\ntype RunResult struct {\n\terr error\n\tisRetryable bool\n}\n\nvar _ backend.RunResult = (*RunResult)(nil)\n\nfunc NewRunResult(err error, isRetryable bool) *RunResult {\n\treturn &RunResult{err: err, isRetryable: isRetryable}\n}\n\nfunc (rr *RunResult) Err() error {\n\treturn rr.err\n}\n\nfunc (rr *RunResult) IsRetryable() bool {\n\treturn rr.isRetryable\n}\n<|endoftext|>"} {"text":"package comet\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"time\"\n\n\tlogic \"github.com\/Terry-Mao\/goim\/api\/logic\/grpc\"\n\t\"github.com\/Terry-Mao\/goim\/internal\/comet\/conf\"\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/zhenjl\/cityhash\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/keepalive\"\n)\n\nvar (\n\tmaxInt = 1<<31 - 1\n\t\/\/ grpc options\n\tgrpcKeepAliveTime = time.Duration(10) * time.Second\n\tgrpcKeepAliveTimeout = time.Duration(3) * time.Second\n\tgrpcBackoffMaxDelay = time.Duration(3) * time.Second\n\tgrpcMaxSendMsgSize = 1 << 24\n\tgrpcMaxCallMsgSize = 1 << 24\n)\n\nconst (\n\tclientHeartbeat = time.Second * 90\n\tminSrvHeartbeatSecond = time.Minute * 10\n\tmaxSrvHeartbeatSecond = time.Minute * 30\n\t\/\/ grpc options\n\tgrpcInitialWindowSize = 1 << 24\n\tgrpcInitialConnWindowSize = 1 << 
24\n)\n\n\/\/ Server is comet server.\ntype Server struct {\n\tc *conf.Config\n\tround *Round \/\/ accept round store\n\tbuckets []*Bucket \/\/ subkey bucket\n\tbucketIdx uint32\n\n\tserverID string\n\trpcClient logic.LogicClient\n}\n\nfunc newLogicClient(c *conf.RPCClient) logic.LogicClient {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Dial))\n\tdefer cancel()\n\tconn, err := grpc.DialContext(ctx, \"discovery:\/\/default\/goim.logic\",\n\t\t[]grpc.DialOption{\n\t\t\tgrpc.WithInsecure(),\n\t\t\tgrpc.WithInitialWindowSize(grpcInitialWindowSize),\n\t\t\tgrpc.WithInitialConnWindowSize(grpcInitialConnWindowSize),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcMaxCallMsgSize)),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(grpcMaxSendMsgSize)),\n\t\t\tgrpc.WithBackoffMaxDelay(grpcBackoffMaxDelay),\n\t\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\t\tTime: grpcKeepAliveTime,\n\t\t\t\tTimeout: grpcKeepAliveTimeout,\n\t\t\t\tPermitWithoutStream: true,\n\t\t\t}),\n\t\t}...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn logic.NewLogicClient(conn)\n}\n\n\/\/ NewServer returns a new Server.\nfunc NewServer(c *conf.Config) *Server {\n\ts := &Server{\n\t\tc: c,\n\t\tround: NewRound(c),\n\t\trpcClient: newLogicClient(c.RPCClient),\n\t}\n\t\/\/ init bucket\n\ts.buckets = make([]*Bucket, c.Bucket.Size)\n\ts.bucketIdx = uint32(c.Bucket.Size)\n\tfor i := 0; i < c.Bucket.Size; i++ {\n\t\ts.buckets[i] = NewBucket(c.Bucket)\n\t}\n\ts.serverID = c.Env.Host\n\tgo s.onlineproc()\n\treturn s\n}\n\n\/\/ Buckets return all buckets.\nfunc (s *Server) Buckets() []*Bucket {\n\treturn s.buckets\n}\n\n\/\/ Bucket get the bucket by subkey.\nfunc (s *Server) Bucket(subKey string) *Bucket {\n\tidx := cityhash.CityHash32([]byte(subKey), uint32(len(subKey))) % s.bucketIdx\n\tif conf.Conf.Debug {\n\t\tlog.Infof(\"%s hit channel bucket index: %d use cityhash\", subKey, idx)\n\t}\n\treturn s.buckets[idx]\n}\n\n\/\/ RandServerHearbeat rand server heartbeat.\nfunc (s *Server) RandServerHearbeat() time.Duration {\n\treturn (minSrvHeartbeatSecond + time.Duration(rand.Intn(int(maxSrvHeartbeatSecond-minSrvHeartbeatSecond))))\n}\n\n\/\/ Close close the server.\nfunc (s *Server) Close() (err error) {\n\treturn\n}\n\nfunc (s *Server) onlineproc() {\n\tfor {\n\t\tvar (\n\t\t\tallRoomsCount map[string]int32\n\t\t\terr error\n\t\t)\n\t\troomCount := make(map[string]int32)\n\t\tfor _, bucket := range s.buckets {\n\t\t\tfor roomID, count := range bucket.RoomsCount() {\n\t\t\t\troomCount[roomID] += count\n\t\t\t}\n\t\t}\n\t\tif allRoomsCount, err = s.RenewOnline(context.Background(), s.serverID, roomCount); err != nil {\n\t\t\ttime.Sleep(time.Duration(s.c.OnlineTick))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, bucket := range s.buckets {\n\t\t\tbucket.UpRoomsCount(allRoomsCount)\n\t\t}\n\t\ttime.Sleep(time.Duration(s.c.OnlineTick))\n\t}\n}\nadd grpc balancerpackage comet\n\nimport (\n\t\"context\"\n\t\"math\/rand\"\n\t\"time\"\n\n\tlogic \"github.com\/Terry-Mao\/goim\/api\/logic\/grpc\"\n\t\"github.com\/Terry-Mao\/goim\/internal\/comet\/conf\"\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/zhenjl\/cityhash\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/balancer\/roundrobin\"\n\t\"google.golang.org\/grpc\/keepalive\"\n)\n\nvar (\n\tmaxInt = 1<<31 - 1\n\t\/\/ grpc options\n\tgrpcKeepAliveTime = time.Duration(10) * time.Second\n\tgrpcKeepAliveTimeout = time.Duration(3) * time.Second\n\tgrpcBackoffMaxDelay = time.Duration(3) * 
time.Second\n\tgrpcMaxSendMsgSize = 1 << 24\n\tgrpcMaxCallMsgSize = 1 << 24\n)\n\nconst (\n\tclientHeartbeat = time.Second * 90\n\tminSrvHeartbeatSecond = time.Minute * 10\n\tmaxSrvHeartbeatSecond = time.Minute * 30\n\t\/\/ grpc options\n\tgrpcInitialWindowSize = 1 << 24\n\tgrpcInitialConnWindowSize = 1 << 24\n)\n\n\/\/ Server is comet server.\ntype Server struct {\n\tc *conf.Config\n\tround *Round \/\/ accept round store\n\tbuckets []*Bucket \/\/ subkey bucket\n\tbucketIdx uint32\n\n\tserverID string\n\trpcClient logic.LogicClient\n}\n\nfunc newLogicClient(c *conf.RPCClient) logic.LogicClient {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Dial))\n\tdefer cancel()\n\tconn, err := grpc.DialContext(ctx, \"discovery:\/\/default\/goim.logic\",\n\t\t[]grpc.DialOption{\n\t\t\tgrpc.WithInsecure(),\n\t\t\tgrpc.WithInitialWindowSize(grpcInitialWindowSize),\n\t\t\tgrpc.WithInitialConnWindowSize(grpcInitialConnWindowSize),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcMaxCallMsgSize)),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(grpcMaxSendMsgSize)),\n\t\t\tgrpc.WithBackoffMaxDelay(grpcBackoffMaxDelay),\n\t\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\t\tTime: grpcKeepAliveTime,\n\t\t\t\tTimeout: grpcKeepAliveTimeout,\n\t\t\t\tPermitWithoutStream: true,\n\t\t\t}),\n\t\t\tgrpc.WithBalancerName(roundrobin.Name),\n\t\t}...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn logic.NewLogicClient(conn)\n}\n\n\/\/ NewServer returns a new Server.\nfunc NewServer(c *conf.Config) *Server {\n\ts := &Server{\n\t\tc: c,\n\t\tround: NewRound(c),\n\t\trpcClient: newLogicClient(c.RPCClient),\n\t}\n\t\/\/ init bucket\n\ts.buckets = make([]*Bucket, c.Bucket.Size)\n\ts.bucketIdx = uint32(c.Bucket.Size)\n\tfor i := 0; i < c.Bucket.Size; i++ {\n\t\ts.buckets[i] = NewBucket(c.Bucket)\n\t}\n\ts.serverID = c.Env.Host\n\tgo s.onlineproc()\n\treturn s\n}\n\n\/\/ Buckets return all buckets.\nfunc (s *Server) Buckets() []*Bucket {\n\treturn s.buckets\n}\n\n\/\/ Bucket get the bucket by subkey.\nfunc (s *Server) Bucket(subKey string) *Bucket {\n\tidx := cityhash.CityHash32([]byte(subKey), uint32(len(subKey))) % s.bucketIdx\n\tif conf.Conf.Debug {\n\t\tlog.Infof(\"%s hit channel bucket index: %d use cityhash\", subKey, idx)\n\t}\n\treturn s.buckets[idx]\n}\n\n\/\/ RandServerHearbeat rand server heartbeat.\nfunc (s *Server) RandServerHearbeat() time.Duration {\n\treturn (minSrvHeartbeatSecond + time.Duration(rand.Intn(int(maxSrvHeartbeatSecond-minSrvHeartbeatSecond))))\n}\n\n\/\/ Close close the server.\nfunc (s *Server) Close() (err error) {\n\treturn\n}\n\nfunc (s *Server) onlineproc() {\n\tfor {\n\t\tvar (\n\t\t\tallRoomsCount map[string]int32\n\t\t\terr error\n\t\t)\n\t\troomCount := make(map[string]int32)\n\t\tfor _, bucket := range s.buckets {\n\t\t\tfor roomID, count := range bucket.RoomsCount() {\n\t\t\t\troomCount[roomID] += count\n\t\t\t}\n\t\t}\n\t\tif allRoomsCount, err = s.RenewOnline(context.Background(), s.serverID, roomCount); err != nil {\n\t\t\ttime.Sleep(time.Duration(s.c.OnlineTick))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, bucket := range s.buckets {\n\t\t\tbucket.UpRoomsCount(allRoomsCount)\n\t\t}\n\t\ttime.Sleep(time.Duration(s.c.OnlineTick))\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright © 2017 Circonus, Inc. 
\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage plugins\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/builtins\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\t\"github.com\/maier\/go-appstats\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Scan the plugin directory for new\/updated plugins\nfunc (p *Plugins) Scan(b *builtins.Builtins) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.pluginDir == \"\" {\n\t\treturn nil\n\t}\n\n\t\/\/ initialRun fires each plugin one time. Unlike 'Run' it does\n\t\/\/ not wait for plugins to finish this will provides:\n\t\/\/\n\t\/\/ 1. an initial seeding of results\n\t\/\/ 2. starts any long running plugins without blocking\n\t\/\/\n\tinitialRun := func() error {\n\t\tfor id, plug := range p.active {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"plugin\", id).\n\t\t\t\tMsg(\"Initializing\")\n\t\t\tgo plug.exec()\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ only applicable if dynamic reloading implemented\n\t\/\/ if err := p.Stop(); err != nil {\n\t\/\/ \treturn errors.Wrap(err, \"stopping plugin(s)\")\n\t\/\/ }\n\n\tif err := p.scanPluginDirectory(b); err != nil {\n\t\treturn errors.Wrap(err, \"plugin directory scan\")\n\t}\n\n\tif err := initialRun(); err != nil {\n\t\treturn errors.Wrap(err, \"initializing plugin(s)\")\n\t}\n\n\treturn nil\n}\n\n\/\/ scanPluginDirectory finds and loads plugins\nfunc (p *Plugins) scanPluginDirectory(b *builtins.Builtins) error {\n\tif p.pluginDir == \"\" {\n\t\treturn errors.New(\"invalid plugin directory (none)\")\n\t}\n\n\tp.logger.Info().\n\t\tStr(\"dir\", p.pluginDir).\n\t\tMsg(\"Scanning plugin directory\")\n\n\tf, err := os.Open(p.pluginDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"open plugin directory\")\n\t}\n\n\tdefer f.Close()\n\n\tfiles, err := f.Readdir(-1)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading plugin directory\")\n\t}\n\n\tttlRx, err := regexp.Compile(`_ttl(.+)$`)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"compiling ttl regex\")\n\t}\n\tttlUnitRx, err := regexp.Compile(`(ms|s|m|h)$`)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"compiling ttl unit regex\")\n\t}\n\n\tfor _, fi := range files {\n\t\tfileName := fi.Name()\n\n\t\tp.logger.Debug().\n\t\t\tStr(\"path\", filepath.Join(p.pluginDir, fileName)).\n\t\t\tMsg(\"checking plugin directory entry\")\n\n\t\tif fi.IsDir() {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"directory, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfileBase := fileName\n\t\tfileExt := filepath.Ext(fileName)\n\n\t\tif fileExt != \"\" {\n\t\t\tfileBase = strings.Replace(fileName, fileExt, \"\", -1)\n\t\t}\n\n\t\tif fileBase == \"\" || fileExt == \"\" {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"invalid file name format, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif fileExt == \".conf\" || fileExt == \".json\" {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"config file, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, reserved := p.reservedNames[fileBase]; reserved {\n\t\t\tp.logger.Warn().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"reserved plugin name, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cmdName string\n\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsRegular():\n\t\t\tcmdName = 
filepath.Join(p.pluginDir, fi.Name())\n\t\tcase mode&os.ModeSymlink != 0:\n\t\t\tresolvedSymlink, err := filepath.EvalSymlinks(filepath.Join(p.pluginDir, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Warn().\n\t\t\t\t\tErr(err).\n\t\t\t\t\tStr(\"file\", fi.Name()).\n\t\t\t\t\tMsg(\"Error resolving symlink, ignoring\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcmdName = resolvedSymlink\n\t\tdefault:\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"not a regular file or symlink, ignoring\")\n\t\t\tcontinue \/\/ just ignore it\n\t\t}\n\n\t\tif perm := fi.Mode().Perm() & 0111; perm != 73 {\n\t\t\tp.logger.Warn().\n\t\t\t\tStr(\"file\", cmdName).\n\t\t\t\tStr(\"perms\", fmt.Sprintf(\"%q\", fi.Mode().Perm())).\n\t\t\t\tMsg(\"executable bit not set, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif b != nil && b.IsBuiltin(fileBase) {\n\t\t\tp.logger.Warn().Str(\"id\", fileBase).Msg(\"Builtin collector already enabled, skipping plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cfg map[string][]string\n\n\t\t\/\/ check for config file\n\t\tcfgFile := filepath.Join(p.pluginDir, fmt.Sprintf(\"%s.json\", fileBase))\n\t\tif data, err := ioutil.ReadFile(cfgFile); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tp.logger.Warn().\n\t\t\t\t\tErr(err).\n\t\t\t\t\tStr(\"config\", cfgFile).\n\t\t\t\t\tStr(\"plugin\", fileBase).Msg(\"plugin config\")\n\t\t\t}\n\t\t} else {\n\t\t\tif len(data) > 0 {\n\t\t\t\terr := json.Unmarshal(data, &cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Warn().\n\t\t\t\t\t\tErr(err).\n\t\t\t\t\t\tStr(\"config\", cfgFile).\n\t\t\t\t\t\tStr(\"plugin\", fileBase).\n\t\t\t\t\t\tStr(\"data\", string(data)).\n\t\t\t\t\t\tMsg(\"parsing config\")\n\t\t\t\t}\n\n\t\t\t\tp.logger.Debug().\n\t\t\t\t\tStr(\"config\", fmt.Sprintf(\"%+v\", cfg)).\n\t\t\t\t\tMsg(\"loaded plugin config\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse fileBase for _ttl(.+)\n\t\tmatches := ttlRx.FindAllStringSubmatch(fileBase, -1)\n\t\tvar runTTL time.Duration\n\t\tif len(matches) > 0 && len(matches[0]) > 1 {\n\t\t\tttl := matches[0][1]\n\t\t\tif ttl != \"\" {\n\t\t\t\tif !ttlUnitRx.MatchString(ttl) {\n\t\t\t\t\tttl += viper.GetString(config.KeyPluginTTLUnits)\n\t\t\t\t}\n\n\t\t\t\tif d, err := time.ParseDuration(ttl); err != nil {\n\t\t\t\t\tp.logger.Warn().Err(err).Str(\"ttl\", ttl).Msg(\"parsing plugin ttl, ignoring ttl\")\n\t\t\t\t} else {\n\t\t\t\t\trunTTL = d\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif cfg == nil {\n\t\t\tplug, ok := p.active[fileBase]\n\t\t\tif !ok {\n\t\t\t\tp.active[fileBase] = &plugin{\n\t\t\t\t\tctx: p.ctx,\n\t\t\t\t\tid: fileBase,\n\t\t\t\t\tname: fileBase,\n\t\t\t\t\tlogger: p.logger.With().Str(\"plugin\", fileBase).Logger(),\n\t\t\t\t\trunDir: p.pluginDir,\n\t\t\t\t\trunTTL: runTTL,\n\t\t\t\t}\n\t\t\t\tplug = p.active[fileBase]\n\t\t\t}\n\n\t\t\tappstats.MapIncrementInt(\"plugins\", \"total\")\n\t\t\tplug.command = cmdName\n\t\t\tp.logger.Info().\n\t\t\t\tStr(\"id\", fileBase).\n\t\t\t\tStr(\"cmd\", cmdName).\n\t\t\t\tMsg(\"Activating plugin\")\n\n\t\t} else {\n\t\t\tfor inst, args := range cfg {\n\t\t\t\tpluginName := fmt.Sprintf(\"%s`%s\", fileBase, inst)\n\t\t\t\tplug, ok := p.active[pluginName]\n\t\t\t\tif !ok {\n\t\t\t\t\tp.active[pluginName] = &plugin{\n\t\t\t\t\t\tctx: p.ctx,\n\t\t\t\t\t\tid: fileBase,\n\t\t\t\t\t\tinstanceID: inst,\n\t\t\t\t\t\tinstanceArgs: args,\n\t\t\t\t\t\tname: pluginName,\n\t\t\t\t\t\tlogger: p.logger.With().Str(\"plugin\", pluginName).Logger(),\n\t\t\t\t\t\trunDir: p.pluginDir,\n\t\t\t\t\t\trunTTL: runTTL,\n\t\t\t\t\t}\n\t\t\t\t\tplug = 
p.active[pluginName]\n\t\t\t\t}\n\n\t\t\t\tappstats.MapIncrementInt(\"plugins\", \"total\")\n\t\t\t\tplug.command = cmdName\n\t\t\t\tp.logger.Info().\n\t\t\t\t\tStr(\"id\", pluginName).\n\t\t\t\t\tStr(\"cmd\", cmdName).\n\t\t\t\t\tMsg(\"Activating plugin\")\n\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(p.active) == 0 {\n\t\treturn errors.New(\"No active plugins found\")\n\t}\n\n\treturn nil\n}\nfix: skip exec perm check for windows\/\/ Copyright © 2017 Circonus, Inc. \n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage plugins\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/builtins\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\t\"github.com\/maier\/go-appstats\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Scan the plugin directory for new\/updated plugins\nfunc (p *Plugins) Scan(b *builtins.Builtins) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.pluginDir == \"\" {\n\t\treturn nil\n\t}\n\n\t\/\/ initialRun fires each plugin one time. Unlike 'Run' it does\n\t\/\/ not wait for plugins to finish this will provides:\n\t\/\/\n\t\/\/ 1. an initial seeding of results\n\t\/\/ 2. starts any long running plugins without blocking\n\t\/\/\n\tinitialRun := func() error {\n\t\tfor id, plug := range p.active {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"plugin\", id).\n\t\t\t\tMsg(\"Initializing\")\n\t\t\tgo plug.exec()\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ only applicable if dynamic reloading implemented\n\t\/\/ if err := p.Stop(); err != nil {\n\t\/\/ \treturn errors.Wrap(err, \"stopping plugin(s)\")\n\t\/\/ }\n\n\tif err := p.scanPluginDirectory(b); err != nil {\n\t\treturn errors.Wrap(err, \"plugin directory scan\")\n\t}\n\n\tif err := initialRun(); err != nil {\n\t\treturn errors.Wrap(err, \"initializing plugin(s)\")\n\t}\n\n\treturn nil\n}\n\n\/\/ scanPluginDirectory finds and loads plugins\nfunc (p *Plugins) scanPluginDirectory(b *builtins.Builtins) error {\n\tif p.pluginDir == \"\" {\n\t\treturn errors.New(\"invalid plugin directory (none)\")\n\t}\n\n\tp.logger.Info().\n\t\tStr(\"dir\", p.pluginDir).\n\t\tMsg(\"Scanning plugin directory\")\n\n\tf, err := os.Open(p.pluginDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"open plugin directory\")\n\t}\n\n\tdefer f.Close()\n\n\tfiles, err := f.Readdir(-1)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading plugin directory\")\n\t}\n\n\tttlRx, err := regexp.Compile(`_ttl(.+)$`)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"compiling ttl regex\")\n\t}\n\tttlUnitRx, err := regexp.Compile(`(ms|s|m|h)$`)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"compiling ttl unit regex\")\n\t}\n\n\tfor _, fi := range files {\n\t\tfileName := fi.Name()\n\n\t\tp.logger.Debug().\n\t\t\tStr(\"path\", filepath.Join(p.pluginDir, fileName)).\n\t\t\tMsg(\"checking plugin directory entry\")\n\n\t\tif fi.IsDir() {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"directory, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfileBase := fileName\n\t\tfileExt := filepath.Ext(fileName)\n\n\t\tif fileExt != \"\" {\n\t\t\tfileBase = strings.Replace(fileName, fileExt, \"\", -1)\n\t\t}\n\n\t\tif fileBase == \"\" || fileExt == \"\" {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"invalid file name format, 
ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif fileExt == \".conf\" || fileExt == \".json\" {\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"config file, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, reserved := p.reservedNames[fileBase]; reserved {\n\t\t\tp.logger.Warn().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"reserved plugin name, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cmdName string\n\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsRegular():\n\t\t\tcmdName = filepath.Join(p.pluginDir, fi.Name())\n\t\tcase mode&os.ModeSymlink != 0:\n\t\t\tresolvedSymlink, err := filepath.EvalSymlinks(filepath.Join(p.pluginDir, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Warn().\n\t\t\t\t\tErr(err).\n\t\t\t\t\tStr(\"file\", fi.Name()).\n\t\t\t\t\tMsg(\"Error resolving symlink, ignoring\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcmdName = resolvedSymlink\n\t\tdefault:\n\t\t\tp.logger.Debug().\n\t\t\t\tStr(\"file\", fileName).\n\t\t\t\tMsg(\"not a regular file or symlink, ignoring\")\n\t\t\tcontinue \/\/ just ignore it\n\t\t}\n\n\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\/\/ windows doesn't have an e'x'ecutable bit, all files are\n\t\t\t\/\/ 'potentially' executable - binary exe, interpreted scripts, etc.\n\t\t\tif perm := fi.Mode().Perm() & 0111; perm != 73 {\n\t\t\t\tp.logger.Warn().\n\t\t\t\t\tStr(\"file\", cmdName).\n\t\t\t\t\tStr(\"perms\", fmt.Sprintf(\"%q\", fi.Mode().Perm())).\n\t\t\t\t\tMsg(\"executable bit not set, ignoring\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif b != nil && b.IsBuiltin(fileBase) {\n\t\t\tp.logger.Warn().Str(\"id\", fileBase).Msg(\"Builtin collector already enabled, skipping plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cfg map[string][]string\n\n\t\t\/\/ check for config file\n\t\tcfgFile := filepath.Join(p.pluginDir, fmt.Sprintf(\"%s.json\", fileBase))\n\t\tif data, err := ioutil.ReadFile(cfgFile); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tp.logger.Warn().\n\t\t\t\t\tErr(err).\n\t\t\t\t\tStr(\"config\", cfgFile).\n\t\t\t\t\tStr(\"plugin\", fileBase).Msg(\"plugin config\")\n\t\t\t}\n\t\t} else {\n\t\t\tif len(data) > 0 {\n\t\t\t\terr := json.Unmarshal(data, &cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Warn().\n\t\t\t\t\t\tErr(err).\n\t\t\t\t\t\tStr(\"config\", cfgFile).\n\t\t\t\t\t\tStr(\"plugin\", fileBase).\n\t\t\t\t\t\tStr(\"data\", string(data)).\n\t\t\t\t\t\tMsg(\"parsing config\")\n\t\t\t\t}\n\n\t\t\t\tp.logger.Debug().\n\t\t\t\t\tStr(\"config\", fmt.Sprintf(\"%+v\", cfg)).\n\t\t\t\t\tMsg(\"loaded plugin config\")\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse fileBase for _ttl(.+)\n\t\tmatches := ttlRx.FindAllStringSubmatch(fileBase, -1)\n\t\tvar runTTL time.Duration\n\t\tif len(matches) > 0 && len(matches[0]) > 1 {\n\t\t\tttl := matches[0][1]\n\t\t\tif ttl != \"\" {\n\t\t\t\tif !ttlUnitRx.MatchString(ttl) {\n\t\t\t\t\tttl += viper.GetString(config.KeyPluginTTLUnits)\n\t\t\t\t}\n\n\t\t\t\tif d, err := time.ParseDuration(ttl); err != nil {\n\t\t\t\t\tp.logger.Warn().Err(err).Str(\"ttl\", ttl).Msg(\"parsing plugin ttl, ignoring ttl\")\n\t\t\t\t} else {\n\t\t\t\t\trunTTL = d\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif cfg == nil {\n\t\t\tplug, ok := p.active[fileBase]\n\t\t\tif !ok {\n\t\t\t\tp.active[fileBase] = &plugin{\n\t\t\t\t\tctx: p.ctx,\n\t\t\t\t\tid: fileBase,\n\t\t\t\t\tname: fileBase,\n\t\t\t\t\tlogger: p.logger.With().Str(\"plugin\", fileBase).Logger(),\n\t\t\t\t\trunDir: p.pluginDir,\n\t\t\t\t\trunTTL: runTTL,\n\t\t\t\t}\n\t\t\t\tplug = 
p.active[fileBase]\n\t\t\t}\n\n\t\t\tappstats.MapIncrementInt(\"plugins\", \"total\")\n\t\t\tplug.command = cmdName\n\t\t\tp.logger.Info().\n\t\t\t\tStr(\"id\", fileBase).\n\t\t\t\tStr(\"cmd\", cmdName).\n\t\t\t\tMsg(\"Activating plugin\")\n\n\t\t} else {\n\t\t\tfor inst, args := range cfg {\n\t\t\t\tpluginName := fmt.Sprintf(\"%s`%s\", fileBase, inst)\n\t\t\t\tplug, ok := p.active[pluginName]\n\t\t\t\tif !ok {\n\t\t\t\t\tp.active[pluginName] = &plugin{\n\t\t\t\t\t\tctx: p.ctx,\n\t\t\t\t\t\tid: fileBase,\n\t\t\t\t\t\tinstanceID: inst,\n\t\t\t\t\t\tinstanceArgs: args,\n\t\t\t\t\t\tname: pluginName,\n\t\t\t\t\t\tlogger: p.logger.With().Str(\"plugin\", pluginName).Logger(),\n\t\t\t\t\t\trunDir: p.pluginDir,\n\t\t\t\t\t\trunTTL: runTTL,\n\t\t\t\t\t}\n\t\t\t\t\tplug = p.active[pluginName]\n\t\t\t\t}\n\n\t\t\t\tappstats.MapIncrementInt(\"plugins\", \"total\")\n\t\t\t\tplug.command = cmdName\n\t\t\t\tp.logger.Info().\n\t\t\t\t\tStr(\"id\", pluginName).\n\t\t\t\t\tStr(\"cmd\", cmdName).\n\t\t\t\t\tMsg(\"Activating plugin\")\n\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(p.active) == 0 {\n\t\treturn errors.New(\"No active plugins found\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build android ios\n\npackage ui\n\nimport (\n\t\"errors\"\n\t\"image\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/mobile\/app\"\n\t\"golang.org\/x\/mobile\/event\/lifecycle\"\n\t\"golang.org\/x\/mobile\/event\/paint\"\n\t\"golang.org\/x\/mobile\/event\/size\"\n\t\"golang.org\/x\/mobile\/event\/touch\"\n\t\"golang.org\/x\/mobile\/gl\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/devicescale\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/hooks\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/input\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/opengl\"\n)\n\nvar (\n\tglContextCh = make(chan gl.Context)\n\trenderCh = make(chan struct{})\n\trenderChEnd = make(chan struct{})\n\tcurrentUI = &userInterface{}\n)\n\nfunc Render(chError <-chan error) error {\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tif chError == nil {\n\t\treturn errors.New(\"ui: chError must not be nil\")\n\t}\n\t\/\/ TODO: Check this is called on the rendering thread\n\tselect {\n\tcase renderCh <- struct{}{}:\n\t\treturn opengl.GetContext().DoWork(chError, renderChEnd)\n\tcase <-time.After(500 * time.Millisecond):\n\t\t\/\/ This function must not be blocked. 
We need to break for timeout.\n\t\treturn nil\n\t}\n}\n\ntype userInterface struct {\n\twidth int\n\theight int\n\tscale float64\n\tsizeChanged bool\n\n\t\/\/ Used for gomobile-build\n\tfullscreenScale float64\n\tfullscreenWidthPx int\n\tfullscreenHeightPx int\n\n\tm sync.RWMutex\n}\n\nvar (\n\tdeviceScaleVal float64\n\tdeviceScaleM sync.Mutex\n)\n\nfunc deviceScale() float64 {\n\tdeviceScaleM.Lock()\n\tdefer deviceScaleM.Unlock()\n\n\tif deviceScaleVal == 0 {\n\t\tdeviceScaleVal = devicescale.Get()\n\t}\n\treturn deviceScaleVal\n}\n\n\/\/ appMain is the main routine for gomobile-build mode.\nfunc appMain(a app.App) {\n\tvar glctx gl.Context\n\ttouches := map[touch.Sequence]*input.Touch{}\n\tfor e := range a.Events() {\n\t\tswitch e := a.Filter(e).(type) {\n\t\tcase lifecycle.Event:\n\t\t\tswitch e.Crosses(lifecycle.StageVisible) {\n\t\t\tcase lifecycle.CrossOn:\n\t\t\t\tglctx, _ = e.DrawContext.(gl.Context)\n\t\t\t\t\/\/ Assume that glctx is always a same instance.\n\t\t\t\t\/\/ Then, only once initializing should be enough.\n\t\t\t\tif glContextCh != nil {\n\t\t\t\t\tglContextCh <- glctx\n\t\t\t\t\tglContextCh = nil\n\t\t\t\t}\n\t\t\t\ta.Send(paint.Event{})\n\t\t\tcase lifecycle.CrossOff:\n\t\t\t\tglctx = nil\n\t\t\t}\n\t\tcase size.Event:\n\t\t\tsetFullscreen(e.WidthPx, e.HeightPx)\n\t\tcase paint.Event:\n\t\t\tif glctx == nil || e.External {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trenderCh <- struct{}{}\n\t\t\t<-renderChEnd\n\t\t\ta.Publish()\n\t\t\ta.Send(paint.Event{})\n\t\tcase touch.Event:\n\t\t\tswitch e.Type {\n\t\t\tcase touch.TypeBegin, touch.TypeMove:\n\t\t\t\ts := deviceScale()\n\t\t\t\tx, y := float64(e.X)\/s, float64(e.Y)\/s\n\t\t\t\t\/\/ TODO: Is it ok to cast from int64 to int here?\n\t\t\t\tt := input.NewTouch(int(e.Sequence), int(x), int(y))\n\t\t\t\ttouches[e.Sequence] = t\n\t\t\tcase touch.TypeEnd:\n\t\t\t\tdelete(touches, e.Sequence)\n\t\t\t}\n\t\t\tts := []*input.Touch{}\n\t\t\tfor _, t := range touches {\n\t\t\t\tts = append(ts, t)\n\t\t\t}\n\t\t\tUpdateTouches(ts)\n\t\t}\n\t}\n}\n\nfunc Run(width, height int, scale float64, title string, g GraphicsContext, mainloop bool) error {\n\tu := currentUI\n\n\tu.m.Lock()\n\tu.width = width\n\tu.height = height\n\tu.scale = scale\n\tu.sizeChanged = true\n\tu.m.Unlock()\n\t\/\/ title is ignored?\n\n\tif mainloop {\n\t\tctx := <-glContextCh\n\t\topengl.InitWithContext(ctx)\n\t} else {\n\t\topengl.Init()\n\t}\n\n\t\/\/ Force to set the screen size\n\tu.updateGraphicsContext(g)\n\tfor {\n\t\tif err := u.update(g); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ RunMainThreadLoop runs the main routine for gomobile-build.\nfunc RunMainThreadLoop(ch <-chan error) error {\n\tgo func() {\n\t\t\/\/ As mobile apps never ends, RunMainThreadLoop can't return.\n\t\t\/\/ Just panic here.\n\t\terr := <-ch\n\t\tpanic(err)\n\t}()\n\tapp.Main(appMain)\n\treturn nil\n}\n\nfunc (u *userInterface) updateGraphicsContext(g GraphicsContext) {\n\twidth, height := 0, 0\n\tactualScale := 0.0\n\n\tu.m.Lock()\n\tsizeChanged := u.sizeChanged\n\tif sizeChanged {\n\t\twidth = u.width\n\t\theight = u.height\n\t\tactualScale = u.scaleImpl() * deviceScale()\n\t}\n\tu.sizeChanged = false\n\tu.m.Unlock()\n\n\tif sizeChanged {\n\t\t\/\/ Sizing also calls GL functions\n\t\tg.SetSize(width, height, actualScale)\n\t}\n}\n\nfunc actualScale() float64 {\n\treturn currentUI.actualScale()\n}\n\nfunc (u *userInterface) actualScale() float64 {\n\tu.m.Lock()\n\ts := u.scaleImpl() * deviceScale()\n\tu.m.Unlock()\n\treturn s\n}\n\nfunc (u *userInterface) scaleImpl() float64 
{\n\tscale := u.scale\n\tif u.fullscreenScale != 0 {\n\t\tscale = u.fullscreenScale\n\t}\n\treturn scale\n}\n\nfunc (u *userInterface) update(g GraphicsContext) error {\nrender:\n\tfor {\n\t\tselect {\n\t\tcase <-renderCh:\n\t\t\tbreak render\n\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\thooks.SuspendAudio()\n\t\t\tcontinue\n\t\t}\n\t}\n\thooks.ResumeAudio()\n\n\tdefer func() {\n\t\trenderChEnd <- struct{}{}\n\t}()\n\n\tif err := g.Update(func() {\n\t\tu.updateGraphicsContext(g)\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc screenSize() (int, int) {\n\treturn currentUI.screenSize()\n}\n\nfunc (u *userInterface) screenSize() (int, int) {\n\tu.m.Lock()\n\tw, h := u.width, u.height\n\tu.m.Unlock()\n\treturn w, h\n}\n\nfunc MonitorSize() (int, int) {\n\t\/\/ TODO: This function should return fullscreenWidthPx, fullscreenHeightPx,\n\t\/\/ but these values are not initialized until the main loop starts.\n\treturn 0, 0\n}\n\nfunc SetScreenSize(width, height int) bool {\n\tcurrentUI.setScreenSize(width, height)\n\treturn true\n}\n\nfunc (u *userInterface) setScreenSize(width, height int) {\n\tu.m.Lock()\n\tif u.width != width || u.height != height {\n\t\tu.width = width\n\t\tu.height = height\n\t\tu.updateFullscreenScaleIfNeeded()\n\t\tu.sizeChanged = true\n\t}\n\tu.m.Unlock()\n}\n\nfunc SetScreenScale(scale float64) bool {\n\tcurrentUI.setScreenScale(scale)\n\treturn false\n}\n\nfunc (u *userInterface) setScreenScale(scale float64) {\n\tu.m.Lock()\n\tif u.scale != scale {\n\t\tu.scale = scale\n\t\tu.sizeChanged = true\n\t}\n\tu.m.Unlock()\n}\n\nfunc ScreenScale() float64 {\n\tu := currentUI\n\tu.m.RLock()\n\ts := u.scale\n\tu.m.RUnlock()\n\treturn s\n}\n\nfunc setFullscreen(widthPx, heightPx int) {\n\tcurrentUI.setFullscreen(widthPx, heightPx)\n}\n\nfunc (u *userInterface) setFullscreen(widthPx, heightPx int) {\n\tu.m.Lock()\n\tu.fullscreenWidthPx = widthPx\n\tu.fullscreenHeightPx = heightPx\n\tu.updateFullscreenScaleIfNeeded()\n\tu.sizeChanged = true\n\tu.m.Unlock()\n}\n\nfunc (u *userInterface) updateFullscreenScaleIfNeeded() {\n\tif u.fullscreenWidthPx == 0 || u.fullscreenHeightPx == 0 {\n\t\treturn\n\t}\n\tw, h := u.width, u.height\n\tscaleX := float64(u.fullscreenWidthPx) \/ float64(w)\n\tscaleY := float64(u.fullscreenHeightPx) \/ float64(h)\n\tscale := scaleX\n\tif scale > scaleY {\n\t\tscale = scaleY\n\t}\n\tu.fullscreenScale = scale \/ deviceScale()\n\tu.sizeChanged = true\n}\n\nfunc ScreenPadding() (x0, y0, x1, y1 float64) {\n\treturn currentUI.screenPadding()\n}\n\nfunc (u *userInterface) screenPadding() (x0, y0, x1, y1 float64) {\n\tu.m.Lock()\n\tx0, y0, x1, y1 = u.screenPaddingImpl()\n\tu.m.Unlock()\n\treturn\n}\n\nfunc (u *userInterface) screenPaddingImpl() (x0, y0, x1, y1 float64) {\n\tif u.fullscreenScale == 0 {\n\t\treturn 0, 0, 0, 0\n\t}\n\ts := u.fullscreenScale * deviceScale()\n\tox := (float64(u.fullscreenWidthPx) - float64(u.width)*s) \/ 2\n\toy := (float64(u.fullscreenHeightPx) - float64(u.height)*s) \/ 2\n\treturn ox, oy, ox, oy\n}\n\nfunc AdjustedCursorPosition() (x, y int) {\n\treturn currentUI.adjustPosition(input.Get().CursorPosition())\n}\n\nfunc AdjustedTouches() []*input.Touch {\n\tts := input.Get().Touches()\n\tadjusted := make([]*input.Touch, len(ts))\n\tfor i, t := range ts {\n\t\tx, y := currentUI.adjustPosition(t.Position())\n\t\tadjusted[i] = input.NewTouch(t.ID(), x, y)\n\t}\n\treturn adjusted\n}\n\nfunc (u *userInterface) adjustPosition(x, y int) (int, int) {\n\tu.m.Lock()\n\tox, oy, _, _ := u.screenPaddingImpl()\n\ts := 
u.scaleImpl()\n\tas := s * deviceScale()\n\tu.m.Unlock()\n\treturn int(float64(x)\/s - ox\/as), int(float64(y)\/s - oy\/as)\n}\n\nfunc IsCursorVisible() bool {\n\treturn false\n}\n\nfunc SetCursorVisible(visible bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsFullscreen() bool {\n\treturn false\n}\n\nfunc SetFullscreen(fullscreen bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsRunnableInBackground() bool {\n\treturn false\n}\n\nfunc SetRunnableInBackground(runnableInBackground bool) {\n\t\/\/ Do nothing\n}\n\nfunc SetWindowTitle(title string) {\n\t\/\/ Do nothing\n}\n\nfunc SetWindowIcon(iconImages []image.Image) {\n\t\/\/ Do nothing\n}\n\nfunc IsWindowDecorated() bool {\n\treturn false\n}\n\nfunc SetWindowDecorated(decorated bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsVsyncEnabled() bool {\n\treturn true\n}\n\nfunc SetVsyncEnabled(enabled bool) {\n\t\/\/ Do nothing\n}\n\nfunc UpdateTouches(touches []*input.Touch) {\n\tinput.Get().UpdateTouches(touches)\n}\nui: Bug fix: compile error on mobiles\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build android ios\n\npackage ui\n\nimport (\n\t\"errors\"\n\t\"image\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/mobile\/app\"\n\t\"golang.org\/x\/mobile\/event\/lifecycle\"\n\t\"golang.org\/x\/mobile\/event\/paint\"\n\t\"golang.org\/x\/mobile\/event\/size\"\n\t\"golang.org\/x\/mobile\/event\/touch\"\n\t\"golang.org\/x\/mobile\/gl\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/devicescale\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/hooks\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/input\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/opengl\"\n)\n\nvar (\n\tglContextCh = make(chan gl.Context)\n\trenderCh = make(chan struct{})\n\trenderChEnd = make(chan struct{})\n\tcurrentUI = &userInterface{}\n)\n\nfunc Render(chError <-chan error) error {\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tif chError == nil {\n\t\treturn errors.New(\"ui: chError must not be nil\")\n\t}\n\t\/\/ TODO: Check this is called on the rendering thread\n\tselect {\n\tcase renderCh <- struct{}{}:\n\t\treturn opengl.GetContext().DoWork(chError, renderChEnd)\n\tcase <-time.After(500 * time.Millisecond):\n\t\t\/\/ This function must not be blocked. 
We need to break for timeout.\n\t\treturn nil\n\t}\n}\n\ntype userInterface struct {\n\twidth int\n\theight int\n\tscale float64\n\tsizeChanged bool\n\n\t\/\/ Used for gomobile-build\n\tfullscreenScale float64\n\tfullscreenWidthPx int\n\tfullscreenHeightPx int\n\n\tm sync.RWMutex\n}\n\nvar (\n\tdeviceScaleVal float64\n\tdeviceScaleM sync.Mutex\n)\n\nfunc getDeviceScale() float64 {\n\tdeviceScaleM.Lock()\n\tdefer deviceScaleM.Unlock()\n\n\tif deviceScaleVal == 0 {\n\t\tdeviceScaleVal = devicescale.Get()\n\t}\n\treturn deviceScaleVal\n}\n\n\/\/ appMain is the main routine for gomobile-build mode.\nfunc appMain(a app.App) {\n\tvar glctx gl.Context\n\ttouches := map[touch.Sequence]*input.Touch{}\n\tfor e := range a.Events() {\n\t\tswitch e := a.Filter(e).(type) {\n\t\tcase lifecycle.Event:\n\t\t\tswitch e.Crosses(lifecycle.StageVisible) {\n\t\t\tcase lifecycle.CrossOn:\n\t\t\t\tglctx, _ = e.DrawContext.(gl.Context)\n\t\t\t\t\/\/ Assume that glctx is always a same instance.\n\t\t\t\t\/\/ Then, only once initializing should be enough.\n\t\t\t\tif glContextCh != nil {\n\t\t\t\t\tglContextCh <- glctx\n\t\t\t\t\tglContextCh = nil\n\t\t\t\t}\n\t\t\t\ta.Send(paint.Event{})\n\t\t\tcase lifecycle.CrossOff:\n\t\t\t\tglctx = nil\n\t\t\t}\n\t\tcase size.Event:\n\t\t\tsetFullscreen(e.WidthPx, e.HeightPx)\n\t\tcase paint.Event:\n\t\t\tif glctx == nil || e.External {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trenderCh <- struct{}{}\n\t\t\t<-renderChEnd\n\t\t\ta.Publish()\n\t\t\ta.Send(paint.Event{})\n\t\tcase touch.Event:\n\t\t\tswitch e.Type {\n\t\t\tcase touch.TypeBegin, touch.TypeMove:\n\t\t\t\ts := getDeviceScale()\n\t\t\t\tx, y := float64(e.X)\/s, float64(e.Y)\/s\n\t\t\t\t\/\/ TODO: Is it ok to cast from int64 to int here?\n\t\t\t\tt := input.NewTouch(int(e.Sequence), int(x), int(y))\n\t\t\t\ttouches[e.Sequence] = t\n\t\t\tcase touch.TypeEnd:\n\t\t\t\tdelete(touches, e.Sequence)\n\t\t\t}\n\t\t\tts := []*input.Touch{}\n\t\t\tfor _, t := range touches {\n\t\t\t\tts = append(ts, t)\n\t\t\t}\n\t\t\tUpdateTouches(ts)\n\t\t}\n\t}\n}\n\nfunc Run(width, height int, scale float64, title string, g GraphicsContext, mainloop bool) error {\n\tu := currentUI\n\n\tu.m.Lock()\n\tu.width = width\n\tu.height = height\n\tu.scale = scale\n\tu.sizeChanged = true\n\tu.m.Unlock()\n\t\/\/ title is ignored?\n\n\tif mainloop {\n\t\tctx := <-glContextCh\n\t\topengl.InitWithContext(ctx)\n\t} else {\n\t\topengl.Init()\n\t}\n\n\t\/\/ Force to set the screen size\n\tu.updateGraphicsContext(g)\n\tfor {\n\t\tif err := u.update(g); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ RunMainThreadLoop runs the main routine for gomobile-build.\nfunc RunMainThreadLoop(ch <-chan error) error {\n\tgo func() {\n\t\t\/\/ As mobile apps never ends, RunMainThreadLoop can't return.\n\t\t\/\/ Just panic here.\n\t\terr := <-ch\n\t\tpanic(err)\n\t}()\n\tapp.Main(appMain)\n\treturn nil\n}\n\nfunc (u *userInterface) updateGraphicsContext(g GraphicsContext) {\n\twidth, height := 0, 0\n\tactualScale := 0.0\n\n\tu.m.Lock()\n\tsizeChanged := u.sizeChanged\n\tif sizeChanged {\n\t\twidth = u.width\n\t\theight = u.height\n\t\tactualScale = u.scaleImpl() * getDeviceScale()\n\t}\n\tu.sizeChanged = false\n\tu.m.Unlock()\n\n\tif sizeChanged {\n\t\t\/\/ Sizing also calls GL functions\n\t\tg.SetSize(width, height, actualScale)\n\t}\n}\n\nfunc actualScale() float64 {\n\treturn currentUI.actualScale()\n}\n\nfunc (u *userInterface) actualScale() float64 {\n\tu.m.Lock()\n\ts := u.scaleImpl() * getDeviceScale()\n\tu.m.Unlock()\n\treturn s\n}\n\nfunc (u *userInterface) 
scaleImpl() float64 {\n\tscale := u.scale\n\tif u.fullscreenScale != 0 {\n\t\tscale = u.fullscreenScale\n\t}\n\treturn scale\n}\n\nfunc (u *userInterface) update(g GraphicsContext) error {\nrender:\n\tfor {\n\t\tselect {\n\t\tcase <-renderCh:\n\t\t\tbreak render\n\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\thooks.SuspendAudio()\n\t\t\tcontinue\n\t\t}\n\t}\n\thooks.ResumeAudio()\n\n\tdefer func() {\n\t\trenderChEnd <- struct{}{}\n\t}()\n\n\tif err := g.Update(func() {\n\t\tu.updateGraphicsContext(g)\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc screenSize() (int, int) {\n\treturn currentUI.screenSize()\n}\n\nfunc (u *userInterface) screenSize() (int, int) {\n\tu.m.Lock()\n\tw, h := u.width, u.height\n\tu.m.Unlock()\n\treturn w, h\n}\n\nfunc MonitorSize() (int, int) {\n\t\/\/ TODO: This function should return fullscreenWidthPx, fullscreenHeightPx,\n\t\/\/ but these values are not initialized until the main loop starts.\n\treturn 0, 0\n}\n\nfunc SetScreenSize(width, height int) bool {\n\tcurrentUI.setScreenSize(width, height)\n\treturn true\n}\n\nfunc (u *userInterface) setScreenSize(width, height int) {\n\tu.m.Lock()\n\tif u.width != width || u.height != height {\n\t\tu.width = width\n\t\tu.height = height\n\t\tu.updateFullscreenScaleIfNeeded()\n\t\tu.sizeChanged = true\n\t}\n\tu.m.Unlock()\n}\n\nfunc SetScreenScale(scale float64) bool {\n\tcurrentUI.setScreenScale(scale)\n\treturn false\n}\n\nfunc (u *userInterface) setScreenScale(scale float64) {\n\tu.m.Lock()\n\tif u.scale != scale {\n\t\tu.scale = scale\n\t\tu.sizeChanged = true\n\t}\n\tu.m.Unlock()\n}\n\nfunc ScreenScale() float64 {\n\tu := currentUI\n\tu.m.RLock()\n\ts := u.scale\n\tu.m.RUnlock()\n\treturn s\n}\n\nfunc setFullscreen(widthPx, heightPx int) {\n\tcurrentUI.setFullscreen(widthPx, heightPx)\n}\n\nfunc (u *userInterface) setFullscreen(widthPx, heightPx int) {\n\tu.m.Lock()\n\tu.fullscreenWidthPx = widthPx\n\tu.fullscreenHeightPx = heightPx\n\tu.updateFullscreenScaleIfNeeded()\n\tu.sizeChanged = true\n\tu.m.Unlock()\n}\n\nfunc (u *userInterface) updateFullscreenScaleIfNeeded() {\n\tif u.fullscreenWidthPx == 0 || u.fullscreenHeightPx == 0 {\n\t\treturn\n\t}\n\tw, h := u.width, u.height\n\tscaleX := float64(u.fullscreenWidthPx) \/ float64(w)\n\tscaleY := float64(u.fullscreenHeightPx) \/ float64(h)\n\tscale := scaleX\n\tif scale > scaleY {\n\t\tscale = scaleY\n\t}\n\tu.fullscreenScale = scale \/ getDeviceScale()\n\tu.sizeChanged = true\n}\n\nfunc ScreenPadding() (x0, y0, x1, y1 float64) {\n\treturn currentUI.screenPadding()\n}\n\nfunc (u *userInterface) screenPadding() (x0, y0, x1, y1 float64) {\n\tu.m.Lock()\n\tx0, y0, x1, y1 = u.screenPaddingImpl()\n\tu.m.Unlock()\n\treturn\n}\n\nfunc (u *userInterface) screenPaddingImpl() (x0, y0, x1, y1 float64) {\n\tif u.fullscreenScale == 0 {\n\t\treturn 0, 0, 0, 0\n\t}\n\ts := u.fullscreenScale * getDeviceScale()\n\tox := (float64(u.fullscreenWidthPx) - float64(u.width)*s) \/ 2\n\toy := (float64(u.fullscreenHeightPx) - float64(u.height)*s) \/ 2\n\treturn ox, oy, ox, oy\n}\n\nfunc AdjustedCursorPosition() (x, y int) {\n\treturn currentUI.adjustPosition(input.Get().CursorPosition())\n}\n\nfunc AdjustedTouches() []*input.Touch {\n\tts := input.Get().Touches()\n\tadjusted := make([]*input.Touch, len(ts))\n\tfor i, t := range ts {\n\t\tx, y := currentUI.adjustPosition(t.Position())\n\t\tadjusted[i] = input.NewTouch(t.ID(), x, y)\n\t}\n\treturn adjusted\n}\n\nfunc (u *userInterface) adjustPosition(x, y int) (int, int) {\n\tu.m.Lock()\n\tox, oy, _, _ := 
u.screenPaddingImpl()\n\ts := u.scaleImpl()\n\tas := s * getDeviceScale()\n\tu.m.Unlock()\n\treturn int(float64(x)\/s - ox\/as), int(float64(y)\/s - oy\/as)\n}\n\nfunc IsCursorVisible() bool {\n\treturn false\n}\n\nfunc SetCursorVisible(visible bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsFullscreen() bool {\n\treturn false\n}\n\nfunc SetFullscreen(fullscreen bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsRunnableInBackground() bool {\n\treturn false\n}\n\nfunc SetRunnableInBackground(runnableInBackground bool) {\n\t\/\/ Do nothing\n}\n\nfunc SetWindowTitle(title string) {\n\t\/\/ Do nothing\n}\n\nfunc SetWindowIcon(iconImages []image.Image) {\n\t\/\/ Do nothing\n}\n\nfunc IsWindowDecorated() bool {\n\treturn false\n}\n\nfunc SetWindowDecorated(decorated bool) {\n\t\/\/ Do nothing\n}\n\nfunc IsVsyncEnabled() bool {\n\treturn true\n}\n\nfunc SetVsyncEnabled(enabled bool) {\n\t\/\/ Do nothing\n}\n\nfunc UpdateTouches(touches []*input.Touch) {\n\tinput.Get().UpdateTouches(touches)\n}\n<|endoftext|>"} {"text":"package lib\n\nimport \"math\"\n\n\/*UpArrow ...*\/\nfunc UpArrow(base int64, exponant int64, upArrowAmount int64) int64 {\n\tif upArrowAmount <= 0 {\n\t\treturn int64(base * exponant)\n\t} else if upArrowAmount == 1 {\n\t\treturn int64(math.Pow(float64(base), float64(exponant)))\n\t} else if exponant == 1 {\n\t\treturn int64(base)\n\t}\n\treturn UpArrow(base, UpArrow(base, exponant-1, upArrowAmount), upArrowAmount-1)\n}\nprevent negativ exponentpackage lib\n\nimport \"math\"\n\n\/*UpArrow ...*\/\nfunc UpArrow(base uint64, exponant uint64, upArrowAmount uint64) uint64 {\n\tif upArrowAmount <= 0 {\n\t\treturn uint64(base * exponant)\n\t} else if upArrowAmount == 1 {\n\t\treturn uint64(math.Pow(float64(base), float64(exponant)))\n\t} else if exponant <= 1 {\n\t\treturn uint64(base)\n\t}\n\treturn UpArrow(base, UpArrow(base, exponant-1, upArrowAmount), upArrowAmount-1)\n}\n<|endoftext|>"} {"text":"package device\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\"\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/dnsmasq\/dhcpalloc\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\/openvswitch\"\n\t\"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\tlog \"github.com\/lxc\/lxd\/shared\/log15\"\n)\n\ntype nicOVN struct {\n\tdeviceCommon\n\n\tnetwork network.Network\n}\n\n\/\/ getIntegrationBridgeName returns the OVS integration bridge to use.\nfunc (d *nicOVN) getIntegrationBridgeName() (string, error) {\n\tintegrationBridge, err := cluster.ConfigGetString(d.state.Cluster, \"network.ovn.integration_bridge\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Failed to get OVN integration bridge name\")\n\t}\n\n\treturn integrationBridge, nil\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicOVN) validateConfig(instConf instance.ConfigReader) error {\n\tif !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\trequiredFields := []string{\n\t\t\"network\",\n\t}\n\n\toptionalFields := 
[]string{\n\t\t\"name\",\n\t\t\"hwaddr\",\n\t\t\"host_name\",\n\t\t\"mtu\",\n\t\t\"ipv4.address\",\n\t\t\"ipv6.address\",\n\t\t\"boot.priority\",\n\t}\n\n\t\/\/ The NIC's network may be a non-default project, so lookup project and get network's project name.\n\tnetworkProjectName, _, err := project.NetworkProject(d.state.Cluster, instConf.Project())\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed loading network project name\")\n\t}\n\n\t\/\/ Lookup network settings and apply them to the device's config.\n\tn, err := network.LoadByName(d.state, networkProjectName, d.config[\"network\"])\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error loading network config for %q\", d.config[\"network\"])\n\t}\n\n\tif n.Status() == api.NetworkStatusPending {\n\t\treturn fmt.Errorf(\"Specified network is not fully created\")\n\t}\n\n\tif n.Type() != \"ovn\" {\n\t\treturn fmt.Errorf(\"Specified network must be of type ovn\")\n\t}\n\n\tbannedKeys := []string{\"mtu\"}\n\tfor _, bannedKey := range bannedKeys {\n\t\tif d.config[bannedKey] != \"\" {\n\t\t\treturn fmt.Errorf(\"Cannot use %q property in conjunction with %q property\", bannedKey, \"network\")\n\t\t}\n\t}\n\n\td.network = n \/\/ Stored loaded instance for use by other functions.\n\tnetConfig := d.network.Config()\n\n\tif d.config[\"ipv4.address\"] != \"\" {\n\t\t\/\/ Check that DHCPv4 is enabled on parent network (needed to use static assigned IPs).\n\t\tif n.DHCPv4Subnet() == nil {\n\t\t\treturn fmt.Errorf(\"Cannot specify %q when DHCP is disabled on network %q\", \"ipv4.address\", d.config[\"network\"])\n\t\t}\n\n\t\t_, subnet, err := net.ParseCIDR(netConfig[\"ipv4.address\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Invalid network ipv4.address\")\n\t\t}\n\n\t\t\/\/ Check the static IP supplied is valid for the linked network. It should be part of the\n\t\t\/\/ network's subnet, but not necessarily part of the dynamic allocation ranges.\n\t\tif !dhcpalloc.DHCPValidIP(subnet, nil, net.ParseIP(d.config[\"ipv4.address\"])) {\n\t\t\treturn fmt.Errorf(\"Device IP address %q not within network %q subnet\", d.config[\"ipv4.address\"], d.config[\"network\"])\n\t\t}\n\t}\n\n\tif d.config[\"ipv6.address\"] != \"\" {\n\t\t\/\/ Check that DHCPv6 is enabled on parent network (needed to use static assigned IPs).\n\t\tif n.DHCPv6Subnet() == nil || !shared.IsTrue(netConfig[\"ipv6.dhcp.stateful\"]) {\n\t\t\treturn fmt.Errorf(\"Cannot specify %q when DHCP or %q are disabled on network %q\", \"ipv6.address\", \"ipv6.dhcp.stateful\", d.config[\"network\"])\n\t\t}\n\n\t\t_, subnet, err := net.ParseCIDR(netConfig[\"ipv6.address\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Invalid network ipv6.address\")\n\t\t}\n\n\t\t\/\/ Check the static IP supplied is valid for the linked network. 
It should be part of the\n\t\t\/\/ network's subnet, but not necessarily part of the dynamic allocation ranges.\n\t\tif !dhcpalloc.DHCPValidIP(subnet, nil, net.ParseIP(d.config[\"ipv6.address\"])) {\n\t\t\treturn fmt.Errorf(\"Device IP address %q not within network %q subnet\", d.config[\"ipv6.address\"], d.config[\"network\"])\n\t\t}\n\t}\n\n\t\/\/ Apply network level config options to device config before validation.\n\td.config[\"mtu\"] = fmt.Sprintf(\"%s\", netConfig[\"bridge.mtu\"])\n\n\trules := nicValidationRules(requiredFields, optionalFields)\n\n\t\/\/ Now run normal validation.\n\terr = d.config.Validate(rules)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicOVN) validateEnvironment() error {\n\tif d.inst.Type() == instancetype.Container && d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\tintegrationBridge, err := d.getIntegrationBridgeName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", integrationBridge)) {\n\t\treturn fmt.Errorf(\"OVS integration bridge device %q doesn't exist\", integrationBridge)\n\t}\n\n\treturn nil\n}\n\n\/\/ CanHotPlug returns whether the device can be managed whilst the instance is running, it also\n\/\/ returns a list of fields that can be updated without triggering a device remove & add.\nfunc (d *nicOVN) CanHotPlug() (bool, []string) {\n\treturn true, []string{}\n}\n\n\/\/ Add is run when a device is added to an instance whether or not the instance is running.\nfunc (d *nicOVN) Add() error {\n\treturn nil\n}\n\n\/\/ Start is run when the device is added to a running instance or instance is starting up.\nfunc (d *nicOVN) Start() (*deviceConfig.RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\tsaveData := make(map[string]string)\n\tsaveData[\"host_name\"] = d.config[\"host_name\"]\n\n\tvar peerName string\n\n\t\/\/ Create veth pair and configure the peer end with custom hwaddr and mtu if supplied.\n\tif d.inst.Type() == instancetype.Container {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"veth\")\n\t\t}\n\t\tpeerName, err = networkCreateVethPair(saveData[\"host_name\"], d.config)\n\t} else if d.inst.Type() == instancetype.VM {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"tap\")\n\t\t}\n\t\tpeerName = saveData[\"host_name\"] \/\/ VMs use the host_name to link to the TAP FD.\n\t\terr = networkCreateTap(saveData[\"host_name\"], d.config)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { NetworkRemoveInterface(saveData[\"host_name\"]) })\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, saveData)\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Disable IPv6 on host-side veth interface (prevents host-side interface getting link-local address)\n\t\/\/ which isn't needed because the host-side interface is connected to a bridge.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv6\/conf\/%s\/disable_ipv6\", saveData[\"host_name\"]), \"1\")\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\tmac, err := net.ParseMAC(d.config[\"hwaddr\"])\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tips := []net.IP{}\n\tfor _, key := range []string{\"ipv4.address\", \"ipv6.address\"} {\n\t\tif d.config[key] != \"\" {\n\t\t\tip := net.ParseIP(d.config[key])\n\t\t\tif ip == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid %s value %q\", key, d.config[key])\n\t\t\t}\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n\n\t\/\/ Add new OVN logical switch port for the instance.\n\tlogicalPortName, err := network.OVNInstanceDevicePortAdd(d.network, d.inst.ID(), d.inst.Name(), d.name, mac, ips)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { network.OVNInstanceDevicePortDelete(d.network, d.inst.ID(), d.name) })\n\n\t\/\/ Attach host-side veth interface to bridge.\n\tintegrationBridge, err := d.getIntegrationBridgeName()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tovs := openvswitch.NewOVS()\n\terr = ovs.BridgePortAdd(integrationBridge, saveData[\"host_name\"], true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { ovs.BridgePortDelete(integrationBridge, saveData[\"host_name\"]) })\n\n\t\/\/ Link OVS port to OVN logical port.\n\terr = ovs.InterfaceAssociateOVNSwitchPort(saveData[\"host_name\"], logicalPortName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attempt to disable router advertisement acceptance.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv6\/conf\/%s\/accept_ra\", saveData[\"host_name\"]), \"0\")\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attempt to disable IPv4 forwarding.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv4\/conf\/%s\/forwarding\", saveData[\"host_name\"]), \"0\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := deviceConfig.RunConfig{}\n\trunConf.NetworkInterface = []deviceConfig.RunConfigItem{\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: peerName},\n\t}\n\n\tif d.inst.Type() == instancetype.VM {\n\t\trunConf.NetworkInterface = append(runConf.NetworkInterface,\n\t\t\t[]deviceConfig.RunConfigItem{\n\t\t\t\t{Key: \"devName\", Value: d.name},\n\t\t\t\t{Key: \"hwaddr\", Value: d.config[\"hwaddr\"]},\n\t\t\t}...)\n\t}\n\n\trevert.Success()\n\treturn &runConf, nil\n}\n\n\/\/ Update applies configuration changes to a started device.\nfunc (d *nicOVN) Update(oldDevices deviceConfig.Devices, isRunning bool) error {\n\toldConfig := oldDevices[d.name]\n\n\tv := d.volatileGet()\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\t\/\/ If the instance is running, apply host-side limits and filters first before rebuilding\n\t\/\/ dnsmasq config below so that existing config can be used as part of the filter removal.\n\tif isRunning {\n\t\terr := d.validateEnvironment()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Apply host-side limits.\n\t\terr = networkSetupHostVethLimits(d.config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ If the IPv6 address has changed and the instance is running, bounce the host-side\n\t\/\/ veth interface to give the instance a chance to detect the change and re-apply for an\n\t\/\/ updated lease with the new IP address.\n\tif d.config[\"ipv6.address\"] != oldConfig[\"ipv6.address\"] && d.config[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"host_name\"])) {\n\t\t_, err := shared.RunCommand(\"ip\", \"link\", 
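\n\t\t\t\/\/ (equivalent shell, with a hypothetical interface name:\n\t\t\t\/\/ ip link set vethXYZ down && ip link set vethXYZ up)\n\t\t\t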
\"set\", d.config[\"host_name\"], \"down\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = shared.RunCommand(\"ip\", \"link\", \"set\", d.config[\"host_name\"], \"up\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Stop is run when the device is removed from the instance.\nfunc (d *nicOVN) Stop() (*deviceConfig.RunConfig, error) {\n\trunConf := deviceConfig.RunConfig{\n\t\tPostHooks: []func() error{d.postStop},\n\t}\n\n\terr := network.OVNInstanceDevicePortDelete(d.network, d.inst.ID(), d.name)\n\tif err != nil {\n\t\t\/\/ Don't fail here as we still want the postStop hook to run to clean up the local veth pair.\n\t\td.logger.Error(\"Failed to remove OVN device port\", log.Ctx{\"err\": err})\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ postStop is run after the device is removed from the instance.\nfunc (d *nicOVN) postStop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t})\n\n\tv := d.volatileGet()\n\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\tif d.config[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"host_name\"])) {\n\t\tintegrationBridge, err := d.getIntegrationBridgeName()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tovs := openvswitch.NewOVS()\n\n\t\t\/\/ Detach host-side end of veth pair from bridge (required for openvswitch particularly).\n\t\terr = ovs.BridgePortDelete(integrationBridge, d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to detach interface %q from %q\", d.config[\"host_name\"], integrationBridge)\n\t\t}\n\n\t\t\/\/ Removing host-side end of veth pair will delete the peer end too.\n\t\terr = NetworkRemoveInterface(d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to remove interface %q\", d.config[\"host_name\"])\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove is run when the device is removed from the instance or the instance is deleted.\nfunc (d *nicOVN) Remove() error {\n\treturn nil\n}\nlxd\/device\/nic\/ovn: Improves error message in Startpackage device\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\"\n\tdeviceConfig \"github.com\/lxc\/lxd\/lxd\/device\/config\"\n\t\"github.com\/lxc\/lxd\/lxd\/dnsmasq\/dhcpalloc\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\/openvswitch\"\n\t\"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\tlog \"github.com\/lxc\/lxd\/shared\/log15\"\n)\n\ntype nicOVN struct {\n\tdeviceCommon\n\n\tnetwork network.Network\n}\n\n\/\/ getIntegrationBridgeName returns the OVS integration bridge to use.\nfunc (d *nicOVN) getIntegrationBridgeName() (string, error) {\n\tintegrationBridge, err := cluster.ConfigGetString(d.state.Cluster, \"network.ovn.integration_bridge\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Failed to get OVN integration bridge name\")\n\t}\n\n\treturn integrationBridge, nil\n}\n\n\/\/ validateConfig checks the supplied config for correctness.\nfunc (d *nicOVN) validateConfig(instConf instance.ConfigReader) error {\n\tif !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {\n\t\treturn ErrUnsupportedDevType\n\t}\n\n\trequiredFields := 
[]string{\n\t\t\"network\",\n\t}\n\n\toptionalFields := []string{\n\t\t\"name\",\n\t\t\"hwaddr\",\n\t\t\"host_name\",\n\t\t\"mtu\",\n\t\t\"ipv4.address\",\n\t\t\"ipv6.address\",\n\t\t\"boot.priority\",\n\t}\n\n\t\/\/ The NIC's network may be a non-default project, so lookup project and get network's project name.\n\tnetworkProjectName, _, err := project.NetworkProject(d.state.Cluster, instConf.Project())\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed loading network project name\")\n\t}\n\n\t\/\/ Lookup network settings and apply them to the device's config.\n\tn, err := network.LoadByName(d.state, networkProjectName, d.config[\"network\"])\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error loading network config for %q\", d.config[\"network\"])\n\t}\n\n\tif n.Status() == api.NetworkStatusPending {\n\t\treturn fmt.Errorf(\"Specified network is not fully created\")\n\t}\n\n\tif n.Type() != \"ovn\" {\n\t\treturn fmt.Errorf(\"Specified network must be of type ovn\")\n\t}\n\n\tbannedKeys := []string{\"mtu\"}\n\tfor _, bannedKey := range bannedKeys {\n\t\tif d.config[bannedKey] != \"\" {\n\t\t\treturn fmt.Errorf(\"Cannot use %q property in conjunction with %q property\", bannedKey, \"network\")\n\t\t}\n\t}\n\n\td.network = n \/\/ Stored loaded instance for use by other functions.\n\tnetConfig := d.network.Config()\n\n\tif d.config[\"ipv4.address\"] != \"\" {\n\t\t\/\/ Check that DHCPv4 is enabled on parent network (needed to use static assigned IPs).\n\t\tif n.DHCPv4Subnet() == nil {\n\t\t\treturn fmt.Errorf(\"Cannot specify %q when DHCP is disabled on network %q\", \"ipv4.address\", d.config[\"network\"])\n\t\t}\n\n\t\t_, subnet, err := net.ParseCIDR(netConfig[\"ipv4.address\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Invalid network ipv4.address\")\n\t\t}\n\n\t\t\/\/ Check the static IP supplied is valid for the linked network. It should be part of the\n\t\t\/\/ network's subnet, but not necessarily part of the dynamic allocation ranges.\n\t\tif !dhcpalloc.DHCPValidIP(subnet, nil, net.ParseIP(d.config[\"ipv4.address\"])) {\n\t\t\treturn fmt.Errorf(\"Device IP address %q not within network %q subnet\", d.config[\"ipv4.address\"], d.config[\"network\"])\n\t\t}\n\t}\n\n\tif d.config[\"ipv6.address\"] != \"\" {\n\t\t\/\/ Check that DHCPv6 is enabled on parent network (needed to use static assigned IPs).\n\t\tif n.DHCPv6Subnet() == nil || !shared.IsTrue(netConfig[\"ipv6.dhcp.stateful\"]) {\n\t\t\treturn fmt.Errorf(\"Cannot specify %q when DHCP or %q are disabled on network %q\", \"ipv6.address\", \"ipv6.dhcp.stateful\", d.config[\"network\"])\n\t\t}\n\n\t\t_, subnet, err := net.ParseCIDR(netConfig[\"ipv6.address\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Invalid network ipv6.address\")\n\t\t}\n\n\t\t\/\/ Check the static IP supplied is valid for the linked network. 
It should be part of the\n\t\t\/\/ network's subnet, but not necessarily part of the dynamic allocation ranges.\n\t\tif !dhcpalloc.DHCPValidIP(subnet, nil, net.ParseIP(d.config[\"ipv6.address\"])) {\n\t\t\treturn fmt.Errorf(\"Device IP address %q not within network %q subnet\", d.config[\"ipv6.address\"], d.config[\"network\"])\n\t\t}\n\t}\n\n\t\/\/ Apply network-level config options to the device config before validation.\n\td.config[\"mtu\"] = netConfig[\"bridge.mtu\"]\n\n\trules := nicValidationRules(requiredFields, optionalFields)\n\n\t\/\/ Now run normal validation.\n\terr = d.config.Validate(rules)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ validateEnvironment checks the runtime environment for correctness.\nfunc (d *nicOVN) validateEnvironment() error {\n\tif d.inst.Type() == instancetype.Container && d.config[\"name\"] == \"\" {\n\t\treturn fmt.Errorf(\"Requires name property to start\")\n\t}\n\n\tintegrationBridge, err := d.getIntegrationBridgeName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", integrationBridge)) {\n\t\treturn fmt.Errorf(\"OVS integration bridge device %q doesn't exist\", integrationBridge)\n\t}\n\n\treturn nil\n}\n\n\/\/ CanHotPlug returns whether the device can be managed whilst the instance is running. It also\n\/\/ returns a list of fields that can be updated without triggering a device remove & add.\nfunc (d *nicOVN) CanHotPlug() (bool, []string) {\n\treturn true, []string{}\n}\n\n\/\/ Add is run when a device is added to an instance whether or not the instance is running.\nfunc (d *nicOVN) Add() error {\n\treturn nil\n}\n\n\/\/ Start is run when the device is added to a running instance or the instance is starting up.\nfunc (d *nicOVN) Start() (*deviceConfig.RunConfig, error) {\n\terr := d.validateEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\tsaveData := make(map[string]string)\n\tsaveData[\"host_name\"] = d.config[\"host_name\"]\n\n\tvar peerName string\n\n\t\/\/ Create veth pair and configure the peer end with custom hwaddr and mtu if supplied.\n\tif d.inst.Type() == instancetype.Container {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"veth\")\n\t\t}\n\t\tpeerName, err = networkCreateVethPair(saveData[\"host_name\"], d.config)\n\t} else if d.inst.Type() == instancetype.VM {\n\t\tif saveData[\"host_name\"] == \"\" {\n\t\t\tsaveData[\"host_name\"] = network.RandomDevName(\"tap\")\n\t\t}\n\t\tpeerName = saveData[\"host_name\"] \/\/ VMs use the host_name to link to the TAP FD.\n\t\terr = networkCreateTap(saveData[\"host_name\"], d.config)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { NetworkRemoveInterface(saveData[\"host_name\"]) })\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, saveData)\n\n\t\/\/ Apply host-side limits.\n\terr = networkSetupHostVethLimits(d.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Disable IPv6 on the host-side veth interface (prevents it from getting a link-local address),\n\t\/\/ which isn't needed because the host-side interface is connected to a bridge.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv6\/conf\/%s\/disable_ipv6\", saveData[\"host_name\"]), \"1\")\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\tmac, err := net.ParseMAC(d.config[\"hwaddr\"])\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tips := []net.IP{}\n\tfor _, key := range []string{\"ipv4.address\", \"ipv6.address\"} {\n\t\tif d.config[key] != \"\" {\n\t\t\tip := net.ParseIP(d.config[key])\n\t\t\tif ip == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid %s value %q\", key, d.config[key])\n\t\t\t}\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n\n\t\/\/ Add new OVN logical switch port for the instance.\n\tlogicalPortName, err := network.OVNInstanceDevicePortAdd(d.network, d.inst.ID(), d.inst.Name(), d.name, mac, ips)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed adding OVN port\")\n\t}\n\n\trevert.Add(func() { network.OVNInstanceDevicePortDelete(d.network, d.inst.ID(), d.name) })\n\n\t\/\/ Attach host-side veth interface to bridge.\n\tintegrationBridge, err := d.getIntegrationBridgeName()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tovs := openvswitch.NewOVS()\n\terr = ovs.BridgePortAdd(integrationBridge, saveData[\"host_name\"], true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevert.Add(func() { ovs.BridgePortDelete(integrationBridge, saveData[\"host_name\"]) })\n\n\t\/\/ Link OVS port to OVN logical port.\n\terr = ovs.InterfaceAssociateOVNSwitchPort(saveData[\"host_name\"], logicalPortName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attempt to disable router advertisement acceptance.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv6\/conf\/%s\/accept_ra\", saveData[\"host_name\"]), \"0\")\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attempt to disable IPv4 forwarding.\n\terr = util.SysctlSet(fmt.Sprintf(\"net\/ipv4\/conf\/%s\/forwarding\", saveData[\"host_name\"]), \"0\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = d.volatileSet(saveData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunConf := deviceConfig.RunConfig{}\n\trunConf.NetworkInterface = []deviceConfig.RunConfigItem{\n\t\t{Key: \"name\", Value: d.config[\"name\"]},\n\t\t{Key: \"type\", Value: \"phys\"},\n\t\t{Key: \"flags\", Value: \"up\"},\n\t\t{Key: \"link\", Value: peerName},\n\t}\n\n\tif d.inst.Type() == instancetype.VM {\n\t\trunConf.NetworkInterface = append(runConf.NetworkInterface,\n\t\t\t[]deviceConfig.RunConfigItem{\n\t\t\t\t{Key: \"devName\", Value: d.name},\n\t\t\t\t{Key: \"hwaddr\", Value: d.config[\"hwaddr\"]},\n\t\t\t}...)\n\t}\n\n\trevert.Success()\n\treturn &runConf, nil\n}\n\n\/\/ Update applies configuration changes to a started device.\nfunc (d *nicOVN) Update(oldDevices deviceConfig.Devices, isRunning bool) error {\n\toldConfig := oldDevices[d.name]\n\n\tv := d.volatileGet()\n\n\t\/\/ Populate device config with volatile fields if needed.\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\t\/\/ If the instance is running, apply host-side limits and filters first before rebuilding\n\t\/\/ dnsmasq config below so that existing config can be used as part of the filter removal.\n\tif isRunning {\n\t\terr := d.validateEnvironment()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Apply host-side limits.\n\t\terr = networkSetupHostVethLimits(d.config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ If the IPv6 address has changed and the instance is running, bounce the host-side\n\t\/\/ veth interface to give the instance a chance to detect the change and re-apply for an\n\t\/\/ updated lease with the new IP address.\n\tif d.config[\"ipv6.address\"] != oldConfig[\"ipv6.address\"] && d.config[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"host_name\"])) {\n\t\t_, err 
:= shared.RunCommand(\"ip\", \"link\", \"set\", d.config[\"host_name\"], \"down\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = shared.RunCommand(\"ip\", \"link\", \"set\", d.config[\"host_name\"], \"up\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Stop is run when the device is removed from the instance.\nfunc (d *nicOVN) Stop() (*deviceConfig.RunConfig, error) {\n\trunConf := deviceConfig.RunConfig{\n\t\tPostHooks: []func() error{d.postStop},\n\t}\n\n\terr := network.OVNInstanceDevicePortDelete(d.network, d.inst.ID(), d.name)\n\tif err != nil {\n\t\t\/\/ Don't fail here as we still want the postStop hook to run to clean up the local veth pair.\n\t\td.logger.Error(\"Failed to remove OVN device port\", log.Ctx{\"err\": err})\n\t}\n\n\treturn &runConf, nil\n}\n\n\/\/ postStop is run after the device is removed from the instance.\nfunc (d *nicOVN) postStop() error {\n\tdefer d.volatileSet(map[string]string{\n\t\t\"host_name\": \"\",\n\t})\n\n\tv := d.volatileGet()\n\n\tnetworkVethFillFromVolatile(d.config, v)\n\n\tif d.config[\"host_name\"] != \"\" && shared.PathExists(fmt.Sprintf(\"\/sys\/class\/net\/%s\", d.config[\"host_name\"])) {\n\t\tintegrationBridge, err := d.getIntegrationBridgeName()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tovs := openvswitch.NewOVS()\n\n\t\t\/\/ Detach host-side end of veth pair from bridge (required for openvswitch particularly).\n\t\terr = ovs.BridgePortDelete(integrationBridge, d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to detach interface %q from %q\", d.config[\"host_name\"], integrationBridge)\n\t\t}\n\n\t\t\/\/ Removing host-side end of veth pair will delete the peer end too.\n\t\terr = NetworkRemoveInterface(d.config[\"host_name\"])\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to remove interface %q\", d.config[\"host_name\"])\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Remove is run when the device is removed from the instance or the instance is deleted.\nfunc (d *nicOVN) Remove() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\ntype storageShared struct {\n\tsType storageType\n\tsTypeName string\n\tsTypeVersion string\n\n\td *Daemon\n\n\tpoolID int64\n\tpool *api.StoragePool\n\n\tvolume *api.StorageVolume\n}\n\nfunc (s *storageShared) GetStorageType() storageType {\n\treturn s.sType\n}\n\nfunc (s *storageShared) GetStorageTypeName() string {\n\treturn s.sTypeName\n}\n\nfunc (s *storageShared) GetStorageTypeVersion() string {\n\treturn s.sTypeVersion\n}\n\nfunc (s *storageShared) shiftRootfs(c container) error {\n\tdpath := c.Path()\n\trpath := c.RootfsPath()\n\n\tshared.LogDebugf(\"Shifting root filesystem \\\"%s\\\" for \\\"%s\\\".\", rpath, c.Name())\n\n\tidmapset := c.IdmapSet()\n\n\tif idmapset == nil {\n\t\treturn fmt.Errorf(\"IdmapSet of container '%s' is nil\", c.Name())\n\t}\n\n\terr := idmapset.ShiftRootfs(rpath)\n\tif err != nil {\n\t\tshared.LogDebugf(\"Shift of rootfs %s failed: %s\", rpath, err)\n\t\treturn err\n\t}\n\n\t\/* Set an acl so the container root can descend the container dir *\/\n\t\/\/ TODO: i changed this so it calls s.setUnprivUserAcl, which does\n\t\/\/ the acl change only if the container is not privileged, think thats right.\n\treturn s.setUnprivUserAcl(c, dpath)\n}\n\nfunc (s *storageShared) setUnprivUserAcl(c container, destPath string) error {\n\tidmapset := 
c.IdmapSet()\n\n\t\/\/ Skip for privileged containers\n\tif idmapset == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure the map is valid. Skip if container uid 0 == host uid 0\n\tuid, _ := idmapset.ShiftIntoNs(0, 0)\n\tswitch uid {\n\tcase -1:\n\t\treturn fmt.Errorf(\"Container doesn't have a uid 0 in its map\")\n\tcase 0:\n\t\treturn nil\n\t}\n\n\t\/\/ Attempt to set a POSIX ACL first.\n\tacl := fmt.Sprintf(\"%d:rx\", uid)\n\terr := exec.Command(\"setfacl\", \"-m\", acl, destPath).Run()\n\tif err == nil {\n\t\tshared.LogDebugf(\"Failed to set acl permission on container path: %s\", err)\n\t\treturn nil\n\t}\n\n\t\/\/ Fallback to chmod if the fs doesn't support it.\n\terr = exec.Command(\"chmod\", \"+x\", destPath).Run()\n\tif err != nil {\n\t\tshared.LogDebugf(\"Failed to set executable bit on the container path: %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *storageShared) createImageDbPoolVolume(fingerprint string) error {\n\t\/\/ Fill in any default volume config.\n\tvolumeConfig := map[string]string{}\n\terr := storageVolumeFillDefault(s.pool.Name, volumeConfig, s.pool)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create a db entry for the storage volume of the image.\n\t_, err = dbStoragePoolVolumeCreate(s.d.db, fingerprint, storagePoolVolumeTypeImage, s.poolID, volumeConfig)\n\tif err != nil {\n\t\t\/\/ Try to delete the db entry on error.\n\t\ts.deleteImageDbPoolVolume(fingerprint)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *storageShared) deleteImageDbPoolVolume(fingerprint string) error {\n\terr := dbStoragePoolVolumeDelete(s.d.db, fingerprint, storagePoolVolumeTypeImage, s.poolID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\nRemove wrong error messagepackage main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\ntype storageShared struct {\n\tsType storageType\n\tsTypeName string\n\tsTypeVersion string\n\n\td *Daemon\n\n\tpoolID int64\n\tpool *api.StoragePool\n\n\tvolume *api.StorageVolume\n}\n\nfunc (s *storageShared) GetStorageType() storageType {\n\treturn s.sType\n}\n\nfunc (s *storageShared) GetStorageTypeName() string {\n\treturn s.sTypeName\n}\n\nfunc (s *storageShared) GetStorageTypeVersion() string {\n\treturn s.sTypeVersion\n}\n\nfunc (s *storageShared) shiftRootfs(c container) error {\n\tdpath := c.Path()\n\trpath := c.RootfsPath()\n\n\tshared.LogDebugf(\"Shifting root filesystem \\\"%s\\\" for \\\"%s\\\".\", rpath, c.Name())\n\n\tidmapset := c.IdmapSet()\n\n\tif idmapset == nil {\n\t\treturn fmt.Errorf(\"IdmapSet of container '%s' is nil\", c.Name())\n\t}\n\n\terr := idmapset.ShiftRootfs(rpath)\n\tif err != nil {\n\t\tshared.LogDebugf(\"Shift of rootfs %s failed: %s\", rpath, err)\n\t\treturn err\n\t}\n\n\t\/* Set an acl so the container root can descend the container dir *\/\n\t\/\/ TODO: i changed this so it calls s.setUnprivUserAcl, which does\n\t\/\/ the acl change only if the container is not privileged, think that's right.\n\treturn s.setUnprivUserAcl(c, dpath)\n}\n\nfunc (s *storageShared) setUnprivUserAcl(c container, destPath string) error {\n\tidmapset := c.IdmapSet()\n\n\t\/\/ Skip for privileged containers\n\tif idmapset == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Make sure the map is valid. 
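A hedged reading of the probe below:\n\t\/\/ ShiftIntoNs(0, 0) maps container uid\/gid 0 into the host namespace, so\n\t\/\/\n\t\/\/ \tuid, _ := idmapset.ShiftIntoNs(0, 0)\n\t\/\/ \t\/\/ uid == -1: container root has no mapping at all\n\t\/\/ \t\/\/ uid == 0:  container root is host root, nothing to do\n\t\/\/\n\t\/\/ 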
Skip if container uid 0 == host uid 0\n\tuid, _ := idmapset.ShiftIntoNs(0, 0)\n\tswitch uid {\n\tcase -1:\n\t\treturn fmt.Errorf(\"Container doesn't have a uid 0 in its map\")\n\tcase 0:\n\t\treturn nil\n\t}\n\n\t\/\/ Attempt to set a POSIX ACL first.\n\tacl := fmt.Sprintf(\"%d:rx\", uid)\n\terr := exec.Command(\"setfacl\", \"-m\", acl, destPath).Run()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Fallback to chmod if the fs doesn't support it.\n\terr = exec.Command(\"chmod\", \"+x\", destPath).Run()\n\tif err != nil {\n\t\tshared.LogDebugf(\"Failed to set executable bit on the container path: %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *storageShared) createImageDbPoolVolume(fingerprint string) error {\n\t\/\/ Fill in any default volume config.\n\tvolumeConfig := map[string]string{}\n\terr := storageVolumeFillDefault(s.pool.Name, volumeConfig, s.pool)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Create a db entry for the storage volume of the image.\n\t_, err = dbStoragePoolVolumeCreate(s.d.db, fingerprint, storagePoolVolumeTypeImage, s.poolID, volumeConfig)\n\tif err != nil {\n\t\t\/\/ Try to delete the db entry on error.\n\t\ts.deleteImageDbPoolVolume(fingerprint)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *storageShared) deleteImageDbPoolVolume(fingerprint string) error {\n\terr := dbStoragePoolVolumeDelete(s.d.db, fingerprint, storagePoolVolumeTypeImage, s.poolID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package views\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com\/cswank\/kcli\/internal\/colors\"\n\t\"github.com\/cswank\/kcli\/internal\/kafka\"\n)\n\n\/\/feeder feeds the screen the data that it craves\ntype feeder interface {\n\tprint()\n\tgetRows() ([]string, error)\n\tpage(page int) error\n\theader() string\n\tenter(row int) (feeder, error)\n\tjump(i int64) error\n\tsearch(s string, cb func(int64, int64)) (int64, error)\n\trow() int\n}\n\ntype root struct {\n\tcli *kafka.Client\n\twidth int\n\theight int\n\ttopics []string\n\tenteredAt int\n\tpg int\n\tflashMessage chan<- string\n}\n\nfunc newRoot(cli *kafka.Client, width, height int, flashMessage chan<- string) (*root, error) {\n\ttopics, err := cli.GetTopics()\n\tif len(topics) == 0 {\n\t\treturn nil, fmt.Errorf(\"no topics found in kafka\")\n\t}\n\n\tsort.Strings(topics)\n\treturn &root{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\ttopics: topics,\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (r *root) print() {\n\tfmt.Println(r.header())\n\tfor _, t := range r.topics {\n\t\tfmt.Println(t)\n\t}\n}\n\nfunc (r *root) page(pg int) error {\n\tif (r.pg == 0 && pg < 0) || (r.pg+pg)*r.height > len(r.topics) {\n\t\treturn nil\n\t}\n\tr.pg += pg\n\treturn nil\n}\n\nfunc (r *root) getRows() ([]string, error) {\n\tstart := r.pg * r.height\n\tend := r.pg*r.height + r.height\n\tif end >= len(r.topics) {\n\t\tend = len(r.topics)\n\t}\n\treturn r.topics[start:end], nil\n}\n\nfunc (r *root) enter(row int) (feeder, error) {\n\tif row >= len(r.topics) {\n\t\tr.flashMessage <- \"nothing to see here\"\n\t\treturn nil, errNoData\n\t}\n\tr.enteredAt = row\n\treturn newTopic(r.cli, r.topics[row], r.width, r.height, r.flashMessage)\n}\n\nfunc (r *root) jump(_ int64) error { return nil }\nfunc (r *root) search(_ string, _ func(int64, int64)) (int64, error) { return -1, nil }\n\nfunc (r *root) row() int { return r.enteredAt }\n\nfunc (r *root) header() string {\n\treturn 
\"topics\"\n}\n\ntype topic struct {\n\tcli *kafka.Client\n\theight int\n\twidth int\n\toffset int\n\n\ttopic string\n\tpartitions []kafka.Partition\n\tfmt string\n\tenteredAt int\n\tflashMessage chan<- string\n}\n\nfunc newTopic(cli *kafka.Client, t string, width, height int, flashMessage chan<- string) (feeder, error) {\n\tpartitions, err := cli.GetTopic(t)\n\treturn &topic{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\ttopic: t,\n\t\tpartitions: partitions,\n\t\tfmt: c2(\"%-13d %-22d %-22d %-22d %d\"),\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (t *topic) search(s string, cb func(int64, int64)) (int64, error) {\n\tresults, err := t.cli.SearchTopic(t.partitions, s, false, cb)\n\tif err != nil || len(results) == 0 {\n\t\treturn -1, err\n\t}\n\tt.partitions = results\n\n\treturn int64(len(results)), nil\n}\n\nfunc (t *topic) jump(i int64) error {\n\tif int(i) >= len(t.partitions) || int(i) < 0 {\n\t\tt.flashMessage <- \"nothing to see here\"\n\t\treturn nil\n\t}\n\tt.offset = int(i)\n\treturn nil\n}\n\nfunc (t *topic) row() int { return t.enteredAt }\n\nfunc (t *topic) header() string {\n\treturn \"partition 1st offset current offset last offset size\"\n}\n\nfunc (t *topic) setOffset(n int64) error {\n\tfor i, part := range t.partitions {\n\t\tif n > 0 {\n\t\t\tend := part.Offset + n\n\t\t\tif end >= part.End {\n\t\t\t\tend = part.End - 1\n\t\t\t\tif end < 0 {\n\t\t\t\t\tend = 0\n\t\t\t\t}\n\t\t\t}\n\t\t\tpart.Offset = end\n\t\t} else {\n\t\t\tend := part.End + n\n\t\t\tif end <= part.Start {\n\t\t\t\tend = part.Start\n\t\t\t}\n\t\t\tpart.Offset = end\n\t\t}\n\t\tt.partitions[i] = part\n\t}\n\treturn nil\n}\n\nfunc (t *topic) page(pg int) error {\n\toffset := t.offset + (t.height * pg)\n\tif offset > len(t.partitions) {\n\t\treturn nil\n\t}\n\tif offset < 0 {\n\t\toffset = 0\n\t}\n\tt.offset = offset\n\treturn nil\n}\n\nfunc (t *topic) getRows() ([]string, error) {\n\tend := t.offset + t.height\n\tif end >= len(t.partitions) {\n\t\tend = len(t.partitions)\n\t}\n\n\tchunk := t.partitions[t.offset:end]\n\tout := make([]string, len(chunk))\n\tfor i, p := range chunk {\n\t\tout[i] = fmt.Sprintf(t.fmt, p.Partition, p.Start, p.Offset, p.End, p.End-p.Start)\n\t}\n\n\treturn out, nil\n}\n\nfunc (t *topic) enter(row int) (feeder, error) {\n\tt.enteredAt = row\n\trow = t.offset + row\n\tif row >= len(t.partitions) {\n\t\tgo func() { t.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\tp := t.partitions[row]\n\tif p.End-p.Start == 0 {\n\t\tgo func() { t.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\treturn newPartition(t.cli, p, t.width, t.height, t.flashMessage)\n}\n\nfunc (t *topic) print() {\n\tfmt.Println(t.header())\n\tf := t.fmt + \"\\n\"\n\tfor _, p := range t.partitions {\n\t\tfmt.Printf(f, p.Partition, p.Start, p.Offset, p.End, p.End-p.Start)\n\t}\n}\n\ntype partition struct {\n\tcli *kafka.Client\n\theight int\n\twidth int\n\tpartition kafka.Partition\n\trows []kafka.Message\n\tenteredAt int\n\tfmt string\n\tpg int\n\tflashMessage chan<- string\n}\n\nfunc newPartition(cli *kafka.Client, p kafka.Partition, width, height int, flashMessage chan<- string) (feeder, error) {\n\trows, err := cli.GetPartition(p, height, func(_ []byte) bool { return true })\n\treturn &partition{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\tpartition: p,\n\t\trows: rows,\n\t\tfmt: \"%-12d %s\",\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (p *partition) search(s string, cb func(int64, int64)) (int64, error) 
{\n\ti, err := p.cli.Search(p.partition, s, cb)\n\tif err != nil || i == -1 {\n\t\treturn i, err\n\t}\n\n\treturn i, p.jump(i)\n}\n\nfunc (p *partition) jump(i int64) error {\n\tif i >= p.partition.End {\n\t\treturn nil\n\t}\n\n\tp.pg = int(i) \/ p.height\n\tp.partition.Offset = i\n\trows, err := p.cli.GetPartition(p.partition, p.height, func(_ []byte) bool { return true })\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.rows = rows\n\treturn nil\n}\n\nfunc (p *partition) row() int { return p.enteredAt }\n\nfunc (p *partition) header() string {\n\treturn fmt.Sprintf(\n\t\t\"offset message topic: %s partition: %d start: %d end: %d\",\n\t\tp.partition.Topic,\n\t\tp.partition.Partition,\n\t\tp.partition.Start,\n\t\tp.partition.End,\n\t)\n}\n\nfunc (p *partition) getRows() ([]string, error) {\n\tout := make([]string, len(p.rows))\n\tfor i, msg := range p.rows {\n\t\tend := p.width\n\t\tif len(msg.Value) < end {\n\t\t\tend = len(msg.Value)\n\t\t}\n\t\tout[i] = fmt.Sprintf(p.fmt, p.partition.Offset+int64(i), string(msg.Value[:end]))\n\t}\n\n\treturn out, nil\n}\n\nfunc (p *partition) page(pg int) error {\n\tif p.pg == 0 && pg < 0 && p.partition.Offset == p.partition.Start {\n\t\treturn nil\n\t} else if p.pg == 0 && pg < 0 && p.partition.Offset > p.partition.Start {\n\t\tpg = 0\n\t}\n\n\to := int64((p.pg+pg)*p.height) + p.partition.Start\n\tif o >= p.partition.End {\n\t\treturn nil\n\t}\n\tp.pg += pg\n\tp.partition.Offset = o\n\trows, err := p.cli.GetPartition(p.partition, p.height, func(_ []byte) bool { return true })\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.rows = rows\n\treturn nil\n}\n\nfunc (p *partition) enter(row int) (feeder, error) {\n\tif row >= len(p.rows) {\n\t\tgo func() { p.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\tp.enteredAt = row\n\treturn newMessage(p.rows[row], p.width, p.height, p.flashMessage)\n}\n\nfunc (p *partition) print() {\n\tp.cli.Fetch(p.partition, p.partition.End, func(s string) {\n\t\tfmt.Println(s)\n\t})\n}\n\ntype message struct {\n\theight int\n\twidth int\n\tmsg kafka.Message\n\tenteredAt int\n\tbody []string\n\tpg int\n\tflashMessage chan<- string\n}\n\nfunc newMessage(msg kafka.Message, width, height int, flashMessage chan<- string) (feeder, error) {\n\tbuf, err := prettyMessage(msg.Value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar body []string\n\tscanner := bufio.NewScanner(buf)\n\tfor scanner.Scan() {\n\t\tbody = append(body, scanner.Text())\n\t}\n\n\treturn &message{\n\t\twidth: width,\n\t\theight: height,\n\t\tmsg: msg,\n\t\tbody: body,\n\t\tflashMessage: flashMessage,\n\t}, nil\n}\n\nfunc (m *message) print() {\n\tfor _, r := range m.body {\n\t\tfmt.Println(r)\n\t}\n}\n\nfunc (m *message) search(_ string, _ func(int64, int64)) (int64, error) { return -1, nil }\n\nfunc (m *message) jump(_ int64) error { return nil }\n\nfunc (m *message) row() int { return m.enteredAt }\n\nfunc (m *message) header() string {\n\treturn fmt.Sprintf(\n\t\t\"topic: %s partition: %d offset: %d\",\n\t\tm.msg.Partition.Topic,\n\t\tm.msg.Partition.Partition,\n\t\tm.msg.Offset,\n\t)\n}\n\nfunc (m *message) page(pg int) error {\n\tif m.pg == 0 && pg < 0 {\n\t\treturn nil\n\t}\n\tif (pg+m.pg)*m.height > len(m.body) {\n\t\treturn nil\n\t}\n\tm.pg += pg\n\treturn nil\n}\n\nfunc (m *message) enter(row int) (feeder, error) {\n\tm.enteredAt = row\n\treturn nil, errNoData\n}\n\nfunc (m *message) getRows() ([]string, error) {\n\tstart := m.pg * m.height\n\tend := start + m.height\n\tif end >= len(m.body) {\n\t\tend = 
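\/\/ clamp so the final page may be shorter than m.height\n\t\t\/\/ (e.g. 25 body lines with height 10 paginate as 10, 10 and 5)\n\t\t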
len(m.body)\n\t}\n\treturn m.body[start:end], nil\n}\n\nfunc prettyMessage(val []byte) (io.Reader, error) {\n\tvar i interface{}\n\tif err := json.Unmarshal(val, &i); err != nil {\n\t\t\/\/not json, so return original data\n\t\treturn bytes.NewBuffer(val), nil\n\t}\n\n\td, err := colors.Marshal(i)\n\tbuf := bytes.NewBuffer(d)\n\treturn buf, err\n}\nAdded search and jump to multi-line messagespackage views\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/cswank\/kcli\/internal\/colors\"\n\t\"github.com\/cswank\/kcli\/internal\/kafka\"\n)\n\n\/\/feeder feeds the screen the data that it craves\ntype feeder interface {\n\tprint()\n\tgetRows() ([]string, error)\n\tpage(page int) error\n\theader() string\n\tenter(row int) (feeder, error)\n\tjump(i int64) error\n\tsearch(s string, cb func(int64, int64)) (int64, error)\n\trow() int\n}\n\ntype root struct {\n\tcli *kafka.Client\n\twidth int\n\theight int\n\ttopics []string\n\tenteredAt int\n\tpg int\n\tflashMessage chan<- string\n}\n\nfunc newRoot(cli *kafka.Client, width, height int, flashMessage chan<- string) (*root, error) {\n\ttopics, err := cli.GetTopics()\n\tif len(topics) == 0 {\n\t\treturn nil, fmt.Errorf(\"no topics found in kafka\")\n\t}\n\n\tsort.Strings(topics)\n\treturn &root{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\ttopics: topics,\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (r *root) print() {\n\tfmt.Println(r.header())\n\tfor _, t := range r.topics {\n\t\tfmt.Println(t)\n\t}\n}\n\nfunc (r *root) page(pg int) error {\n\tif (r.pg == 0 && pg < 0) || (r.pg+pg)*r.height > len(r.topics) {\n\t\treturn nil\n\t}\n\tr.pg += pg\n\treturn nil\n}\n\nfunc (r *root) getRows() ([]string, error) {\n\tstart := r.pg * r.height\n\tend := r.pg*r.height + r.height\n\tif end >= len(r.topics) {\n\t\tend = len(r.topics)\n\t}\n\treturn r.topics[start:end], nil\n}\n\nfunc (r *root) enter(row int) (feeder, error) {\n\tif row >= len(r.topics) {\n\t\tr.flashMessage <- \"nothing to see here\"\n\t\treturn nil, errNoData\n\t}\n\tr.enteredAt = row\n\treturn newTopic(r.cli, r.topics[row], r.width, r.height, r.flashMessage)\n}\n\nfunc (r *root) jump(_ int64) error { return nil }\nfunc (r *root) search(_ string, _ func(int64, int64)) (int64, error) { return -1, nil }\n\nfunc (r *root) row() int { return r.enteredAt }\n\nfunc (r *root) header() string {\n\treturn \"topics\"\n}\n\ntype topic struct {\n\tcli *kafka.Client\n\theight int\n\twidth int\n\toffset int\n\n\ttopic string\n\tpartitions []kafka.Partition\n\tfmt string\n\tenteredAt int\n\tflashMessage chan<- string\n}\n\nfunc newTopic(cli *kafka.Client, t string, width, height int, flashMessage chan<- string) (feeder, error) {\n\tpartitions, err := cli.GetTopic(t)\n\treturn &topic{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\ttopic: t,\n\t\tpartitions: partitions,\n\t\tfmt: c2(\"%-13d %-22d %-22d %-22d %d\"),\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (t *topic) search(s string, cb func(int64, int64)) (int64, error) {\n\tresults, err := t.cli.SearchTopic(t.partitions, s, false, cb)\n\tif err != nil || len(results) == 0 {\n\t\treturn -1, err\n\t}\n\tt.partitions = results\n\n\treturn int64(len(results)), nil\n}\n\nfunc (t *topic) jump(i int64) error {\n\tif int(i) >= len(t.partitions) || int(i) < 0 {\n\t\tt.flashMessage <- \"nothing to see here\"\n\t\treturn nil\n\t}\n\tt.offset = int(i)\n\treturn nil\n}\n\nfunc (t *topic) row() int { return t.enteredAt }\n\nfunc (t *topic) header() 
string {\n\treturn \"partition 1st offset current offset last offset size\"\n}\n\nfunc (t *topic) setOffset(n int64) error {\n\tfor i, part := range t.partitions {\n\t\tif n > 0 {\n\t\t\tend := part.Offset + n\n\t\t\tif end >= part.End {\n\t\t\t\tend = part.End - 1\n\t\t\t\tif end < 0 {\n\t\t\t\t\tend = 0\n\t\t\t\t}\n\t\t\t}\n\t\t\tpart.Offset = end\n\t\t} else {\n\t\t\tend := part.End + n\n\t\t\tif end <= part.Start {\n\t\t\t\tend = part.Start\n\t\t\t}\n\t\t\tpart.Offset = end\n\t\t}\n\t\tt.partitions[i] = part\n\t}\n\treturn nil\n}\n\nfunc (t *topic) page(pg int) error {\n\toffset := t.offset + (t.height * pg)\n\tif offset > len(t.partitions) {\n\t\treturn nil\n\t}\n\tif offset < 0 {\n\t\toffset = 0\n\t}\n\tt.offset = offset\n\treturn nil\n}\n\nfunc (t *topic) getRows() ([]string, error) {\n\tend := t.offset + t.height\n\tif end >= len(t.partitions) {\n\t\tend = len(t.partitions)\n\t}\n\n\tchunk := t.partitions[t.offset:end]\n\tout := make([]string, len(chunk))\n\tfor i, p := range chunk {\n\t\tout[i] = fmt.Sprintf(t.fmt, p.Partition, p.Start, p.Offset, p.End, p.End-p.Start)\n\t}\n\n\treturn out, nil\n}\n\nfunc (t *topic) enter(row int) (feeder, error) {\n\tt.enteredAt = row\n\trow = t.offset + row\n\tif row >= len(t.partitions) {\n\t\tgo func() { t.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\tp := t.partitions[row]\n\tif p.End-p.Start == 0 {\n\t\tgo func() { t.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\treturn newPartition(t.cli, p, t.width, t.height, t.flashMessage)\n}\n\nfunc (t *topic) print() {\n\tfmt.Println(t.header())\n\tf := t.fmt + \"\\n\"\n\tfor _, p := range t.partitions {\n\t\tfmt.Printf(f, p.Partition, p.Start, p.Offset, p.End, p.End-p.Start)\n\t}\n}\n\ntype partition struct {\n\tcli *kafka.Client\n\theight int\n\twidth int\n\tpartition kafka.Partition\n\trows []kafka.Message\n\tenteredAt int\n\tfmt string\n\tpg int\n\tflashMessage chan<- string\n}\n\nfunc newPartition(cli *kafka.Client, p kafka.Partition, width, height int, flashMessage chan<- string) (feeder, error) {\n\trows, err := cli.GetPartition(p, height, func(_ []byte) bool { return true })\n\treturn &partition{\n\t\tcli: cli,\n\t\twidth: width,\n\t\theight: height,\n\t\tpartition: p,\n\t\trows: rows,\n\t\tfmt: \"%-12d %s\",\n\t\tflashMessage: flashMessage,\n\t}, err\n}\n\nfunc (p *partition) search(s string, cb func(int64, int64)) (int64, error) {\n\ti, err := p.cli.Search(p.partition, s, cb)\n\tif err != nil || i == -1 {\n\t\treturn i, err\n\t}\n\n\treturn i, p.jump(i)\n}\n\nfunc (p *partition) jump(i int64) error {\n\tif i >= p.partition.End {\n\t\treturn nil\n\t}\n\n\tp.pg = int(i) \/ p.height\n\tp.partition.Offset = i\n\trows, err := p.cli.GetPartition(p.partition, p.height, func(_ []byte) bool { return true })\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.rows = rows\n\treturn nil\n}\n\nfunc (p *partition) row() int { return p.enteredAt }\n\nfunc (p *partition) header() string {\n\treturn fmt.Sprintf(\n\t\t\"offset message topic: %s partition: %d start: %d end: %d\",\n\t\tp.partition.Topic,\n\t\tp.partition.Partition,\n\t\tp.partition.Start,\n\t\tp.partition.End,\n\t)\n}\n\nfunc (p *partition) getRows() ([]string, error) {\n\tout := make([]string, len(p.rows))\n\tfor i, msg := range p.rows {\n\t\tend := p.width\n\t\tif len(msg.Value) < end {\n\t\t\tend = len(msg.Value)\n\t\t}\n\t\tout[i] = fmt.Sprintf(p.fmt, p.partition.Offset+int64(i), string(msg.Value[:end]))\n\t}\n\n\treturn out, nil\n}\n\nfunc (p *partition) page(pg int) error {\n\tif 
p.pg == 0 && pg < 0 && p.partition.Offset == p.partition.Start {\n\t\treturn nil\n\t} else if p.pg == 0 && pg < 0 && p.partition.Offset > p.partition.Start {\n\t\tpg = 0\n\t}\n\n\to := int64((p.pg+pg)*p.height) + p.partition.Start\n\tif o >= p.partition.End {\n\t\treturn nil\n\t}\n\tp.pg += pg\n\tp.partition.Offset = o\n\trows, err := p.cli.GetPartition(p.partition, p.height, func(_ []byte) bool { return true })\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.rows = rows\n\treturn nil\n}\n\nfunc (p *partition) enter(row int) (feeder, error) {\n\tif row >= len(p.rows) {\n\t\tgo func() { p.flashMessage <- \"nothing to see here\" }()\n\t\treturn nil, errNoData\n\t}\n\tp.enteredAt = row\n\treturn newMessage(p.rows[row], p.width, p.height, p.flashMessage)\n}\n\nfunc (p *partition) print() {\n\tp.cli.Fetch(p.partition, p.partition.End, func(s string) {\n\t\tfmt.Println(s)\n\t})\n}\n\ntype message struct {\n\theight int\n\twidth int\n\tmsg kafka.Message\n\tenteredAt int\n\tbody []string\n\tpg int\n\toffset int\n\tflashMessage chan<- string\n}\n\nfunc newMessage(msg kafka.Message, width, height int, flashMessage chan<- string) (feeder, error) {\n\tbuf, err := prettyMessage(msg.Value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar body []string\n\tscanner := bufio.NewScanner(buf)\n\tfor scanner.Scan() {\n\t\tbody = append(body, scanner.Text())\n\t}\n\n\treturn &message{\n\t\twidth: width,\n\t\theight: height,\n\t\tmsg: msg,\n\t\tbody: body,\n\t\tflashMessage: flashMessage,\n\t}, nil\n}\n\nfunc (m *message) print() {\n\tfor _, r := range m.body {\n\t\tfmt.Println(r)\n\t}\n}\n\nfunc (m *message) search(s string, cb func(int64, int64)) (int64, error) {\n\tfor i, r := range m.body {\n\t\tj := strings.Index(r, s)\n\t\tif j > -1 {\n\t\t\treturn int64(j), m.jump(int64(i))\n\t\t}\n\t}\n\n\treturn -1, nil\n}\n\nfunc (m *message) jump(i int64) error {\n\tm.pg = int(i) \/ m.height\n\tm.offset = int(i) % m.height\n\treturn nil\n}\n\nfunc (m *message) row() int { return m.enteredAt }\n\nfunc (m *message) header() string {\n\treturn fmt.Sprintf(\n\t\t\"topic: %s partition: %d offset: %d\",\n\t\tm.msg.Partition.Topic,\n\t\tm.msg.Partition.Partition,\n\t\tm.msg.Offset,\n\t)\n}\n\nfunc (m *message) page(pg int) error {\n\tif m.pg == 0 && pg < 0 {\n\t\tm.offset = 0\n\t\treturn nil\n\t}\n\n\tif ((pg+m.pg)*m.height)+m.offset > len(m.body) {\n\t\treturn nil\n\t}\n\tm.pg += pg\n\tif m.pg == 0 {\n\t\tm.offset = 0\n\t}\n\treturn nil\n}\n\nfunc (m *message) enter(row int) (feeder, error) {\n\tm.enteredAt = row\n\treturn nil, errNoData\n}\n\nfunc (m *message) getRows() ([]string, error) {\n\tstart := (m.pg * m.height) + m.offset\n\tend := start + m.height\n\tif end >= len(m.body) {\n\t\tend = len(m.body)\n\t}\n\treturn m.body[start:end], nil\n}\n\nfunc prettyMessage(val []byte) (io.Reader, error) {\n\tvar i interface{}\n\tif err := json.Unmarshal(val, &i); err != nil {\n\t\t\/\/not json, so return original data\n\t\treturn bytes.NewBuffer(val), nil\n\t}\n\n\td, err := colors.Marshal(i)\n\tbuf := bytes.NewBuffer(d)\n\treturn buf, err\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2019 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage worker\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"go.opencensus.io\/trace\"\n\t\"golang.org\/x\/mod\/semver\"\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/derrors\"\n\t\"golang.org\/x\/pkgsite\/internal\/experiment\"\n\t\"golang.org\/x\/pkgsite\/internal\/fetch\"\n\t\"golang.org\/x\/pkgsite\/internal\/log\"\n\t\"golang.org\/x\/pkgsite\/internal\/postgres\"\n\t\"golang.org\/x\/pkgsite\/internal\/proxy\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n)\n\n\/\/ ProxyRemoved is a set of module@version that have been removed from the proxy,\n\/\/ even though they are still in the index.\nvar ProxyRemoved = map[string]bool{}\n\n\/\/ fetchTask represents the result of a fetch task that was processed.\ntype fetchTask struct {\n\tfetch.FetchResult\n\ttimings map[string]time.Duration\n}\n\n\/\/ A Fetcher holds state for fetching modules.\ntype Fetcher struct {\n\tProxyClient *proxy.Client\n\tSourceClient *source.Client\n\tDB *postgres.DB\n}\n\n\/\/ FetchAndUpdateState fetches and processes a module version, and then updates\n\/\/ the module_version_states table according to the result. It returns an HTTP\n\/\/ status code representing the result of the fetch operation, and a non-nil\n\/\/ error if this status code is not 200.\nfunc (f *Fetcher) FetchAndUpdateState(ctx context.Context, modulePath, requestedVersion, appVersionLabel string, disableProxyFetch bool) (_ int, resolvedVersion string, err error) {\n\tdefer derrors.Wrap(&err, \"FetchAndUpdateState(%q, %q, %q, %t)\", modulePath, requestedVersion, appVersionLabel, disableProxyFetch)\n\ttctx, span := trace.StartSpan(ctx, \"FetchAndUpdateState\")\n\tctx = experiment.NewContext(tctx, experiment.FromContext(ctx).Active()...)\n\tctx = log.NewContextWithLabel(ctx, \"fetch\", modulePath+\"@\"+requestedVersion)\n\tif !utf8.ValidString(modulePath) {\n\t\tlog.Errorf(ctx, \"module path %q is not valid UTF-8\", modulePath)\n\t}\n\tif !utf8.ValidString(requestedVersion) {\n\t\tlog.Errorf(ctx, \"requested version %q is not valid UTF-8\", requestedVersion)\n\t}\n\tspan.AddAttributes(\n\t\ttrace.StringAttribute(\"modulePath\", modulePath),\n\t\ttrace.StringAttribute(\"version\", requestedVersion))\n\tdefer span.End()\n\n\tft := f.fetchAndInsertModule(ctx, modulePath, requestedVersion, disableProxyFetch)\n\tspan.AddAttributes(trace.Int64Attribute(\"numPackages\", int64(len(ft.PackageVersionStates))))\n\n\t\/\/ If there were any errors processing the module then we didn't insert it.\n\t\/\/ Delete it in case we are reprocessing an existing module.\n\t\/\/ However, don't delete if the error was internal, or we are shedding load.\n\tif ft.Status >= 400 && ft.Status < 500 {\n\t\tif err := deleteModule(ctx, f.DB, ft); err != nil {\n\t\t\tlog.Error(ctx, err)\n\t\t\tft.Error = err\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t}\n\t\t\/\/ Do not return an error here, because we want to insert into\n\t\t\/\/ module_version_states below.\n\t}\n\t\/\/ Regardless of what the status code is, insert the result into\n\t\/\/ version_map, so that a response can be returned for frontend_fetch.\n\tif err := updateVersionMap(ctx, f.DB, ft); err != nil {\n\t\tlog.Error(ctx, err)\n\t\tif ft.Status != http.StatusInternalServerError {\n\t\t\tft.Error 
= err\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t}\n\t\t\/\/ Do not return an error here, because we want to insert into\n\t\t\/\/ module_version_states below.\n\t}\n\tif !semver.IsValid(ft.ResolvedVersion) {\n\t\t\/\/ If the requestedVersion was not successfully resolved to a semantic\n\t\t\/\/ version, then at this point it will be the same as the\n\t\t\/\/ resolvedVersion. This fetch request does not need to be recorded in\n\t\t\/\/ module_version_states, since that table is only used to track\n\t\t\/\/ modules that have been published to index.golang.org.\n\t\treturn ft.Status, ft.ResolvedVersion, ft.Error\n\t}\n\n\t\/\/ Update the module_version_states table with the new status of\n\t\/\/ module@version. This must happen last, because if it succeeds with a\n\t\/\/ code < 500 but a later action fails, we will never retry the later\n\t\/\/ action.\n\t\/\/ TODO(golang\/go#39628): Split UpsertModuleVersionState into\n\t\/\/ InsertModuleVersionState and UpdateModuleVersionState.\n\tstart := time.Now()\n\terr = f.DB.UpsertModuleVersionState(ctx, ft.ModulePath, ft.ResolvedVersion, appVersionLabel,\n\t\ttime.Time{}, ft.Status, ft.GoModPath, ft.Error, ft.PackageVersionStates)\n\tft.timings[\"db.UpsertModuleVersionState\"] = time.Since(start)\n\tif err != nil {\n\t\tlog.Error(ctx, err)\n\t\tif ft.Error != nil {\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t\tft.Error = fmt.Errorf(\"db.UpsertModuleVersionState: %v, original error: %v\", err, ft.Error)\n\t\t}\n\t\tlogTaskResult(ctx, ft, \"Failed to update module version state\")\n\t\treturn http.StatusInternalServerError, ft.ResolvedVersion, ft.Error\n\t}\n\tlogTaskResult(ctx, ft, \"Updated module version state\")\n\treturn ft.Status, ft.ResolvedVersion, ft.Error\n}\n\n\/\/ fetchAndInsertModule fetches the given module version from the module proxy\n\/\/ or (in the case of the standard library) from the Go repo and writes the\n\/\/ resulting data to the database.\n\/\/\n\/\/ The given parentCtx is used for tracing, but fetches actually execute in a\n\/\/ detached context with fixed timeout, so that fetches are allowed to complete\n\/\/ even for short-lived requests.\nfunc (f *Fetcher) fetchAndInsertModule(ctx context.Context, modulePath, requestedVersion string, disableProxyFetch bool) *fetchTask {\n\tft := &fetchTask{\n\t\tFetchResult: fetch.FetchResult{\n\t\t\tModulePath: modulePath,\n\t\t\tRequestedVersion: requestedVersion,\n\t\t},\n\t\ttimings: map[string]time.Duration{},\n\t}\n\tdefer func() {\n\t\tderrors.Wrap(&ft.Error, \"fetchAndInsertModule(%q, %q)\", modulePath, requestedVersion)\n\t\tif ft.Error != nil {\n\t\t\tft.Status = derrors.ToStatus(ft.Error)\n\t\t\tft.ResolvedVersion = requestedVersion\n\t\t}\n\t}()\n\n\tif ProxyRemoved[modulePath+\"@\"+requestedVersion] {\n\t\tlog.Infof(ctx, \"not fetching %s@%s because it is on the ProxyRemoved list\", modulePath, requestedVersion)\n\t\tft.Error = derrors.Excluded\n\t\treturn ft\n\t}\n\n\texc, err := f.DB.IsExcluded(ctx, modulePath)\n\tif err != nil {\n\t\tft.Error = err\n\t\treturn ft\n\t}\n\tif exc {\n\t\tft.Error = derrors.Excluded\n\t\treturn ft\n\t}\n\n\t\/\/ Fetch the module, and the current @main and @master version of this module.\n\t\/\/ The @main and @master version will be used to update the version_map\n\t\/\/ target if applicable.\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tstart := time.Now()\n\t\tfr := fetch.FetchModule(ctx, modulePath, requestedVersion, f.ProxyClient, f.SourceClient, disableProxyFetch)\n\t\tif fr == nil 
{\n\t\t\tpanic(\"fetch.FetchModule should never return a nil FetchResult\")\n\t\t}\n\t\tdefer fr.Defer()\n\t\tft.FetchResult = *fr\n\t\tft.timings[\"fetch.FetchModule\"] = time.Since(start)\n\t}()\n\t\/\/ Do not resolve the @main and @master version if disableProxyFetch is on.\n\tvar main string\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif !disableProxyFetch {\n\t\t\tmain = resolvedVersion(ctx, modulePath, internal.MainVersion, f.ProxyClient)\n\t\t}\n\t}()\n\tvar master string\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif !disableProxyFetch {\n\t\t\tmaster = resolvedVersion(ctx, modulePath, internal.MasterVersion, f.ProxyClient)\n\t\t}\n\t}()\n\twg.Wait()\n\tft.MainVersion = main\n\tft.MasterVersion = master\n\n\t\/\/ There was an error fetching this module.\n\tif ft.Error != nil {\n\t\tlogf := log.Infof\n\t\tif ft.Status == http.StatusServiceUnavailable {\n\t\t\tlogf = log.Warningf\n\t\t} else if ft.Status >= 500 && ft.Status != derrors.ToStatus(derrors.ProxyTimedOut) {\n\t\t\tlogf = log.Errorf\n\t\t}\n\t\tlogf(ctx, \"Error executing fetch: %v (code %d)\", ft.Error, ft.Status)\n\t\treturn ft\n\t}\n\n\t\/\/ The module was successfully fetched.\n\tlog.Infof(ctx, \"fetch.FetchModule succeeded for %s@%s\", ft.ModulePath, ft.RequestedVersion)\n\tstart := time.Now()\n\terr = f.DB.InsertModule(ctx, ft.Module)\n\tft.timings[\"db.InsertModule\"] = time.Since(start)\n\tif err != nil {\n\t\tlog.Error(ctx, err)\n\n\t\tft.Status = derrors.ToStatus(err)\n\t\tft.Error = err\n\t\treturn ft\n\t}\n\tlog.Infof(ctx, \"db.InsertModule succeeded for %s@%s\", ft.ModulePath, ft.RequestedVersion)\n\treturn ft\n}\n\nfunc resolvedVersion(ctx context.Context, modulePath, requestedVersion string, proxyClient *proxy.Client) string {\n\tif modulePath == stdlib.ModulePath && requestedVersion == internal.MainVersion {\n\t\treturn \"\"\n\t}\n\tinfo, err := fetch.GetInfo(ctx, modulePath, requestedVersion, proxyClient, false)\n\tif err != nil {\n\t\tif !errors.Is(err, derrors.NotFound) {\n\t\t\t\/\/ If an error occurs, log it and insert the module as\n\t\t\t\/\/ normal.\n\t\t\tlog.Errorf(ctx, \"fetch.GetInfo(ctx, %q, %q, f.ProxyClient, false): %v\", modulePath, requestedVersion, err)\n\t\t}\n\t\tlog.Infof(ctx, \"fetch.GetInfo(ctx, %q, %q, f.ProxyClient, false): %v\", modulePath, requestedVersion, err)\n\t\treturn \"\"\n\t}\n\treturn info.Version\n}\n\nfunc updateVersionMap(ctx context.Context, db *postgres.DB, ft *fetchTask) (err error) {\n\tstart := time.Now()\n\tdefer func() {\n\t\tft.timings[\"worker.updatedVersionMap\"] = time.Since(start)\n\t\tderrors.Wrap(&err, \"updateVersionMap(%q, %q, %q, %d, %v)\",\n\t\t\tft.ModulePath, ft.RequestedVersion, ft.ResolvedVersion, ft.Status, ft.Error)\n\t}()\n\tctx, span := trace.StartSpan(ctx, \"worker.updateVersionMap\")\n\tdefer span.End()\n\n\tvar errMsg string\n\tif ft.Error != nil {\n\t\terrMsg = ft.Error.Error()\n\t}\n\n\t\/\/ If the resolved version for the this module version is also the resolved\n\t\/\/ version for @main or @master, update version_map to match.\n\trequestedVersions := []string{ft.RequestedVersion}\n\tif ft.MainVersion == ft.ResolvedVersion {\n\t\trequestedVersions = append(requestedVersions, internal.MainVersion)\n\t}\n\tif ft.MasterVersion == ft.ResolvedVersion {\n\t\trequestedVersions = append(requestedVersions, internal.MasterVersion)\n\t}\n\tfor _, v := range requestedVersions {\n\t\tv := v\n\t\tvm := &internal.VersionMap{\n\t\t\tModulePath: ft.ModulePath,\n\t\t\tRequestedVersion: v,\n\t\t\tResolvedVersion: 
ft.ResolvedVersion,\n\t\t\tStatus: ft.Status,\n\t\t\tGoModPath: ft.GoModPath,\n\t\t\tError: errMsg,\n\t\t}\n\t\tif err := db.UpsertVersionMap(ctx, vm); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteModule(ctx context.Context, db *postgres.DB, ft *fetchTask) (err error) {\n\tstart := time.Now()\n\tdefer func() {\n\t\tft.timings[\"worker.deleteModule\"] = time.Since(start)\n\t\tderrors.Wrap(&err, \"deleteModule(%q, %q, %q, %d, %v)\",\n\t\t\tft.ModulePath, ft.RequestedVersion, ft.ResolvedVersion, ft.Status, ft.Error)\n\t}()\n\tctx, span := trace.StartSpan(ctx, \"worker.deleteModule\")\n\tdefer span.End()\n\n\tlog.Infof(ctx, \"%s@%s: code=%d, deleting\", ft.ModulePath, ft.ResolvedVersion, ft.Status)\n\tif err := db.DeleteModule(ctx, ft.ModulePath, ft.ResolvedVersion); err != nil {\n\t\treturn err\n\t}\n\t\/\/ If this was an alternative path (ft.Status == 491) and there is an older\n\t\/\/ version in search_documents, delete it. This is the case where a module's\n\t\/\/ canonical path was changed by the addition of a go.mod file. For example,\n\t\/\/ versions of logrus before it acquired a go.mod file could have the path\n\t\/\/ github.com\/Sirupsen\/logrus, but once the go.mod file specifies that the\n\t\/\/ path is all lower-case, the old versions should not show up in search. We\n\t\/\/ still leave their pages in the database so users of those old versions\n\t\/\/ can still view documentation.\n\tif ft.Status == derrors.ToStatus(derrors.AlternativeModule) {\n\t\tlog.Infof(ctx, \"%s@%s: code=491, deleting older version from search\", ft.ModulePath, ft.ResolvedVersion)\n\t\tif err := db.DeleteOlderVersionFromSearchDocuments(ctx, ft.ModulePath, ft.ResolvedVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc logTaskResult(ctx context.Context, ft *fetchTask, prefix string) {\n\tvar times []string\n\tfor k, v := range ft.timings {\n\t\ttimes = append(times, fmt.Sprintf(\"%s=%.3fs\", k, v.Seconds()))\n\t}\n\tsort.Strings(times)\n\tmsg := strings.Join(times, \", \")\n\tlogf := log.Infof\n\tif ft.Status == http.StatusInternalServerError {\n\t\tlogf = log.Errorf\n\t}\n\tlogf(ctx, \"%s for %s@%s: code=%d, num_packages=%d, err=%v; timings: %s\",\n\t\tprefix, ft.ModulePath, ft.ResolvedVersion, ft.Status, len(ft.PackageVersionStates), ft.Error, msg)\n}\ninternal\/worker: delete unnecessary log\/\/ Copyright 2019 The Go Authors. 
All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage worker\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\/utf8\"\n\n\t\"go.opencensus.io\/trace\"\n\t\"golang.org\/x\/mod\/semver\"\n\t\"golang.org\/x\/pkgsite\/internal\"\n\t\"golang.org\/x\/pkgsite\/internal\/derrors\"\n\t\"golang.org\/x\/pkgsite\/internal\/experiment\"\n\t\"golang.org\/x\/pkgsite\/internal\/fetch\"\n\t\"golang.org\/x\/pkgsite\/internal\/log\"\n\t\"golang.org\/x\/pkgsite\/internal\/postgres\"\n\t\"golang.org\/x\/pkgsite\/internal\/proxy\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n)\n\n\/\/ ProxyRemoved is a set of module@version that have been removed from the proxy,\n\/\/ even though they are still in the index.\nvar ProxyRemoved = map[string]bool{}\n\n\/\/ fetchTask represents the result of a fetch task that was processed.\ntype fetchTask struct {\n\tfetch.FetchResult\n\ttimings map[string]time.Duration\n}\n\n\/\/ A Fetcher holds state for fetching modules.\ntype Fetcher struct {\n\tProxyClient *proxy.Client\n\tSourceClient *source.Client\n\tDB *postgres.DB\n}\n\n\/\/ FetchAndUpdateState fetches and processes a module version, and then updates\n\/\/ the module_version_states table according to the result. It returns an HTTP\n\/\/ status code representing the result of the fetch operation, and a non-nil\n\/\/ error if this status code is not 200.\nfunc (f *Fetcher) FetchAndUpdateState(ctx context.Context, modulePath, requestedVersion, appVersionLabel string, disableProxyFetch bool) (_ int, resolvedVersion string, err error) {\n\tdefer derrors.Wrap(&err, \"FetchAndUpdateState(%q, %q, %q, %t)\", modulePath, requestedVersion, appVersionLabel, disableProxyFetch)\n\ttctx, span := trace.StartSpan(ctx, \"FetchAndUpdateState\")\n\tctx = experiment.NewContext(tctx, experiment.FromContext(ctx).Active()...)\n\tctx = log.NewContextWithLabel(ctx, \"fetch\", modulePath+\"@\"+requestedVersion)\n\tif !utf8.ValidString(modulePath) {\n\t\tlog.Errorf(ctx, \"module path %q is not valid UTF-8\", modulePath)\n\t}\n\tif !utf8.ValidString(requestedVersion) {\n\t\tlog.Errorf(ctx, \"requested version %q is not valid UTF-8\", requestedVersion)\n\t}\n\tspan.AddAttributes(\n\t\ttrace.StringAttribute(\"modulePath\", modulePath),\n\t\ttrace.StringAttribute(\"version\", requestedVersion))\n\tdefer span.End()\n\n\tft := f.fetchAndInsertModule(ctx, modulePath, requestedVersion, disableProxyFetch)\n\tspan.AddAttributes(trace.Int64Attribute(\"numPackages\", int64(len(ft.PackageVersionStates))))\n\n\t\/\/ If there were any errors processing the module then we didn't insert it.\n\t\/\/ Delete it in case we are reprocessing an existing module.\n\t\/\/ However, don't delete if the error was internal, or we are shedding load.\n\tif ft.Status >= 400 && ft.Status < 500 {\n\t\tif err := deleteModule(ctx, f.DB, ft); err != nil {\n\t\t\tlog.Error(ctx, err)\n\t\t\tft.Error = err\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t}\n\t\t\/\/ Do not return an error here, because we want to insert into\n\t\t\/\/ module_version_states below.\n\t}\n\t\/\/ Regardless of what the status code is, insert the result into\n\t\/\/ version_map, so that a response can be returned for frontend_fetch.\n\tif err := updateVersionMap(ctx, f.DB, ft); err != nil {\n\t\tlog.Error(ctx, err)\n\t\tif ft.Status != http.StatusInternalServerError {\n\t\t\tft.Error 
= err\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t}\n\t\t\/\/ Do not return an error here, because we want to insert into\n\t\t\/\/ module_version_states below.\n\t}\n\tif !semver.IsValid(ft.ResolvedVersion) {\n\t\t\/\/ If the requestedVersion was not successfully resolved to a semantic\n\t\t\/\/ version, then at this point it will be the same as the\n\t\t\/\/ resolvedVersion. This fetch request does not need to be recorded in\n\t\t\/\/ module_version_states, since that table is only used to track\n\t\t\/\/ modules that have been published to index.golang.org.\n\t\treturn ft.Status, ft.ResolvedVersion, ft.Error\n\t}\n\n\t\/\/ Update the module_version_states table with the new status of\n\t\/\/ module@version. This must happen last, because if it succeeds with a\n\t\/\/ code < 500 but a later action fails, we will never retry the later\n\t\/\/ action.\n\t\/\/ TODO(golang\/go#39628): Split UpsertModuleVersionState into\n\t\/\/ InsertModuleVersionState and UpdateModuleVersionState.\n\tstart := time.Now()\n\terr = f.DB.UpsertModuleVersionState(ctx, ft.ModulePath, ft.ResolvedVersion, appVersionLabel,\n\t\ttime.Time{}, ft.Status, ft.GoModPath, ft.Error, ft.PackageVersionStates)\n\tft.timings[\"db.UpsertModuleVersionState\"] = time.Since(start)\n\tif err != nil {\n\t\tlog.Error(ctx, err)\n\t\tif ft.Error != nil {\n\t\t\tft.Status = http.StatusInternalServerError\n\t\t\tft.Error = fmt.Errorf(\"db.UpsertModuleVersionState: %v, original error: %v\", err, ft.Error)\n\t\t}\n\t\tlogTaskResult(ctx, ft, \"Failed to update module version state\")\n\t\treturn http.StatusInternalServerError, ft.ResolvedVersion, ft.Error\n\t}\n\tlogTaskResult(ctx, ft, \"Updated module version state\")\n\treturn ft.Status, ft.ResolvedVersion, ft.Error\n}\n\n\/\/ fetchAndInsertModule fetches the given module version from the module proxy\n\/\/ or (in the case of the standard library) from the Go repo and writes the\n\/\/ resulting data to the database.\n\/\/\n\/\/ The given parentCtx is used for tracing, but fetches actually execute in a\n\/\/ detached context with fixed timeout, so that fetches are allowed to complete\n\/\/ even for short-lived requests.\nfunc (f *Fetcher) fetchAndInsertModule(ctx context.Context, modulePath, requestedVersion string, disableProxyFetch bool) *fetchTask {\n\tft := &fetchTask{\n\t\tFetchResult: fetch.FetchResult{\n\t\t\tModulePath: modulePath,\n\t\t\tRequestedVersion: requestedVersion,\n\t\t},\n\t\ttimings: map[string]time.Duration{},\n\t}\n\tdefer func() {\n\t\tderrors.Wrap(&ft.Error, \"fetchAndInsertModule(%q, %q)\", modulePath, requestedVersion)\n\t\tif ft.Error != nil {\n\t\t\tft.Status = derrors.ToStatus(ft.Error)\n\t\t\tft.ResolvedVersion = requestedVersion\n\t\t}\n\t}()\n\n\tif ProxyRemoved[modulePath+\"@\"+requestedVersion] {\n\t\tlog.Infof(ctx, \"not fetching %s@%s because it is on the ProxyRemoved list\", modulePath, requestedVersion)\n\t\tft.Error = derrors.Excluded\n\t\treturn ft\n\t}\n\n\texc, err := f.DB.IsExcluded(ctx, modulePath)\n\tif err != nil {\n\t\tft.Error = err\n\t\treturn ft\n\t}\n\tif exc {\n\t\tft.Error = derrors.Excluded\n\t\treturn ft\n\t}\n\n\t\/\/ Fetch the module, and the current @main and @master version of this module.\n\t\/\/ The @main and @master version will be used to update the version_map\n\t\/\/ target if applicable.\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tstart := time.Now()\n\t\tfr := fetch.FetchModule(ctx, modulePath, requestedVersion, f.ProxyClient, f.SourceClient, disableProxyFetch)\n\t\tif fr == nil 
{\n\t\t\tpanic(\"fetch.FetchModule should never return a nil FetchResult\")\n\t\t}\n\t\tdefer fr.Defer()\n\t\tft.FetchResult = *fr\n\t\tft.timings[\"fetch.FetchModule\"] = time.Since(start)\n\t}()\n\t\/\/ Do not resolve the @main and @master version if disableProxyFetch is on.\n\tvar main string\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif !disableProxyFetch {\n\t\t\tmain = resolvedVersion(ctx, modulePath, internal.MainVersion, f.ProxyClient)\n\t\t}\n\t}()\n\tvar master string\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif !disableProxyFetch {\n\t\t\tmaster = resolvedVersion(ctx, modulePath, internal.MasterVersion, f.ProxyClient)\n\t\t}\n\t}()\n\twg.Wait()\n\tft.MainVersion = main\n\tft.MasterVersion = master\n\n\t\/\/ There was an error fetching this module.\n\tif ft.Error != nil {\n\t\tlogf := log.Infof\n\t\tif ft.Status == http.StatusServiceUnavailable {\n\t\t\tlogf = log.Warningf\n\t\t} else if ft.Status >= 500 && ft.Status != derrors.ToStatus(derrors.ProxyTimedOut) {\n\t\t\tlogf = log.Errorf\n\t\t}\n\t\tlogf(ctx, \"Error executing fetch: %v (code %d)\", ft.Error, ft.Status)\n\t\treturn ft\n\t}\n\n\t\/\/ The module was successfully fetched.\n\tlog.Infof(ctx, \"fetch.FetchModule succeeded for %s@%s\", ft.ModulePath, ft.RequestedVersion)\n\tstart := time.Now()\n\terr = f.DB.InsertModule(ctx, ft.Module)\n\tft.timings[\"db.InsertModule\"] = time.Since(start)\n\tif err != nil {\n\t\tlog.Error(ctx, err)\n\n\t\tft.Status = derrors.ToStatus(err)\n\t\tft.Error = err\n\t\treturn ft\n\t}\n\tlog.Infof(ctx, \"db.InsertModule succeeded for %s@%s\", ft.ModulePath, ft.RequestedVersion)\n\treturn ft\n}\n\nfunc resolvedVersion(ctx context.Context, modulePath, requestedVersion string, proxyClient *proxy.Client) string {\n\tif modulePath == stdlib.ModulePath && requestedVersion == internal.MainVersion {\n\t\treturn \"\"\n\t}\n\tinfo, err := fetch.GetInfo(ctx, modulePath, requestedVersion, proxyClient, false)\n\tif err != nil {\n\t\tif !errors.Is(err, derrors.NotFound) {\n\t\t\t\/\/ If an error occurs, log it and insert the module as normal.\n\t\t\tlog.Errorf(ctx, \"fetch.GetInfo(ctx, %q, %q, f.ProxyClient, false): %v\", modulePath, requestedVersion, err)\n\t\t}\n\t\treturn \"\"\n\t}\n\treturn info.Version\n}\n\nfunc updateVersionMap(ctx context.Context, db *postgres.DB, ft *fetchTask) (err error) {\n\tstart := time.Now()\n\tdefer func() {\n\t\tft.timings[\"worker.updatedVersionMap\"] = time.Since(start)\n\t\tderrors.Wrap(&err, \"updateVersionMap(%q, %q, %q, %d, %v)\",\n\t\t\tft.ModulePath, ft.RequestedVersion, ft.ResolvedVersion, ft.Status, ft.Error)\n\t}()\n\tctx, span := trace.StartSpan(ctx, \"worker.updateVersionMap\")\n\tdefer span.End()\n\n\tvar errMsg string\n\tif ft.Error != nil {\n\t\terrMsg = ft.Error.Error()\n\t}\n\n\t\/\/ If the resolved version for the this module version is also the resolved\n\t\/\/ version for @main or @master, update version_map to match.\n\trequestedVersions := []string{ft.RequestedVersion}\n\tif ft.MainVersion == ft.ResolvedVersion {\n\t\trequestedVersions = append(requestedVersions, internal.MainVersion)\n\t}\n\tif ft.MasterVersion == ft.ResolvedVersion {\n\t\trequestedVersions = append(requestedVersions, internal.MasterVersion)\n\t}\n\tfor _, v := range requestedVersions {\n\t\tv := v\n\t\tvm := &internal.VersionMap{\n\t\t\tModulePath: ft.ModulePath,\n\t\t\tRequestedVersion: v,\n\t\t\tResolvedVersion: ft.ResolvedVersion,\n\t\t\tStatus: ft.Status,\n\t\t\tGoModPath: ft.GoModPath,\n\t\t\tError: errMsg,\n\t\t}\n\t\tif err := db.UpsertVersionMap(ctx, 
vm); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteModule(ctx context.Context, db *postgres.DB, ft *fetchTask) (err error) {\n\tstart := time.Now()\n\tdefer func() {\n\t\tft.timings[\"worker.deleteModule\"] = time.Since(start)\n\t\tderrors.Wrap(&err, \"deleteModule(%q, %q, %q, %d, %v)\",\n\t\t\tft.ModulePath, ft.RequestedVersion, ft.ResolvedVersion, ft.Status, ft.Error)\n\t}()\n\tctx, span := trace.StartSpan(ctx, \"worker.deleteModule\")\n\tdefer span.End()\n\n\tlog.Infof(ctx, \"%s@%s: code=%d, deleting\", ft.ModulePath, ft.ResolvedVersion, ft.Status)\n\tif err := db.DeleteModule(ctx, ft.ModulePath, ft.ResolvedVersion); err != nil {\n\t\treturn err\n\t}\n\t\/\/ If this was an alternative path (ft.Status == 491) and there is an older\n\t\/\/ version in search_documents, delete it. This is the case where a module's\n\t\/\/ canonical path was changed by the addition of a go.mod file. For example,\n\t\/\/ versions of logrus before it acquired a go.mod file could have the path\n\t\/\/ github.com\/Sirupsen\/logrus, but once the go.mod file specifies that the\n\t\/\/ path is all lower-case, the old versions should not show up in search. We\n\t\/\/ still leave their pages in the database so users of those old versions\n\t\/\/ can still view documentation.\n\tif ft.Status == derrors.ToStatus(derrors.AlternativeModule) {\n\t\tlog.Infof(ctx, \"%s@%s: code=491, deleting older version from search\", ft.ModulePath, ft.ResolvedVersion)\n\t\tif err := db.DeleteOlderVersionFromSearchDocuments(ctx, ft.ModulePath, ft.ResolvedVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc logTaskResult(ctx context.Context, ft *fetchTask, prefix string) {\n\tvar times []string\n\tfor k, v := range ft.timings {\n\t\ttimes = append(times, fmt.Sprintf(\"%s=%.3fs\", k, v.Seconds()))\n\t}\n\tsort.Strings(times)\n\tmsg := strings.Join(times, \", \")\n\tlogf := log.Infof\n\tif ft.Status == http.StatusInternalServerError {\n\t\tlogf = log.Errorf\n\t}\n\tlogf(ctx, \"%s for %s@%s: code=%d, num_packages=%d, err=%v; timings: %s\",\n\t\tprefix, ft.ModulePath, ft.ResolvedVersion, ft.Status, len(ft.PackageVersionStates), ft.Error, msg)\n}\n<|endoftext|>"} {"text":"package chunkymonkey\n\nimport (\n \"os\"\n \"io\"\n \"log\"\n \"net\"\n \"math\"\n \"bytes\"\n\n \"chunkymonkey\/proto\"\n . 
\"chunkymonkey\/types\"\n)\n\ntype Player struct {\n Entity\n game *Game\n conn net.Conn\n name string\n position AbsXYZ\n look LookDegrees\n currentItem ItemID\n txQueue chan []byte\n}\n\nconst StanceNormal = 1.62\n\nfunc StartPlayer(game *Game, conn net.Conn, name string) {\n player := &Player{\n game: game,\n conn: conn,\n name: name,\n position: StartPosition,\n look: LookDegrees{0, 0},\n txQueue: make(chan []byte, 128),\n }\n\n game.Enqueue(func(game *Game) {\n game.AddPlayer(player)\n \/\/ TODO pass proper map seed and dimension\n proto.ServerWriteLogin(conn, player.Entity.EntityID, 0, DimensionNormal)\n player.start()\n player.postLogin()\n })\n}\n\nfunc (player *Player) start() {\n go player.ReceiveLoop()\n go player.TransmitLoop()\n}\n\nfunc (player *Player) PacketKeepAlive() {\n}\n\nfunc (player *Player) PacketChatMessage(message string) {\n player.game.Enqueue(func(game *Game) { game.SendChatMessage(message) })\n}\n\nfunc (player *Player) PacketEntityAction(entityID EntityID, action EntityAction) {\n}\n\nfunc (player *Player) PacketUseEntity(user EntityID, target EntityID, leftClick bool) {\n}\n\nfunc (player *Player) PacketRespawn() {\n}\n\nfunc (player *Player) PacketPlayer(onGround bool) {\n}\n\nfunc (player *Player) PacketPlayerPosition(position *AbsXYZ, stance AbsCoord, onGround bool) {\n \/\/ TODO: Should keep track of when players enter\/leave their mutual radius\n \/\/ of \"awareness\". I.e a client should receive a RemoveEntity packet when\n \/\/ the player walks out of range, and no longer receive WriteEntityTeleport\n \/\/ packets for them. The converse should happen when players come in range\n \/\/ of each other.\n\n player.game.Enqueue(func(game *Game) {\n var delta = AbsXYZ{position.X - player.position.X,\n position.Y - player.position.Y,\n position.Z - player.position.Z}\n distance := math.Sqrt(float64(delta.X*delta.X + delta.Y*delta.Y + delta.Z*delta.Z))\n if distance > 10 {\n log.Printf(\"Discarding player position that is too far removed (%.2f, %.2f, %.2f)\",\n position.X, position.Y, position.Z)\n return\n }\n\n player.position = *position\n\n buf := &bytes.Buffer{}\n proto.WriteEntityTeleport(\n buf,\n player.EntityID,\n player.position.ToAbsIntXYZ(),\n player.look.ToLookBytes())\n game.MulticastPacket(buf.Bytes(), player)\n })\n}\n\nfunc (player *Player) PacketPlayerLook(look *LookDegrees, onGround bool) {\n player.game.Enqueue(func(game *Game) {\n \/\/ TODO input validation\n player.look = *look\n\n buf := &bytes.Buffer{}\n proto.WriteEntityLook(buf, player.EntityID, look.ToLookBytes())\n game.MulticastPacket(buf.Bytes(), player)\n })\n}\n\nfunc (player *Player) PacketPlayerDigging(status DigStatus, blockLoc *BlockXYZ, face Face) {\n \/\/ TODO validate that the player is actually somewhere near the block\n\n if status == DigBlockBroke {\n \/\/ TODO validate that the player has dug long enough to stop speed\n \/\/ hacking (based on block type and tool used - non-trivial).\n\n player.game.Enqueue(func(game *Game) {\n chunkLoc, subLoc := blockLoc.ToChunkLocal()\n\n chunk := game.chunkManager.Get(chunkLoc)\n\n if chunk == nil {\n return\n }\n\n chunk.DestroyBlock(subLoc)\n })\n }\n}\n\nfunc (player *Player) PacketPlayerBlockPlacement(itemID ItemID, blockLoc *BlockXYZ, face Face, amount ItemCount, uses ItemUses) {\n}\n\nfunc (player *Player) PacketHoldingChange(itemID ItemID) {\n}\n\nfunc (player *Player) PacketEntityAnimation(entityID EntityID, animation EntityAnimation) {\n}\n\nfunc (player *Player) PacketWindowClose(windowID WindowID) {\n}\n\nfunc (player 
*Player) PacketWindowClick(windowID WindowID, slot SlotID, rightClick bool, txID TxID, itemID ItemID, amount ItemCount, uses ItemUses) {\n}\n\nfunc (player *Player) PacketSignUpdate(position *BlockXYZ, lines [4]string) {\n}\n\nfunc (player *Player) PacketDisconnect(reason string) {\n log.Printf(\"Player %s disconnected reason=%s\", player.name, reason)\n player.game.Enqueue(func(game *Game) {\n game.RemovePlayer(player)\n close(player.txQueue)\n player.conn.Close()\n })\n}\n\nfunc (player *Player) ReceiveLoop() {\n for {\n err := proto.ServerReadPacket(player.conn, player)\n if err != nil {\n if err != os.EOF {\n log.Print(\"ReceiveLoop failed: \", err.String())\n }\n return\n }\n }\n}\n\nfunc (player *Player) TransmitLoop() {\n for {\n bs := <-player.txQueue\n if bs == nil {\n return \/\/ txQueue closed\n }\n\n _, err := player.conn.Write(bs)\n if err != nil {\n if err != os.EOF {\n log.Print(\"TransmitLoop failed: \", err.String())\n }\n return\n }\n }\n}\n\nfunc (player *Player) sendChunks(writer io.Writer) {\n playerChunkLoc := player.position.ToChunkXZ()\n\n for chunk := range player.game.chunkManager.ChunksInRadius(playerChunkLoc) {\n proto.WritePreChunk(writer, &chunk.XZ, ChunkInit)\n }\n\n for chunk := range player.game.chunkManager.ChunksInRadius(playerChunkLoc) {\n chunk.SendChunkData(writer)\n }\n}\n\nfunc (player *Player) TransmitPacket(packet []byte) {\n if packet == nil {\n return \/\/ skip empty packets\n }\n player.txQueue <- packet\n}\n\nfunc (player *Player) postLogin() {\n buf := &bytes.Buffer{}\n proto.WriteSpawnPosition(buf, player.position.ToBlockXYZ())\n player.sendChunks(buf)\n proto.ServerWritePlayerPositionLook(buf, &player.position, &player.look,\n player.position.Y+StanceNormal, false)\n player.TransmitPacket(buf.Bytes())\n}\nAdded expvar for cumulative number of player connects\/disconnects.package chunkymonkey\n\nimport (\n \"bytes\"\n \"expvar\"\n \"io\"\n \"log\"\n \"math\"\n \"net\"\n \"os\"\n\n \"chunkymonkey\/proto\"\n . 
\"chunkymonkey\/types\"\n)\n\nvar (\n expVarPlayerConnectionCount *expvar.Int\n expVarPlayerDisconnectionCount *expvar.Int\n)\n\nfunc init() {\n expVarPlayerConnectionCount = expvar.NewInt(\"player-connection-count\")\n expVarPlayerDisconnectionCount = expvar.NewInt(\"player-disconnection-count\")\n}\n\ntype Player struct {\n Entity\n game *Game\n conn net.Conn\n name string\n position AbsXYZ\n look LookDegrees\n currentItem ItemID\n txQueue chan []byte\n}\n\nconst StanceNormal = 1.62\n\nfunc StartPlayer(game *Game, conn net.Conn, name string) {\n player := &Player{\n game: game,\n conn: conn,\n name: name,\n position: StartPosition,\n look: LookDegrees{0, 0},\n txQueue: make(chan []byte, 128),\n }\n\n game.Enqueue(func(game *Game) {\n game.AddPlayer(player)\n \/\/ TODO pass proper map seed and dimension\n proto.ServerWriteLogin(conn, player.Entity.EntityID, 0, DimensionNormal)\n player.start()\n player.postLogin()\n })\n}\n\nfunc (player *Player) start() {\n expVarPlayerConnectionCount.Add(1)\n go player.ReceiveLoop()\n go player.TransmitLoop()\n}\n\nfunc (player *Player) PacketKeepAlive() {\n}\n\nfunc (player *Player) PacketChatMessage(message string) {\n player.game.Enqueue(func(game *Game) { game.SendChatMessage(message) })\n}\n\nfunc (player *Player) PacketEntityAction(entityID EntityID, action EntityAction) {\n}\n\nfunc (player *Player) PacketUseEntity(user EntityID, target EntityID, leftClick bool) {\n}\n\nfunc (player *Player) PacketRespawn() {\n}\n\nfunc (player *Player) PacketPlayer(onGround bool) {\n}\n\nfunc (player *Player) PacketPlayerPosition(position *AbsXYZ, stance AbsCoord, onGround bool) {\n \/\/ TODO: Should keep track of when players enter\/leave their mutual radius\n \/\/ of \"awareness\". I.e a client should receive a RemoveEntity packet when\n \/\/ the player walks out of range, and no longer receive WriteEntityTeleport\n \/\/ packets for them. 
The converse should happen when players come in range\n \/\/ of each other.\n\n player.game.Enqueue(func(game *Game) {\n var delta = AbsXYZ{position.X - player.position.X,\n position.Y - player.position.Y,\n position.Z - player.position.Z}\n distance := math.Sqrt(float64(delta.X*delta.X + delta.Y*delta.Y + delta.Z*delta.Z))\n if distance > 10 {\n log.Printf(\"Discarding player position that is too far removed (%.2f, %.2f, %.2f)\",\n position.X, position.Y, position.Z)\n return\n }\n\n player.position = *position\n\n buf := &bytes.Buffer{}\n proto.WriteEntityTeleport(\n buf,\n player.EntityID,\n player.position.ToAbsIntXYZ(),\n player.look.ToLookBytes())\n game.MulticastPacket(buf.Bytes(), player)\n })\n}\n\nfunc (player *Player) PacketPlayerLook(look *LookDegrees, onGround bool) {\n player.game.Enqueue(func(game *Game) {\n \/\/ TODO input validation\n player.look = *look\n\n buf := &bytes.Buffer{}\n proto.WriteEntityLook(buf, player.EntityID, look.ToLookBytes())\n game.MulticastPacket(buf.Bytes(), player)\n })\n}\n\nfunc (player *Player) PacketPlayerDigging(status DigStatus, blockLoc *BlockXYZ, face Face) {\n \/\/ TODO validate that the player is actually somewhere near the block\n\n if status == DigBlockBroke {\n \/\/ TODO validate that the player has dug long enough to stop speed\n \/\/ hacking (based on block type and tool used - non-trivial).\n\n player.game.Enqueue(func(game *Game) {\n chunkLoc, subLoc := blockLoc.ToChunkLocal()\n\n chunk := game.chunkManager.Get(chunkLoc)\n\n if chunk == nil {\n return\n }\n\n chunk.DestroyBlock(subLoc)\n })\n }\n}\n\nfunc (player *Player) PacketPlayerBlockPlacement(itemID ItemID, blockLoc *BlockXYZ, face Face, amount ItemCount, uses ItemUses) {\n}\n\nfunc (player *Player) PacketHoldingChange(itemID ItemID) {\n}\n\nfunc (player *Player) PacketEntityAnimation(entityID EntityID, animation EntityAnimation) {\n}\n\nfunc (player *Player) PacketWindowClose(windowID WindowID) {\n}\n\nfunc (player *Player) PacketWindowClick(windowID WindowID, slot SlotID, rightClick bool, txID TxID, itemID ItemID, amount ItemCount, uses ItemUses) {\n}\n\nfunc (player *Player) PacketSignUpdate(position *BlockXYZ, lines [4]string) {\n}\n\nfunc (player *Player) PacketDisconnect(reason string) {\n log.Printf(\"Player %s disconnected reason=%s\", player.name, reason)\n player.game.Enqueue(func(game *Game) {\n game.RemovePlayer(player)\n close(player.txQueue)\n player.conn.Close()\n })\n}\n\nfunc (player *Player) ReceiveLoop() {\n for {\n err := proto.ServerReadPacket(player.conn, player)\n if err != nil {\n if err != os.EOF {\n log.Print(\"ReceiveLoop failed: \", err.String())\n }\n expVarPlayerDisconnectionCount.Add(1)\n return\n }\n }\n}\n\nfunc (player *Player) TransmitLoop() {\n for {\n bs := <-player.txQueue\n if bs == nil {\n return \/\/ txQueue closed\n }\n\n _, err := player.conn.Write(bs)\n if err != nil {\n if err != os.EOF {\n log.Print(\"TransmitLoop failed: \", err.String())\n }\n return\n }\n }\n}\n\nfunc (player *Player) sendChunks(writer io.Writer) {\n playerChunkLoc := player.position.ToChunkXZ()\n\n for chunk := range player.game.chunkManager.ChunksInRadius(playerChunkLoc) {\n proto.WritePreChunk(writer, &chunk.XZ, ChunkInit)\n }\n\n for chunk := range player.game.chunkManager.ChunksInRadius(playerChunkLoc) {\n chunk.SendChunkData(writer)\n }\n}\n\nfunc (player *Player) TransmitPacket(packet []byte) {\n if packet == nil {\n return \/\/ skip empty packets\n }\n player.txQueue <- packet\n}\n\nfunc (player *Player) postLogin() {\n buf := &bytes.Buffer{}\n 
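\/\/ Batch the spawn position, chunk data, and the initial position\/look update\n    \/\/ into one buffer so the whole login sequence is queued to the client as a\n    \/\/ single write.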
\n    proto.WriteSpawnPosition(buf, player.position.ToBlockXYZ())\n    player.sendChunks(buf)\n    proto.ServerWritePlayerPositionLook(buf, &player.position, &player.look,\n        player.position.Y+StanceNormal, false)\n    player.TransmitPacket(buf.Bytes())\n}\n<|endoftext|>"} {"text":"package search\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/wasanx25\/sreq\/config\"\n\t\"github.com\/wasanx25\/sreq\/history\"\n)\n\n\/\/ Content is a structure holding content scraped from Qiita\ntype Content struct {\n\tID string\n\tTitle string\n\tDesc string\n}\n\nfunc search(argument string, pagenation int, sort string) ([]*Content, error) {\n\tdoc, err := goquery.NewDocument(config.GetPageURL(argument, sort, strconv.Itoa(pagenation)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar contents []*Content\n\n\tdoc.Find(\".searchResult\").Each(func(_ int, s *goquery.Selection) {\n\t\titemID, _ := s.Attr(\"data-uuid\")\n\t\ttitle := s.Find(\".searchResult_itemTitle a\").Text()\n\t\tdesc := s.Find(\".searchResult_snippet\").Text()\n\n\t\tcontent := &Content{\n\t\t\tID: itemID,\n\t\t\tTitle: title,\n\t\t\tDesc: desc,\n\t\t}\n\n\t\tcontents = append(contents, content)\n\t})\n\n\treturn contents, nil\n}\n\nfunc viewList(contents []*Content) {\n\tfor num, content := range contents {\n\t\tfmt.Print(color.YellowString(strconv.Itoa(num) + \" -> \"))\n\t\tfmt.Println(content.Title)\n\t\tfmt.Println(color.GreenString(content.Desc))\n\t\tfmt.Print(\"\\n\")\n\t}\n\tif len(contents) == 10 {\n\t\tfmt.Println(color.YellowString(\"n -> \") + \"next page\")\n\t}\n\tfmt.Print(\"SELECT > \")\n}\n\nfunc scan(contents []*Content, argument string, lynx bool) bool {\n\tvar num string\n\tif _, err := fmt.Scanf(\"%s\", &num); err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tif num == \"n\" {\n\t\treturn false\n\t}\n\n\tindex, _ := strconv.Atoi(num)\n\ttarget := contents[index]\n\n\tresp, err := http.Get(config.GetAPIURL(target.ID))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tvar qiita *config.Qiita\n\tjson.Unmarshal(b, &qiita)\n\n\twriteHistory(qiita, argument)\n\n\tif lynx {\n\t\topenFile(qiita.HTML, \"\/tmp\/sreq.html\", \"lynx\", \"-display_charset=utf-8\", \"-assume_charset=utf-8\")\n\t\treturn true\n\t}\n\n\topenFile(qiita.Markdown, \"\/tmp\/sreq.txt\", \"less\")\n\treturn true\n}\n\nfunc openFile(body string, file string, cmdName ...string) {\n\ttext := []byte(body)\n\tioutil.WriteFile(file, text, os.ModePerm)\n\tcmdName = append(cmdName, file)\n\tcmd := exec.Command(cmdName[0], cmdName[1:]...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Run()\n}\n\nfunc writeHistory(content *config.Qiita, argument string) {\n\tvar snippets history.Snippets\n\tsnippets.Load()\n\turl := content.URL\n\tnewSnippet := history.Snippet{\n\t\tSearchKeyword: argument,\n\t\tURL: url,\n\t\tTitle: content.Title,\n\t}\n\tsnippets.Snippets = append(snippets.Snippets, newSnippet)\n\tif err := snippets.Save(); err != nil {\n\t\tfmt.Printf(\"Failed. %v\", err)\n\t\tos.Exit(2)\n\t}\n}\nReset search.gopackage search\n\n\/\/ import (\n\/\/ \t\"encoding\/json\"\n\/\/ \t\"fmt\"\n\/\/ \t\"io\/ioutil\"\n\/\/ \t\"net\/http\"\n\/\/ \t\"os\"\n\/\/ \t\"os\/exec\"\n\/\/ \t\"strconv\"\n\/\/\n\/\/ \t\"github.com\/PuerkitoBio\/goquery\"\n\/\/ \t\"github.com\/fatih\/color\"\n\/\/ \t\"github.com\/wasanx25\/sreq\/config\"\n\/\/ \t\"github.com\/wasanx25\/sreq\/history\"\n\/\/ )\n\/\/\n\/\/ \/\/ Content is a structure holding content scraped from Qiita\n\/\/ type Content struct {\n\/\/ \tID string\n\/\/ \tTitle string\n\/\/ \tDesc string\n\/\/ }\n\/\/\n\/\/ func search(argument string, pagenation int, sort string) ([]*Content, error) {\n\/\/ \tdoc, err := goquery.NewDocument(config.GetPageURL(argument, sort, strconv.Itoa(pagenation)))\n\/\/ \tif err != nil {\n\/\/ \t\treturn nil, err\n\/\/ \t}\n\/\/\n\/\/ \tvar contents []*Content\n\/\/\n\/\/ \tdoc.Find(\".searchResult\").Each(func(_ int, s *goquery.Selection) {\n\/\/ \t\titemID, _ := s.Attr(\"data-uuid\")\n\/\/ \t\ttitle := s.Find(\".searchResult_itemTitle a\").Text()\n\/\/ \t\tdesc := s.Find(\".searchResult_snippet\").Text()\n\/\/\n\/\/ \t\tcontent := &Content{\n\/\/ \t\t\tID: itemID,\n\/\/ \t\t\tTitle: title,\n\/\/ \t\t\tDesc: desc,\n\/\/ \t\t}\n\/\/\n\/\/ \t\tcontents = append(contents, content)\n\/\/ \t})\n\/\/\n\/\/ \treturn contents, nil\n\/\/ }\n\/\/\n\/\/ func viewList(contents []*Content) {\n\/\/ \tfor num, content := range contents {\n\/\/ \t\tfmt.Print(color.YellowString(strconv.Itoa(num) + \" -> \"))\n\/\/ \t\tfmt.Println(content.Title)\n\/\/ \t\tfmt.Println(color.GreenString(content.Desc))\n\/\/ \t\tfmt.Print(\"\\n\")\n\/\/ \t}\n\/\/ \tif len(contents) == 10 {\n\/\/ \t\tfmt.Println(color.YellowString(\"n -> \") + \"next page\")\n\/\/ \t}\n\/\/ \tfmt.Print(\"SELECT > \")\n\/\/ }\n\/\/\n\/\/ func scan(contents []*Content, argument string, lynx bool) bool {\n\/\/ \tvar num string\n\/\/ \tif _, err := fmt.Scanf(\"%s\", &num); err != nil {\n\/\/ \t\tfmt.Println(err)\n\/\/ \t}\n\/\/\n\/\/ \tif num == \"n\" {\n\/\/ \t\treturn false\n\/\/ \t}\n\/\/\n\/\/ \tindex, _ := strconv.Atoi(num)\n\/\/ \ttarget := contents[index]\n\/\/\n\/\/ \tresp, err := http.Get(config.GetAPIURL(target.ID))\n\/\/ \tif err != nil {\n\/\/ \t\tfmt.Println(err)\n\/\/ \t}\n\/\/ \tdefer resp.Body.Close()\n\/\/\n\/\/ \tb, err := ioutil.ReadAll(resp.Body)\n\/\/ \tif err != nil {\n\/\/ \t\tfmt.Println(err)\n\/\/ \t}\n\/\/ \tvar qiita *config.Qiita\n\/\/ \tjson.Unmarshal(b, &qiita)\n\/\/\n\/\/ \twriteHistory(qiita, argument)\n\/\/\n\/\/ \tif lynx {\n\/\/ \t\topenFile(qiita.HTML, \"\/tmp\/sreq.html\", \"lynx\", \"-display_charset=utf-8\", \"-assume_charset=utf-8\")\n\/\/ \t\treturn true\n\/\/ \t}\n\/\/\n\/\/ \topenFile(qiita.Markdown, \"\/tmp\/sreq.txt\", \"less\")\n\/\/ \treturn true\n\/\/ }\n\/\/\n\/\/ func openFile(body string, file string, cmdName ...string) {\n\/\/ \ttext := []byte(body)\n\/\/ \tioutil.WriteFile(file, text, os.ModePerm)\n\/\/ \tcmdName = append(cmdName, file)\n\/\/ \tcmd := exec.Command(cmdName[0], cmdName[1:]...)\n\/\/ \tcmd.Stdin = os.Stdin\n\/\/ \tcmd.Stdout = os.Stdout\n\/\/ \tcmd.Run()\n\/\/ }\n\/\/\n\/\/ func writeHistory(content *config.Qiita, argument string) {\n\/\/ \tvar snippets history.Snippets\n\/\/ \tsnippets.Load()\n\/\/ \turl := content.URL\n\/\/ \tnewSnippet := history.Snippet{\n\/\/ \t\tSearchKeyword: argument,\n\/\/ \t\tURL: url,\n\/\/ \t\tTitle: content.Title,\n\/\/ \t}\n\/\/ \tsnippets.Snippets = append(snippets.Snippets, newSnippet)\n\/\/ \tif err := snippets.Save(); err != nil {\n\/\/ 
\t\tfmt.Printf(\"Failed. %v\", err)\n\/\/ \t\tos.Exit(2)\n\/\/ \t}\n\/\/ }\n<|endoftext|>"} {"text":"package auctioneer_runner\n\nimport (\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\ntype AuctioneerRunner struct {\n\tauctioneerBin string\n\tetcdCluster []string\n\tnatsCluster []string\n\tSession *gexec.Session\n}\n\nfunc New(auctioneerBin string, etcdCluster, natsCluster []string) *AuctioneerRunner {\n\treturn &AuctioneerRunner{\n\t\tauctioneerBin: auctioneerBin,\n\t\tetcdCluster: etcdCluster,\n\t\tnatsCluster: natsCluster,\n\t}\n}\n\nfunc (r *AuctioneerRunner) Start() {\n\tr.StartWithoutCheck()\n\tEventually(r.Session, 5*time.Second).Should(gbytes.Say(\"auctioneer.started\"))\n}\n\nfunc (r *AuctioneerRunner) StartWithoutCheck() {\n\texecutorSession, err := gexec.Start(\n\t\texec.Command(\n\t\t\tr.auctioneerBin,\n\t\t\t\"-etcdCluster\", strings.Join(r.etcdCluster, \",\"),\n\t\t\t\"-natsAddresses\", strings.Join(r.natsCluster, \",\"),\n\t\t),\n\t\tginkgo.GinkgoWriter,\n\t\tginkgo.GinkgoWriter,\n\t)\n\tΩ(err).ShouldNot(HaveOccurred())\n\tr.Session = executorSession\n}\n\nfunc (r *AuctioneerRunner) Stop() {\n\tif r.Session != nil {\n\t\tr.Session.Terminate().Wait(5 * time.Second)\n\t}\n}\n\nfunc (r *AuctioneerRunner) KillWithFire() {\n\tif r.Session != nil {\n\t\tr.Session.Kill().Wait(5 * time.Second)\n\t}\n}\nAdd coloured prefix to runner outputpackage auctioneer_runner\n\nimport (\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\ntype AuctioneerRunner struct {\n\tauctioneerBin string\n\tetcdCluster []string\n\tnatsCluster []string\n\tSession *gexec.Session\n}\n\nfunc New(auctioneerBin string, etcdCluster, natsCluster []string) *AuctioneerRunner {\n\treturn &AuctioneerRunner{\n\t\tauctioneerBin: auctioneerBin,\n\t\tetcdCluster: etcdCluster,\n\t\tnatsCluster: natsCluster,\n\t}\n}\n\nfunc (r *AuctioneerRunner) Start() {\n\tr.StartWithoutCheck()\n\tEventually(r.Session, 5*time.Second).Should(gbytes.Say(\"auctioneer.started\"))\n}\n\nfunc (r *AuctioneerRunner) StartWithoutCheck() {\n\texecutorSession, err := gexec.Start(\n\t\texec.Command(\n\t\t\tr.auctioneerBin,\n\t\t\t\"-etcdCluster\", strings.Join(r.etcdCluster, \",\"),\n\t\t\t\"-natsAddresses\", strings.Join(r.natsCluster, \",\"),\n\t\t),\n\t\tgexec.NewPrefixedWriter(\"\\x1b[32m[o]\\x1b[93m[auctioneer]\\x1b[0m \", ginkgo.GinkgoWriter),\n\t\tgexec.NewPrefixedWriter(\"\\x1b[91m[e]\\x1b[93m[auctioneer]\\x1b[0m \", ginkgo.GinkgoWriter),\n\t)\n\tΩ(err).ShouldNot(HaveOccurred())\n\tr.Session = executorSession\n}\n\nfunc (r *AuctioneerRunner) Stop() {\n\tif r.Session != nil {\n\t\tr.Session.Terminate().Wait(5 * time.Second)\n\t}\n}\n\nfunc (r *AuctioneerRunner) KillWithFire() {\n\tif r.Session != nil {\n\t\tr.Session.Kill().Wait(5 * time.Second)\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kops\/util\/pkg\/hashing\"\n)\n\nfunc DownloadURL(url string, dest string, hash *hashing.Hash) (*hashing.Hash, error) {\n\tif hash != nil {\n\t\tmatch, err := fileHasHash(dest, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif match {\n\t\t\treturn hash, nil\n\t\t}\n\t}\n\n\tdirMode := os.FileMode(0755)\n\terr := downloadURLAlways(url, dest, dirMode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif hash != nil {\n\t\tmatch, err := fileHasHash(dest, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !match {\n\t\t\treturn nil, fmt.Errorf(\"downloaded from %q but hash did not match expected %q\", url, hash)\n\t\t}\n\t} else {\n\t\thash, err = hashing.HashAlgorithmSHA256.HashFile(dest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn hash, nil\n}\n\nfunc downloadURLAlways(url string, destPath string, dirMode os.FileMode) error {\n\terr := os.MkdirAll(path.Dir(destPath), dirMode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating directories for destination file %q: %v\", destPath, err)\n\t}\n\n\toutput, err := os.Create(destPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating file for download %q: %v\", destPath, err)\n\t}\n\tdefer output.Close()\n\n\tklog.Infof(\"Downloading %q\", url)\n\n\t\/\/ Create a client with custom timeouts\n\t\/\/ to avoid idle downloads hanging the program\n\thttpClient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 30 * time.Second,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t}).DialContext,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\tResponseHeaderTimeout: 10 * time.Second,\n\t\t\tIdleConnTimeout: 30 * time.Second,\n\t\t},\n\t}\n\n\t\/\/ this will stop slow downloads after 3 minutes\n\t\/\/ and interrupt reading of the Response.Body\n\tctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot create request: %v\", err)\n\t}\n\n\tresponse, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error doing HTTP fetch of %q: %v\", url, err)\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\treturn fmt.Errorf(\"error response from %q: HTTP %v\", url, response.StatusCode)\n\t}\n\n\tstart := time.Now()\n\t\/\/ Wrap the log call in a closure so time.Since(start) is evaluated when the\n\t\/\/ function returns, not at the point the defer statement executes.\n\tdefer func() {\n\t\tklog.Infof(\"Copying %q to %q took %v\", url, destPath, time.Since(start))\n\t}()\n\n\t_, err = io.Copy(output, response.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error downloading HTTP content from %q: %v\", url, err)\n\t}\n\treturn nil\n}\nleverage proxy env variables\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage fi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/kops\/util\/pkg\/hashing\"\n)\n\nfunc DownloadURL(url string, dest string, hash *hashing.Hash) (*hashing.Hash, error) {\n\tif hash != nil {\n\t\tmatch, err := fileHasHash(dest, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif match {\n\t\t\treturn hash, nil\n\t\t}\n\t}\n\n\tdirMode := os.FileMode(0755)\n\terr := downloadURLAlways(url, dest, dirMode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif hash != nil {\n\t\tmatch, err := fileHasHash(dest, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !match {\n\t\t\treturn nil, fmt.Errorf(\"downloaded from %q but hash did not match expected %q\", url, hash)\n\t\t}\n\t} else {\n\t\thash, err = hashing.HashAlgorithmSHA256.HashFile(dest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn hash, nil\n}\n\nfunc downloadURLAlways(url string, destPath string, dirMode os.FileMode) error {\n\terr := os.MkdirAll(path.Dir(destPath), dirMode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating directories for destination file %q: %v\", destPath, err)\n\t}\n\n\toutput, err := os.Create(destPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating file for download %q: %v\", destPath, err)\n\t}\n\tdefer output.Close()\n\n\tklog.Infof(\"Downloading %q\", url)\n\n\t\/\/ Create a client with custom timeouts\n\t\/\/ to avoid idle downloads hanging the program\n\thttpClient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 30 * time.Second,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t}).DialContext,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\tResponseHeaderTimeout: 10 * time.Second,\n\t\t\tIdleConnTimeout: 30 * time.Second,\n\t\t},\n\t}\n\n\t\/\/ this will stop slow downloads after 3 minutes\n\t\/\/ and interrupt reading of the Response.Body\n\tctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot create request: %v\", err)\n\t}\n\n\tresponse, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error doing HTTP fetch of %q: %v\", url, err)\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\treturn fmt.Errorf(\"error response from %q: HTTP %v\", url, response.StatusCode)\n\t}\n\n\tstart := time.Now()\n\t\/\/ Wrap the log call in a closure so time.Since(start) is evaluated when the\n\t\/\/ function returns, not at the point the defer statement executes.\n\tdefer func() {\n\t\tklog.Infof(\"Copying %q to %q took %v\", url, destPath, time.Since(start))\n\t}()\n\n\t_, err = io.Copy(output, response.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error downloading HTTP content from %q: %v\", url, err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage actions\n\nimport 
(\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/kubeadm\/kinder\/pkg\/cluster\/status\"\n\t\"k8s.io\/kubeadm\/kinder\/pkg\/constants\"\n)\n\n\/\/ KubeadmJoin executes the kubeadm join workflow both for control-plane nodes and\n\/\/ worker nodes\nfunc KubeadmJoin(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, discoveryMode DiscoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) {\n\tif err := joinControlPlanes(c, usePhases, copyCertsMode, discoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors, wait, vLevel); err != nil {\n\t\treturn err\n\t}\n\n\tif err := joinWorkers(c, usePhases, discoveryMode, wait, kubeadmConfigVersion, ignorePreflightErrors, vLevel); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc joinControlPlanes(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, discoveryMode DiscoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) {\n\tcpX := []*status.Node{c.BootstrapControlPlane()}\n\n\tfor _, cp2 := range c.SecondaryControlPlanes().EligibleForActions() {\n\t\tif err := copyPatchesToNode(cp2, patchesDir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ if not automatic copy certs, simulate manual copy\n\t\tif copyCertsMode == CopyCertsModeManual {\n\t\t\tif err := copyCertificatesToNode(c, cp2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ checks pre-loaded images available on the node (this will report missing images, if any)\n\t\tkubeVersion, err := cp2.KubeVersion()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := checkImagesForVersion(cp2, kubeVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ prepares the kubeadm config on this node\n\t\tif err := KubeadmJoinConfig(c, kubeadmConfigVersion, copyCertsMode, discoveryMode, cp2); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ executes the kubeadm join control-plane workflow\n\t\tif usePhases {\n\t\t\terr = kubeadmJoinControlPlaneWithPhases(cp2, patchesDir, ignorePreflightErrors, vLevel)\n\t\t} else {\n\t\t\terr = kubeadmJoinControlPlane(cp2, patchesDir, ignorePreflightErrors, vLevel)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ updates the loadbalancer config with the new cp node\n\t\tcpX = append(cpX, cp2)\n\t\tif err := LoadBalancer(c, cpX...); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := waitNewControlPlaneNodeReady(c, cp2, wait); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc kubeadmJoinControlPlane(cp *status.Node, patchesDir, ignorePreflightErrors string, vLevel int) (err error) {\n\tjoinArgs := []string{\n\t\t\"join\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tjoinArgs = append(joinArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", joinArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc kubeadmJoinControlPlaneWithPhases(cp *status.Node, patchesDir, ignorePreflightErrors string, vLevel int) (err error) {\n\t\/\/ kubeadm join phase preflight\n\tpreflightArgs := []string{\n\t\t\"join\", \"phase\", \"preflight\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", 
ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", preflightArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase control-plane-prepare\n\tprepareArgs := []string{\n\t\t\"join\", \"phase\", \"control-plane-prepare\", \"all\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tprepareArgs = append(prepareArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", prepareArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase kubelet-start\n\tif err := cp.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"kubelet-start\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase control-plane-join\n\tcontrolPlaneArgs := []string{\n\t\t\"join\", \"phase\", \"control-plane-join\", \"all\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tcontrolPlaneArgs = append(controlPlaneArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", controlPlaneArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc joinWorkers(c *status.Cluster, usePhases bool, discoveryMode DiscoveryMode, wait time.Duration, kubeadmConfigVersion, ignorePreflightErrors string, vLevel int) (err error) {\n\tfor _, w := range c.Workers().EligibleForActions() {\n\t\t\/\/ checks pre-loaded images available on the node (this will report missing images, if any)\n\t\tkubeVersion, err := w.KubeVersion()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := checkImagesForVersion(w, kubeVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ prepares the kubeadm config on this node\n\t\tif err := KubeadmJoinConfig(c, kubeadmConfigVersion, CopyCertsModeNone, discoveryMode, w); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ executes the kubeadm join workflow\n\t\tif usePhases {\n\t\t\terr = kubeadmJoinWorkerWithPhases(w, ignorePreflightErrors, vLevel)\n\t\t} else {\n\t\t\terr = kubeadmJoinWorker(w, ignorePreflightErrors, vLevel)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := waitNewWorkerNodeReady(c, w, wait); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc kubeadmJoinWorker(w *status.Node, ignorePreflightErrors string, vLevel int) (err error) {\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc kubeadmJoinWorkerWithPhases(w *status.Node, ignorePreflightErrors string, vLevel int) (err error) {\n\t\/\/ kubeadm join phase preflight\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"preflight\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn 
err\n\t}\n\n\t\/\/ NB. kubeadm join phase control-plane-prepare should not be executed when joining a worker node\n\n\t\/\/ kubeadm join phase kubelet-start\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"kubelet-start\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ NB. kubeadm join phase control-plane-join should not be executed when joining a worker node\n\n\treturn nil\n}\nkinder: make sure worker nodes also get a patches dir created\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage actions\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/kubeadm\/kinder\/pkg\/cluster\/status\"\n\t\"k8s.io\/kubeadm\/kinder\/pkg\/constants\"\n)\n\n\/\/ KubeadmJoin executes the kubeadm join workflow both for control-plane nodes and\n\/\/ worker nodes\nfunc KubeadmJoin(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, discoveryMode DiscoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) {\n\tif err := joinControlPlanes(c, usePhases, copyCertsMode, discoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors, wait, vLevel); err != nil {\n\t\treturn err\n\t}\n\n\tif err := joinWorkers(c, usePhases, discoveryMode, wait, kubeadmConfigVersion, patchesDir, ignorePreflightErrors, vLevel); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc joinControlPlanes(c *status.Cluster, usePhases bool, copyCertsMode CopyCertsMode, discoveryMode DiscoveryMode, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, wait time.Duration, vLevel int) (err error) {\n\tcpX := []*status.Node{c.BootstrapControlPlane()}\n\n\tfor _, cp2 := range c.SecondaryControlPlanes().EligibleForActions() {\n\t\tif err := copyPatchesToNode(cp2, patchesDir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ if not automatic copy certs, simulate manual copy\n\t\tif copyCertsMode == CopyCertsModeManual {\n\t\t\tif err := copyCertificatesToNode(c, cp2); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ checks pre-loaded images available on the node (this will report missing images, if any)\n\t\tkubeVersion, err := cp2.KubeVersion()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := checkImagesForVersion(cp2, kubeVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ prepares the kubeadm config on this node\n\t\tif err := KubeadmJoinConfig(c, kubeadmConfigVersion, copyCertsMode, discoveryMode, cp2); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ executes the kubeadm join control-plane workflow\n\t\tif usePhases {\n\t\t\terr = kubeadmJoinControlPlaneWithPhases(cp2, patchesDir, ignorePreflightErrors, vLevel)\n\t\t} else {\n\t\t\terr = kubeadmJoinControlPlane(cp2, patchesDir, ignorePreflightErrors, vLevel)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ updates the loadbalancer config with the new cp node\n\t\tcpX = append(cpX, 
cp2)\n\t\tif err := LoadBalancer(c, cpX...); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := waitNewControlPlaneNodeReady(c, cp2, wait); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc kubeadmJoinControlPlane(cp *status.Node, patchesDir, ignorePreflightErrors string, vLevel int) (err error) {\n\tjoinArgs := []string{\n\t\t\"join\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tjoinArgs = append(joinArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", joinArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc kubeadmJoinControlPlaneWithPhases(cp *status.Node, patchesDir, ignorePreflightErrors string, vLevel int) (err error) {\n\t\/\/ kubeadm join phase preflight\n\tpreflightArgs := []string{\n\t\t\"join\", \"phase\", \"preflight\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", preflightArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase control-plane-prepare\n\tprepareArgs := []string{\n\t\t\"join\", \"phase\", \"control-plane-prepare\", \"all\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tprepareArgs = append(prepareArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", prepareArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase kubelet-start\n\tif err := cp.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"kubelet-start\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ kubeadm join phase control-plane-join\n\tcontrolPlaneArgs := []string{\n\t\t\"join\", \"phase\", \"control-plane-join\", \"all\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t}\n\tif patchesDir != \"\" {\n\t\tif cp.MustKubeadmVersion().LessThan(constants.V1_22) {\n\t\t\tcontrolPlaneArgs = append(controlPlaneArgs, \"--experimental-patches\", constants.PatchesDir)\n\t\t}\n\t}\n\n\tif err := cp.Command(\n\t\t\"kubeadm\", controlPlaneArgs...,\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc joinWorkers(c *status.Cluster, usePhases bool, discoveryMode DiscoveryMode, wait time.Duration, kubeadmConfigVersion, patchesDir, ignorePreflightErrors string, vLevel int) (err error) {\n\tfor _, w := range c.Workers().EligibleForActions() {\n\t\t\/\/ checks pre-loaded images available on the node (this will report missing images, if any)\n\t\tkubeVersion, err := w.KubeVersion()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := copyPatchesToNode(w, patchesDir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := checkImagesForVersion(w, kubeVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ prepares the kubeadm config on this node\n\t\tif err := KubeadmJoinConfig(c, kubeadmConfigVersion, 
CopyCertsModeNone, discoveryMode, w); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ executes the kubeadm join workflow\n\t\tif usePhases {\n\t\t\terr = kubeadmJoinWorkerWithPhases(w, ignorePreflightErrors, vLevel)\n\t\t} else {\n\t\t\terr = kubeadmJoinWorker(w, ignorePreflightErrors, vLevel)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := waitNewWorkerNodeReady(c, w, wait); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc kubeadmJoinWorker(w *status.Node, ignorePreflightErrors string, vLevel int) (err error) {\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc kubeadmJoinWorkerWithPhases(w *status.Node, ignorePreflightErrors string, vLevel int) (err error) {\n\t\/\/ kubeadm join phase preflight\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"preflight\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--ignore-preflight-errors=%s\", ignorePreflightErrors),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ NB. kubeadm join phase control-plane-prepare should not be executed when joining a worker node\n\n\t\/\/ kubeadm join phase kubelet-start\n\tif err := w.Command(\n\t\t\"kubeadm\", \"join\", \"phase\", \"kubelet-start\",\n\t\tfmt.Sprintf(\"--config=%s\", constants.KubeadmConfigPath),\n\t\tfmt.Sprintf(\"--v=%d\", vLevel),\n\t).RunWithEcho(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ NB. kubeadm join phase control-plane-join should not be executed when joining a worker node\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package secret\n\nimport (\n\t\"io\/ioutil\"\n\t\"sort\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\t\/\/ DefaultKMSKey represents default KMS key alias\n\tDefaultKMSKey = \"valec\"\n)\n\n\/\/ Secret represents key=value pair\ntype Secret struct {\n\tKey string `yaml:\"key\"`\n\tValue string `yaml:\"value\"`\n}\n\n\/\/ Secrets represents the array of Secret\ntype Secrets []*Secret\n\n\/\/ YAML represents secret yaml structure\ntype YAML struct {\n\tKMSKey string `yaml:\"kms_key\"`\n\tSecrets Secrets `yaml:\"secrets\"`\n}\n\n\/\/ Len returns the length of the array\nfunc (ss Secrets) Len() int {\n\treturn len(ss)\n}\n\n\/\/ Less returns Secrets[i] is less than Secrets[j]\nfunc (ss Secrets) Less(i, j int) bool {\n\tsi, sj := ss[i], ss[j]\n\n\tif si.Key < sj.Key {\n\t\treturn true\n\t}\n\n\tif si.Key > sj.Key {\n\t\treturn false\n\t}\n\n\tif si.Value < sj.Value {\n\t\treturn true\n\t}\n\n\tif si.Value > sj.Value {\n\t\treturn false\n\t}\n\n\treturn false\n}\n\n\/\/ Swap swaps Secrets[i] and Secrets[j]\nfunc (ss Secrets) Swap(i, j int) {\n\tss[i], ss[j] = ss[j], ss[i]\n}\n\n\/\/ CompareList compares two secret lists and returns the differences between them\nfunc (ss Secrets) CompareList(old Secrets) (added, updated, deleted Secrets) {\n\tnewMap, oldMap := ss.ListToMap(), old.ListToMap()\n\n\tfor _, c := range ss {\n\t\tv, ok := oldMap[c.Key]\n\t\tif !ok {\n\t\t\tadded = append(added, c)\n\t\t} else if v != c.Value {\n\t\t\tupdated = append(updated, c)\n\t\t}\n\t}\n\n\tfor _, c := range old {\n\t\t_, ok := newMap[c.Key]\n\t\tif !ok {\n\t\t\tdeleted = append(deleted, c)\n\t\t}\n\t}\n\n\treturn added, updated, deleted\n}\n\n\/\/ ListToMap converts 
secret list to map\nfunc (ss Secrets) ListToMap() map[string]string {\n\tsecretMap := map[string]string{}\n\n\tfor _, secret := range ss {\n\t\tsecretMap[secret.Key] = secret.Value\n\t}\n\n\treturn secretMap\n}\n\n\/\/ SaveAsYAML saves secrets to local secret file\nfunc (ss Secrets) SaveAsYAML(filename, kmsKey string) error {\n\ty := &YAML{\n\t\tKMSKey: kmsKey,\n\t\tSecrets: ss,\n\t}\n\n\tbody, err := yaml.Marshal(y)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to convert secrets as YAML.\")\n\t}\n\n\tif err := ioutil.WriteFile(filename, body, 0644); err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to save file. filename=%s\", filename)\n\t}\n\n\treturn nil\n}\n\n\/\/ LoadFromYAML loads secrets from the given YAML file\nfunc LoadFromYAML(filename string) (string, Secrets, error) {\n\tbody, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\", Secrets{}, errors.Wrapf(err, \"Failed to read secret file. filename=%s\", filename)\n\t}\n\n\tvar y YAML\n\n\tif err := yaml.Unmarshal(body, &y); err != nil {\n\t\treturn \"\", Secrets{}, errors.Wrapf(err, \"Failed to parse secret file as YAML. filename=%s\", filename)\n\t}\n\n\treturn y.KMSKey, y.Secrets, nil\n}\n\n\/\/ MapToList converts map to secret list\nfunc MapToList(secretMap map[string]string) Secrets {\n\tsecrets := Secrets{}\n\n\tfor key, value := range secretMap {\n\t\tsecrets = append(secrets, &Secret{\n\t\t\tKey: key,\n\t\t\tValue: value,\n\t\t})\n\t}\n\n\tsort.Sort(secrets)\n\n\treturn secrets\n}\nCreate secret directory if it does not existpackage secret\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\n\t\"github.com\/dtan4\/valec\/util\"\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\t\/\/ DefaultKMSKey represents default KMS key alias\n\tDefaultKMSKey = \"valec\"\n)\n\n\/\/ Secret represents key=value pair\ntype Secret struct {\n\tKey string `yaml:\"key\"`\n\tValue string `yaml:\"value\"`\n}\n\n\/\/ Secrets represents the array of Secret\ntype Secrets []*Secret\n\n\/\/ YAML represents secret yaml structure\ntype YAML struct {\n\tKMSKey string `yaml:\"kms_key\"`\n\tSecrets Secrets `yaml:\"secrets\"`\n}\n\n\/\/ Len returns the length of the array\nfunc (ss Secrets) Len() int {\n\treturn len(ss)\n}\n\n\/\/ Less returns Secrets[i] is less than Secrets[j]\nfunc (ss Secrets) Less(i, j int) bool {\n\tsi, sj := ss[i], ss[j]\n\n\tif si.Key < sj.Key {\n\t\treturn true\n\t}\n\n\tif si.Key > sj.Key {\n\t\treturn false\n\t}\n\n\tif si.Value < sj.Value {\n\t\treturn true\n\t}\n\n\tif si.Value > sj.Value {\n\t\treturn false\n\t}\n\n\treturn false\n}\n\n\/\/ Swap swaps Secrets[i] and Secrets[j]\nfunc (ss Secrets) Swap(i, j int) {\n\tss[i], ss[j] = ss[j], ss[i]\n}\n\n\/\/ CompareList compares two secret lists and returns the differences between them\nfunc (ss Secrets) CompareList(old Secrets) (added, updated, deleted Secrets) {\n\tnewMap, oldMap := ss.ListToMap(), old.ListToMap()\n\n\tfor _, c := range ss {\n\t\tv, ok := oldMap[c.Key]\n\t\tif !ok {\n\t\t\tadded = append(added, c)\n\t\t} else if v != c.Value {\n\t\t\tupdated = append(updated, c)\n\t\t}\n\t}\n\n\tfor _, c := range old {\n\t\t_, ok := newMap[c.Key]\n\t\tif !ok {\n\t\t\tdeleted = append(deleted, c)\n\t\t}\n\t}\n\n\treturn added, updated, deleted\n}\n\n\/\/ ListToMap converts secret list to map\nfunc (ss Secrets) ListToMap() map[string]string {\n\tsecretMap := map[string]string{}\n\n\tfor _, secret := range ss {\n\t\tsecretMap[secret.Key] = secret.Value\n\t}\n\n\treturn secretMap\n}\n\n\/\/ SaveAsYAML 
saves secrets to local secret file\nfunc (ss Secrets) SaveAsYAML(filename, kmsKey string) error {\n\ty := &YAML{\n\t\tKMSKey: kmsKey,\n\t\tSecrets: ss,\n\t}\n\n\tbody, err := yaml.Marshal(y)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to convert secrets as YAML.\")\n\t}\n\n\tdir := filepath.Dir(filename)\n\tif !util.IsExist(dir) {\n\t\tif err := os.MkdirAll(dir, 0755); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to create directory. dirname=%s\", dir)\n\t\t}\n\t}\n\n\tif err := ioutil.WriteFile(filename, body, 0644); err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to save file. filename=%s\", filename)\n\t}\n\n\treturn nil\n}\n\n\/\/ LoadFromYAML loads secrets from the given YAML file\nfunc LoadFromYAML(filename string) (string, Secrets, error) {\n\tbody, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\", Secrets{}, errors.Wrapf(err, \"Failed to read secret file. filename=%s\", filename)\n\t}\n\n\tvar y YAML\n\n\tif err := yaml.Unmarshal(body, &y); err != nil {\n\t\treturn \"\", Secrets{}, errors.Wrapf(err, \"Failed to parse secret file as YAML. filename=%s\", filename)\n\t}\n\n\treturn y.KMSKey, y.Secrets, nil\n}\n\n\/\/ MapToList converts map to secret list\nfunc MapToList(secretMap map[string]string) Secrets {\n\tsecrets := Secrets{}\n\n\tfor key, value := range secretMap {\n\t\tsecrets = append(secrets, &Secret{\n\t\t\tKey: key,\n\t\t\tValue: value,\n\t\t})\n\t}\n\n\tsort.Sort(secrets)\n\n\treturn secrets\n}\n
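\n\/\/ Editor's illustrative sketch (not part of the original source; keys and\n\/\/ values below are hypothetical): how CompareList classifies differences.\n\/\/\n\/\/\tcurrent := Secrets{{Key: \"FOO\", Value: \"1\"}, {Key: \"BAR\", Value: \"2\"}}\n\/\/\tstored := Secrets{{Key: \"BAR\", Value: \"9\"}, {Key: \"BAZ\", Value: \"3\"}}\n\/\/\tadded, updated, deleted := current.CompareList(stored)\n\/\/\t\/\/ added   -> FOO=1 (present only in the receiver)\n\/\/\t\/\/ updated -> BAR=2 (key on both sides, values differ; receiver value kept)\n\/\/\t\/\/ deleted -> BAZ=3 (present only in the argument)\n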
<|endoftext|>"} {"text":"\/*\n\n\tgolden is a package designed to make it possible to compare a game to a\n\tgolden run for testing purposes. It takes a record saved in\n\tstorage\/filesystem format and compares it.\n\n*\/\npackage golden\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/filesystem\/record\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/memory\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/Compare is the primary method in the package. It takes a game delegate and a\n\/\/filename denoting a record to compare against. delegate should be a fresh\n\/\/delegate not yet affiliated with a manager.\nfunc Compare(delegate boardgame.GameDelegate, recFilename string) error {\n\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\trec, err := record.New(recFilename)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create record: \" + err.Error())\n\t}\n\n\treturn compare(manager, rec)\n\n}\n\n\/\/CompareFolder is like Compare, except it will iterate through any file in\n\/\/recFolder that ends in .json. Errors if any of those files cannot be parsed\n\/\/into recs, or if no files match.\nfunc CompareFolder(delegate boardgame.GameDelegate, recFolder string) error {\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\tinfos, err := ioutil.ReadDir(recFolder)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't read folder: \" + err.Error())\n\t}\n\n\tprocessedRecs := 0\n\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) != \".json\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trec, err := record.New(filepath.Join(recFolder, info.Name()))\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"File with name \" + info.Name() + \" couldn't be loaded into rec: \" + err.Error())\n\t\t}\n\n\t\tif err := compare(manager, rec); err != nil {\n\t\t\treturn errors.New(\"File named \" + info.Name() + \" had compare error: \" + err.Error())\n\t\t}\n\n\t\tprocessedRecs++\n\t}\n\n\tif processedRecs < 1 {\n\t\treturn errors.New(\"Processed 0 recs in folder\")\n\t}\n\n\treturn nil\n}\n\nfunc compare(manager *boardgame.GameManager, rec *record.Record) error {\n\tgame, err := manager.RecreateGame(rec.Game())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create game: \" + err.Error())\n\t}\n\n\tlastVerifiedVersion := 0\n\n\tfor !game.Finished() {\n\t\t\/\/Verify all new moves that have happened since the last time we\n\t\t\/\/checked (often, fix-up moves).\n\t\tfor lastVerifiedVersion < game.Version() {\n\t\t\tstateToCompare, err := rec.State(lastVerifiedVersion)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Couldn't get \" + strconv.Itoa(lastVerifiedVersion) + \" state: \" + err.Error())\n\t\t\t}\n\n\t\t\t\/\/TODO: use go-test\/deep (if vendored) for a more descriptive error.\n\t\t\tif err := compareStorageRecords(game.State(lastVerifiedVersion).StorageRecord(), stateToCompare); err != nil {\n\t\t\t\treturn errors.New(\"State \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + err.Error())\n\t\t\t}\n\n\t\t\t\/\/TODO: compare the move storage records too.\n\n\t\t\tlastVerifiedVersion++\n\t\t}\n\n\t\tnextMoveRec, err := rec.Move(lastVerifiedVersion + 1)\n\n\t\tif err != nil {\n\t\t\t\/\/We'll assume that means that's all of the moves there are to make.\n\t\t\tbreak\n\t\t}\n\n\t\tif nextMoveRec.Proposer < 0 {\n\t\t\treturn errors.New(\"At version \" + strconv.Itoa(lastVerifiedVersion) + \" the next player move to apply was not applied by a player\")\n\t\t}\n\n\t\tnextMove, err := nextMoveRec.Inflate(game)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't inflate move: \" + err.Error())\n\t\t}\n\n\t\tif err := <-game.ProposeMove(nextMove, nextMoveRec.Proposer); err != nil {\n\t\t\treturn errors.New(\"Couldn't propose next move in chain: \" + err.Error())\n\t\t}\n\n\t}\n\n\tif game.Finished() != rec.Game().Finished {\n\t\treturn errors.New(\"Game finished did not match rec\")\n\t}\n\n\tif !reflect.DeepEqual(game.Winners(), rec.Game().Winners) {\n\t\treturn errors.New(\"Game winners did not match\")\n\t}\n\n\treturn nil\n}\n\nvar differ = gojsondiff.New()\n\nvar diffformatter = formatter.NewDeltaFormatter()\n\nfunc compareStorageRecords(one, two boardgame.StateStorageRecord) error {\n\n\tdiff, err := differ.Compare(one, two)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't diff: \" + err.Error())\n\t}\n\n\tif diff.Modified() {\n\n\t\tstr, err := diffformatter.Format(diff)\n\n\t\tif err != 
nil {\n\t\t\treturn errors.New(\"Couldn't format diff: \" + err.Error())\n\t\t}\n\n\t\treturn errors.New(\"Diff: \" + str)\n\t}\n\n\treturn nil\n\n}\nRemove a completed TODO. Part of #648.\/*\n\n\tgolden is a package designed to make it possible to compare a game to a\n\tgolden run for testing purposes. It takes a record saved in\n\tstorage\/filesystem format and compares it.\n\n*\/\npackage golden\n\nimport (\n\t\"errors\"\n\t\"github.com\/jkomoros\/boardgame\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/filesystem\/record\"\n\t\"github.com\/jkomoros\/boardgame\/storage\/memory\"\n\t\"github.com\/yudai\/gojsondiff\"\n\t\"github.com\/yudai\/gojsondiff\/formatter\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\n\/\/Compare is the primary method in the package. It takes a game delegate and a\n\/\/filename denoting a record to compare against. delegate should be a fresh\n\/\/delegate not yet affiliated with a manager.\nfunc Compare(delegate boardgame.GameDelegate, recFilename string) error {\n\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\trec, err := record.New(recFilename)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create record: \" + err.Error())\n\t}\n\n\treturn compare(manager, rec)\n\n}\n\n\/\/CompareFolder is like Compare, except it will iterate through any file in\n\/\/recFolder that ends in .json. Errors if any of those files cannot be parsed\n\/\/into recs, or if no files match.\nfunc CompareFolder(delegate boardgame.GameDelegate, recFolder string) error {\n\tmanager, err := boardgame.NewGameManager(delegate, memory.NewStorageManager())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create new manager: \" + err.Error())\n\t}\n\n\tinfos, err := ioutil.ReadDir(recFolder)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't read folder: \" + err.Error())\n\t}\n\n\tprocessedRecs := 0\n\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif filepath.Ext(info.Name()) != \".json\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trec, err := record.New(filepath.Join(recFolder, info.Name()))\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"File with name \" + info.Name() + \" couldn't be loaded into rec: \" + err.Error())\n\t\t}\n\n\t\tif err := compare(manager, rec); err != nil {\n\t\t\treturn errors.New(\"File named \" + info.Name() + \" had compare error: \" + err.Error())\n\t\t}\n\n\t\tprocessedRecs++\n\t}\n\n\tif processedRecs < 1 {\n\t\treturn errors.New(\"Processed 0 recs in folder\")\n\t}\n\n\treturn nil\n}\n\nfunc compare(manager *boardgame.GameManager, rec *record.Record) error {\n\tgame, err := manager.RecreateGame(rec.Game())\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't create game: \" + err.Error())\n\t}\n\n\tlastVerifiedVersion := 0\n\n\tfor !game.Finished() {\n\t\t\/\/Verify all new moves that have happened since the last time we\n\t\t\/\/checked (often, fix-up moves).\n\t\tfor lastVerifiedVersion < game.Version() {\n\t\t\tstateToCompare, err := rec.State(lastVerifiedVersion)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"Couldn't get \" + strconv.Itoa(lastVerifiedVersion) + \" state: \" + err.Error())\n\t\t\t}\n\n\t\t\tif err := compareStorageRecords(game.State(lastVerifiedVersion).StorageRecord(), stateToCompare); err != nil {\n\t\t\t\treturn errors.New(\"State \" + strconv.Itoa(lastVerifiedVersion) + \" compared differently: \" + 
err.Error())\n\t\t\t}\n\n\t\t\t\/\/TODO: compare the move storage records too.\n\n\t\t\tlastVerifiedVersion++\n\t\t}\n\n\t\tnextMoveRec, err := rec.Move(lastVerifiedVersion + 1)\n\n\t\tif err != nil {\n\t\t\t\/\/We'll assume that means that's all of the moves there are to make.\n\t\t\tbreak\n\t\t}\n\n\t\tif nextMoveRec.Proposer < 0 {\n\t\t\treturn errors.New(\"At version \" + strconv.Itoa(lastVerifiedVersion) + \" the next player move to apply was not applied by a player\")\n\t\t}\n\n\t\tnextMove, err := nextMoveRec.Inflate(game)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't inflate move: \" + err.Error())\n\t\t}\n\n\t\tif err := <-game.ProposeMove(nextMove, nextMoveRec.Proposer); err != nil {\n\t\t\treturn errors.New(\"Couldn't propose next move in chain: \" + err.Error())\n\t\t}\n\n\t}\n\n\tif game.Finished() != rec.Game().Finished {\n\t\treturn errors.New(\"Game finished did not match rec\")\n\t}\n\n\tif !reflect.DeepEqual(game.Winners(), rec.Game().Winners) {\n\t\treturn errors.New(\"Game winners did not match\")\n\t}\n\n\treturn nil\n}\n\nvar differ = gojsondiff.New()\n\nvar diffformatter = formatter.NewDeltaFormatter()\n\nfunc compareStorageRecords(one, two boardgame.StateStorageRecord) error {\n\n\tdiff, err := differ.Compare(one, two)\n\n\tif err != nil {\n\t\treturn errors.New(\"Couldn't diff: \" + err.Error())\n\t}\n\n\tif diff.Modified() {\n\n\t\tstr, err := diffformatter.Format(diff)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Couldn't format diff: \" + err.Error())\n\t\t}\n\n\t\treturn errors.New(\"Diff: \" + str)\n\t}\n\n\treturn nil\n\n}\n
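\n\/\/ Editor's illustrative sketch (not part of the original source;\n\/\/ mygame.NewDelegate and the testdata path are hypothetical names): typical\n\/\/ use of Compare from a test.\n\/\/\n\/\/\tfunc TestAgainstGolden(t *testing.T) {\n\/\/\t\tif err := golden.Compare(mygame.NewDelegate(), \"testdata\/golden.json\"); err != nil {\n\/\/\t\t\tt.Error(err)\n\/\/\t\t}\n\/\/\t}\n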
{\n\t\tt.Error(\"Seq should return self\")\n\t}\n\tif c2.Seq() != c2 {\n\t\tt.Error(\"Seq should return self\")\n\t}\n}\n\nfunc TestConsEmpty(t *testing.T) {\n\tc := NewCons(\"abc\", nil)\n\te := c.Empty()\n\tif e != CachedEmptyList {\n\t\tt.Error(\"Empty should be CachedEmptyList\")\n\t}\n}\n\nfunc TestConsEquiv(t *testing.T) {\n\tc1 := createComplicatedCons()\n\tc2 := createComplicatedCons()\n\tif c1 == c2 {\n\t\tt.Error(\"Expect two calls to createComplicatedCons to return distinct structs\")\n\t}\n\tif !c1.Equiv(c1) {\n\t\tt.Error(\"Expect cons to be equiv to itself\")\n\t}\n\tif !c1.Equiv(c2) {\n\t\tt.Error(\"Expect cons to equiv similar cons\")\n\t}\n\n\tc3 := NewCons(\"abc\", nil)\n\tif c1.Equiv(c3) {\n\t\tt.Error(\"cons equiv dissimilar cons\")\n\t}\n}\nAdd test for Cons zero-value\/\/ Copyright 2012 David Miller. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage seq\n\nimport (\n\t\"github.com\/dmiller\/go-seq\/iseq\"\n\t\"testing\"\n)\n\nfunc TestConsCtors(t *testing.T) {\n\tc := NewCons(\"abc\", nil)\n\n\tif c.Meta() != nil {\n\t\tt.Error(\"NewCons ctor should have nil meta\")\n\t}\n\n\tif c.First() != \"abc\" {\n\t\tt.Error(\"NewCons ctor did not initialize first\")\n\t}\n\n\tc1 := NewCons(\"def\", c)\n\tif c1.First() != \"def\" {\n\t\tt.Error(\"NewCons ctor did not initialize first\")\n\t}\n\n\tif c1.Next() != c {\n\t\tt.Error(\"NewCons ctor did nto initialize more\/next\")\n\t}\n\n\t\/\/ TODO: add tests for c-tor with meta -- we need a PMap implementation first\n}\n\nfunc TestConsImplementInterfaces(t *testing.T) {\n\tvar c interface{} = NewCons(\"abc\", nil)\n\n\tif _, ok := c.(iseq.MetaW); !ok {\n\t\tt.Error(\"Cons must implement MetaW\")\n\t}\n\n\tif _, ok := c.(iseq.Meta); !ok {\n\t\tt.Error(\"Cons must implement Meta\")\n\t}\n\n\tif _, ok := c.(iseq.PCollection); !ok {\n\t\tt.Error(\"Cons must implement PCollection\")\n\t}\n\n\tif _, ok := c.(iseq.Seqable); !ok {\n\t\tt.Error(\"Cons must implement Seqable\")\n\t}\n\n\tif _, ok := c.(iseq.Equivable); !ok {\n\t\tt.Error(\"Cons must implement Equatable\")\n\t}\n\n\tif _, ok := c.(iseq.Hashable); !ok {\n\t\tt.Error(\"Cons must implement Hashable\")\n\t}\n}\n\nfunc createComplicatedCons() *Cons {\n\tc1 := NewCons(1, nil)\n\tc2 := NewCons(2, c1)\n\tc3 := NewCons(\"abc\", nil)\n\tc4 := NewCons(c3, c2)\n\tc5 := NewCons(\"def\", c4)\n\treturn c5\n}\n\nfunc TestConsCount(t *testing.T) {\n\tc := createComplicatedCons()\n\tif c.Count() != 4 {\n\t\tt.Errorf(\"Count: expected 4, got %v\", c.Count())\n\t}\n}\n\nfunc TestConsSeq(t *testing.T) {\n\tc1 := NewCons(\"abc\", nil)\n\tc2 := createComplicatedCons()\n\tif c1.Seq() != c1 {\n\t\tt.Error(\"Seq should return self\")\n\t}\n\tif c2.Seq() != c2 {\n\t\tt.Error(\"Seq should return self\")\n\t}\n}\n\nfunc TestConsEmpty(t *testing.T) {\n\tc := NewCons(\"abc\", nil)\n\te := c.Empty()\n\tif e != CachedEmptyList {\n\t\tt.Error(\"Empty should be CachedEmptyList\")\n\t}\n}\n\nfunc TestConsEquiv(t *testing.T) {\n\tc1 := createComplicatedCons()\n\tc2 := createComplicatedCons()\n\tif c1 == c2 {\n\t\tt.Error(\"Expect two calls to createComplicatedCons to return distinct structs\")\n\t}\n\tif !c1.Equiv(c1) {\n\t\tt.Error(\"Expect cons to be equiv to itself\")\n\t}\n\tif !c1.Equiv(c2) {\n\t\tt.Error(\"Expect cons to equiv similar cons\")\n\t}\n\n\tc3 := NewCons(\"abc\", nil)\n\tif c1.Equiv(c3) {\n\t\tt.Error(\"cons equiv dissimilar cons\")\n\t}\n}\n\nfunc TestConsZeroValue(t *testing.T) {\n\tc1 := 
new(Cons)\n\tc2 := NewCons(nil, nil)\n\tif !c1.Equiv(c2) {\n\t\tt.Error(\"Expect zero-value Cons to be equiv to (nil)\")\n\t}\n}\n<|endoftext|>"} {"text":"package radius\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\ntype packetResponseWriter struct {\n\t\/\/ listener that received the packet\n\tconn net.PacketConn\n\taddr net.Addr\n}\n\nfunc (r *packetResponseWriter) Write(packet *Packet) error {\n\tencoded, err := packet.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := r.conn.WriteTo(encoded, r.addr); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ PacketServer listens for RADIUS requests on a packet-based protocols (e.g.\n\/\/ UDP).\ntype PacketServer struct {\n\t\/\/ The address on which the server listens. Defaults to :1812.\n\tAddr string\n\n\t\/\/ The network on which the server listens. Defaults to udp.\n\tNetwork string\n\n\t\/\/ The source from which the secret is obtained for parsing and validating\n\t\/\/ the request.\n\tSecretSource SecretSource\n\n\t\/\/ Handler which is called to process the request.\n\tHandler Handler\n\n\t\/\/ Skip incoming packet authenticity validation.\n\t\/\/ This should only be set to true for debugging purposes.\n\tInsecureSkipVerify bool\n\n\tshutdownRequested int32\n\n\tmu sync.Mutex\n\tctx context.Context\n\tctxDone context.CancelFunc\n\tlisteners map[net.PacketConn]uint\n\tlastActive chan struct{} \/\/ closed when the last active item finishes\n\tactiveCount int32\n}\n\nfunc (s *PacketServer) initLocked() {\n\tif s.ctx == nil {\n\t\ts.ctx, s.ctxDone = context.WithCancel(context.Background())\n\t\ts.listeners = make(map[net.PacketConn]uint)\n\t\ts.lastActive = make(chan struct{})\n\t}\n}\n\nfunc (s *PacketServer) activeAdd() {\n\tatomic.AddInt32(&s.activeCount, 1)\n}\n\nfunc (s *PacketServer) activeDone() {\n\tif atomic.AddInt32(&s.activeCount, -1) == -1 {\n\t\tclose(s.lastActive)\n\t}\n}\n\n\/\/ TODO: logger on PacketServer\n\n\/\/ Serve accepts incoming connections on conn.\nfunc (s *PacketServer) Serve(conn net.PacketConn) error {\n\tif s.Handler == nil {\n\t\treturn errors.New(\"radius: nil Handler\")\n\t}\n\tif s.SecretSource == nil {\n\t\treturn errors.New(\"radius: nil SecretSource\")\n\t}\n\n\ts.mu.Lock()\n\ts.initLocked()\n\tif atomic.LoadInt32(&s.shutdownRequested) == 1 {\n\t\ts.mu.Unlock()\n\t\treturn ErrServerShutdown\n\t}\n\n\ts.listeners[conn]++\n\ts.mu.Unlock()\n\n\ttype requestKey struct {\n\t\tIP string\n\t\tIdentifier byte\n\t}\n\n\tvar (\n\t\trequestsLock sync.Mutex\n\t\trequests = map[requestKey]struct{}{}\n\t)\n\n\ts.activeAdd()\n\tdefer func() {\n\t\ts.mu.Lock()\n\t\ts.listeners[conn]--\n\t\tif s.listeners[conn] == 0 {\n\t\t\tdelete(s.listeners, conn)\n\t\t}\n\t\ts.mu.Unlock()\n\t\ts.activeDone()\n\t}()\n\n\tvar buff [MaxPacketLength]byte\n\tfor {\n\t\tn, remoteAddr, err := conn.ReadFrom(buff[:])\n\t\tif err != nil {\n\t\t\tif atomic.LoadInt32(&s.shutdownRequested) == 1 {\n\t\t\t\treturn ErrServerShutdown\n\t\t\t}\n\n\t\t\tif ne, ok := err.(net.Error); ok && !ne.Temporary() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ts.activeAdd()\n\t\tgo func(buff []byte, remoteAddr net.Addr) {\n\t\t\tdefer s.activeDone()\n\n\t\t\tsecret, err := s.SecretSource.RADIUSSecret(s.ctx, remoteAddr)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(secret) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !s.InsecureSkipVerify && !IsAuthenticRequest(buff, secret) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpacket, err := Parse(buff, secret)\n\t\t\tif err != nil 
{\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkey := requestKey{\n\t\t\t\tIP: remoteAddr.String(),\n\t\t\t\tIdentifier: packet.Identifier,\n\t\t\t}\n\t\t\trequestsLock.Lock()\n\t\t\tif _, ok := requests[key]; ok {\n\t\t\t\trequestsLock.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequests[key] = struct{}{}\n\t\t\trequestsLock.Unlock()\n\n\t\t\tresponse := packetResponseWriter{\n\t\t\t\tconn: conn,\n\t\t\t\taddr: remoteAddr,\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\trequestsLock.Lock()\n\t\t\t\tdelete(requests, key)\n\t\t\t\trequestsLock.Unlock()\n\t\t\t}()\n\n\t\t\trequest := Request{\n\t\t\t\tLocalAddr: conn.LocalAddr(),\n\t\t\t\tRemoteAddr: remoteAddr,\n\t\t\t\tPacket: packet,\n\t\t\t\tctx: s.ctx,\n\t\t\t}\n\n\t\t\ts.Handler.ServeRADIUS(&response, &request)\n\t\t}(append([]byte(nil), buff[:n]...), remoteAddr)\n\t}\n}\n\n\/\/ ListenAndServe starts a RADIUS server on the address given in s.\nfunc (s *PacketServer) ListenAndServe() error {\n\tif s.Handler == nil {\n\t\treturn errors.New(\"radius: nil Handler\")\n\t}\n\tif s.SecretSource == nil {\n\t\treturn errors.New(\"radius: nil SecretSource\")\n\t}\n\n\taddrStr := \":1812\"\n\tif s.Addr != \"\" {\n\t\taddrStr = s.Addr\n\t}\n\n\tnetwork := \"udp\"\n\tif s.Network != \"\" {\n\t\tnetwork = s.Network\n\t}\n\n\tpc, err := net.ListenPacket(network, addrStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer pc.Close()\n\treturn s.Serve(pc)\n}\n\n\/\/ Shutdown gracefully stops the server. It first closes all listeners and then\n\/\/ waits for any running handlers to complete.\n\/\/\n\/\/ Shutdown returns nil after all handlers have completed. ctx.Err() is\n\/\/ returned if ctx is canceled.\n\/\/\n\/\/ Any Serve methods return ErrServerShutdown after Shutdown is called.\nfunc (s *PacketServer) Shutdown(ctx context.Context) error {\n\ts.mu.Lock()\n\ts.initLocked()\n\tif atomic.CompareAndSwapInt32(&s.shutdownRequested, 0, 1) {\n\t\tfor listener := range s.listeners {\n\t\t\tlistener.Close()\n\t\t}\n\n\t\ts.ctxDone()\n\t\ts.activeDone()\n\t}\n\ts.mu.Unlock()\n\n\tselect {\n\tcase <-s.lastActive:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\nadd PacketServer.ErrorLogpackage radius\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\ntype packetResponseWriter struct {\n\t\/\/ listener that received the packet\n\tconn net.PacketConn\n\taddr net.Addr\n}\n\nfunc (r *packetResponseWriter) Write(packet *Packet) error {\n\tencoded, err := packet.Encode()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := r.conn.WriteTo(encoded, r.addr); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ PacketServer listens for RADIUS requests on packet-based protocols (e.g.\n\/\/ UDP).\ntype PacketServer struct {\n\t\/\/ The address on which the server listens. Defaults to :1812.\n\tAddr string\n\n\t\/\/ The network on which the server listens. 
Defaults to udp.\n\tNetwork string\n\n\t\/\/ The source from which the secret is obtained for parsing and validating\n\t\/\/ the request.\n\tSecretSource SecretSource\n\n\t\/\/ Handler which is called to process the request.\n\tHandler Handler\n\n\t\/\/ Skip incoming packet authenticity validation.\n\t\/\/ This should only be set to true for debugging purposes.\n\tInsecureSkipVerify bool\n\n\t\/\/ ErrorLog specifies an optional logger for errors\n\t\/\/ around packet accepting, processing, and validation.\n\t\/\/ If nil, logging is done via the log package's standard logger.\n\tErrorLog *log.Logger\n\n\tshutdownRequested int32\n\n\tmu sync.Mutex\n\tctx context.Context\n\tctxDone context.CancelFunc\n\tlisteners map[net.PacketConn]uint\n\tlastActive chan struct{} \/\/ closed when the last active item finishes\n\tactiveCount int32\n}\n\nfunc (s *PacketServer) initLocked() {\n\tif s.ctx == nil {\n\t\ts.ctx, s.ctxDone = context.WithCancel(context.Background())\n\t\ts.listeners = make(map[net.PacketConn]uint)\n\t\ts.lastActive = make(chan struct{})\n\t}\n}\n\nfunc (s *PacketServer) activeAdd() {\n\tatomic.AddInt32(&s.activeCount, 1)\n}\n\nfunc (s *PacketServer) activeDone() {\n\tif atomic.AddInt32(&s.activeCount, -1) == -1 {\n\t\tclose(s.lastActive)\n\t}\n}\n\nfunc (s *PacketServer) logf(format string, args ...interface{}) {\n\tif s.ErrorLog != nil {\n\t\ts.ErrorLog.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\n\/\/ Serve accepts incoming connections on conn.\nfunc (s *PacketServer) Serve(conn net.PacketConn) error {\n\tif s.Handler == nil {\n\t\treturn errors.New(\"radius: nil Handler\")\n\t}\n\tif s.SecretSource == nil {\n\t\treturn errors.New(\"radius: nil SecretSource\")\n\t}\n\n\ts.mu.Lock()\n\ts.initLocked()\n\tif atomic.LoadInt32(&s.shutdownRequested) == 1 {\n\t\ts.mu.Unlock()\n\t\treturn ErrServerShutdown\n\t}\n\n\ts.listeners[conn]++\n\ts.mu.Unlock()\n\n\ttype requestKey struct {\n\t\tIP string\n\t\tIdentifier byte\n\t}\n\n\tvar (\n\t\trequestsLock sync.Mutex\n\t\trequests = map[requestKey]struct{}{}\n\t)\n\n\ts.activeAdd()\n\tdefer func() {\n\t\ts.mu.Lock()\n\t\ts.listeners[conn]--\n\t\tif s.listeners[conn] == 0 {\n\t\t\tdelete(s.listeners, conn)\n\t\t}\n\t\ts.mu.Unlock()\n\t\ts.activeDone()\n\t}()\n\n\tvar buff [MaxPacketLength]byte\n\tfor {\n\t\tn, remoteAddr, err := conn.ReadFrom(buff[:])\n\t\tif err != nil {\n\t\t\tif atomic.LoadInt32(&s.shutdownRequested) == 1 {\n\t\t\t\treturn ErrServerShutdown\n\t\t\t}\n\n\t\t\tif ne, ok := err.(net.Error); ok && !ne.Temporary() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.logf(\"radius: could not read packet: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ts.activeAdd()\n\t\tgo func(buff []byte, remoteAddr net.Addr) {\n\t\t\tdefer s.activeDone()\n\n\t\t\tsecret, err := s.SecretSource.RADIUSSecret(s.ctx, remoteAddr)\n\t\t\tif err != nil {\n\t\t\t\ts.logf(\"radius: error fetching from secret source: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(secret) == 0 {\n\t\t\t\ts.logf(\"radius: empty secret returned from secret source\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !s.InsecureSkipVerify && !IsAuthenticRequest(buff, secret) {\n\t\t\t\ts.logf(\"radius: packet validation failed; bad secret\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpacket, err := Parse(buff, secret)\n\t\t\tif err != nil {\n\t\t\t\ts.logf(\"radius: unable to parse packet: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkey := requestKey{\n\t\t\t\tIP: remoteAddr.String(),\n\t\t\t\tIdentifier: packet.Identifier,\n\t\t\t}\n\n\t\t\trequestsLock.Lock()\n\t\t\tif _, ok := 
requests[key]; ok {\n\t\t\t\trequestsLock.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequests[key] = struct{}{}\n\t\t\trequestsLock.Unlock()\n\n\t\t\tresponse := packetResponseWriter{\n\t\t\t\tconn: conn,\n\t\t\t\taddr: remoteAddr,\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\trequestsLock.Lock()\n\t\t\t\tdelete(requests, key)\n\t\t\t\trequestsLock.Unlock()\n\t\t\t}()\n\n\t\t\trequest := Request{\n\t\t\t\tLocalAddr: conn.LocalAddr(),\n\t\t\t\tRemoteAddr: remoteAddr,\n\t\t\t\tPacket: packet,\n\t\t\t\tctx: s.ctx,\n\t\t\t}\n\n\t\t\ts.Handler.ServeRADIUS(&response, &request)\n\t\t}(append([]byte(nil), buff[:n]...), remoteAddr)\n\t}\n}\n\n\/\/ ListenAndServe starts a RADIUS server on the address given in s.\nfunc (s *PacketServer) ListenAndServe() error {\n\tif s.Handler == nil {\n\t\treturn errors.New(\"radius: nil Handler\")\n\t}\n\tif s.SecretSource == nil {\n\t\treturn errors.New(\"radius: nil SecretSource\")\n\t}\n\n\taddrStr := \":1812\"\n\tif s.Addr != \"\" {\n\t\taddrStr = s.Addr\n\t}\n\n\tnetwork := \"udp\"\n\tif s.Network != \"\" {\n\t\tnetwork = s.Network\n\t}\n\n\tpc, err := net.ListenPacket(network, addrStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer pc.Close()\n\treturn s.Serve(pc)\n}\n\n\/\/ Shutdown gracefully stops the server. It first closes all listeners and then\n\/\/ waits for any running handlers to complete.\n\/\/\n\/\/ Shutdown returns nil after all handlers have completed. ctx.Err() is\n\/\/ returned if ctx is canceled.\n\/\/\n\/\/ Any Serve methods return ErrServerShutdown after Shutdown is called.\nfunc (s *PacketServer) Shutdown(ctx context.Context) error {\n\ts.mu.Lock()\n\ts.initLocked()\n\tif atomic.CompareAndSwapInt32(&s.shutdownRequested, 0, 1) {\n\t\tfor listener := range s.listeners {\n\t\t\tlistener.Close()\n\t\t}\n\n\t\ts.ctxDone()\n\t\ts.activeDone()\n\t}\n\ts.mu.Unlock()\n\n\tselect {\n\tcase <-s.lastActive:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n
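\n\/\/ Editor's illustrative sketch (not part of the original source): enabling\n\/\/ the new ErrorLog field. handler is assumed to be a Handler implementation,\n\/\/ StaticSecretSource is assumed from this package's public API, and the os\n\/\/ import is assumed.\n\/\/\n\/\/\tserver := &PacketServer{\n\/\/\t\tAddr:         \":1812\",\n\/\/\t\tHandler:      handler,\n\/\/\t\tSecretSource: StaticSecretSource([]byte(\"secret\")),\n\/\/\t\tErrorLog:     log.New(os.Stderr, \"radius: \", log.LstdFlags),\n\/\/\t}\n\/\/\tlog.Fatal(server.ListenAndServe())\n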
<|endoftext|>"} {"text":"package main\n\ntype GameEvent interface {\n\tExecute(*GameState) error\n}\n\n\/\/ create a ship\ntype CreateShipEvent struct {\n\tTime uint64\n\tId string\n\tPosition *Point\n}\n\nfunc (e *CreateShipEvent) Execute(state *GameState) error {\n\tstate.Ships[e.Id] = CreateShip(e.Id, e.Position)\n\treturn nil\n}\n\n\/\/ create an asteroid\ntype CreateAsteroidEvent struct {\n\tTime uint64\n\tId string\n\tPosition *Point\n\tAngle float64\n\tVelocity *Vector\n\tShape []*Point\n}\n\nfunc (e *CreateAsteroidEvent) Execute(state *GameState) error {\n\tstate.Asteroids[e.Id] = CreateAsteroid(e.Id, e.Position, e.Angle, e.Velocity, e.Shape)\n\treturn nil\n}\n\n\/\/ remove a ship\ntype RemoveShipEvent struct {\n\tTime uint64\n\tShipId string\n}\n\nfunc (e *RemoveShipEvent) Execute(state *GameState) error {\n\tdelete(state.Ships, e.ShipId)\n\treturn nil\n}\n\n\/\/ change ship acceleration direction\ntype ChangeAccelerationEvent struct {\n\tTime uint64\n\tShipId string\n\tDirection int8\n}\n\nfunc (e *ChangeAccelerationEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.ShipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\ts.Acceleration = e.Direction\n\treturn nil\n}\n\n\/\/ change ship rotation direction\ntype ChangeRotationEvent struct {\n\tTime uint64\n\tShipId string\n\tDirection int8\n}\n\nfunc (e *ChangeRotationEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.ShipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\ts.Rotation = e.Direction\n\treturn nil\n}\n\n\/\/ fire ship laser!\ntype FireEvent struct {\n\tTime uint64\n\tShipId string\n\tProjectileId string\n\tCreated uint64\n}\n\nfunc (e *FireEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.ShipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\tpos := *s.Position \/\/ Clone ship position\n\tprojectile := CreateProjectile(e.ProjectileId, &pos, s.Angle, e.Created, e.ShipId)\n\tstate.Projectiles[projectile.Id] = projectile\n\treturn nil\n}\n\n\/\/ remove dead objects\ntype CleanupEvent struct {\n\tTime uint64\n}\n\nfunc (e *CleanupEvent) Execute(state *GameState) error {\n\tdead := []string{}\n\n\tfor k, v := range state.Projectiles {\n\t\tif !v.Alive {\n\t\t\tdead = append(dead, k)\n\t\t}\n\t}\n\n\tfor i := range dead {\n\t\tdelete(state.Projectiles, dead[i])\n\t}\n\n\treturn nil\n}\nChanged event fields to private, added Time() function.package main\n\ntype GameEvent interface {\n\tTime() uint64\n\tExecute(*GameState) error\n}\n\n\/\/ create a ship\ntype CreateShipEvent struct {\n\ttime uint64\n\tid string\n\tposition *Point\n}\n\nfunc (e *CreateShipEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *CreateShipEvent) Execute(state *GameState) error {\n\tstate.Ships[e.id] = CreateShip(e.id, e.position)\n\treturn nil\n}\n\n\/\/ create an asteroid\ntype CreateAsteroidEvent struct {\n\ttime uint64\n\tid string\n\tposition *Point\n\tangle float64\n\tvelocity *Vector\n\tshape []*Point\n}\n\nfunc (e *CreateAsteroidEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *CreateAsteroidEvent) Execute(state *GameState) error {\n\tstate.Asteroids[e.id] = CreateAsteroid(e.id, e.position, e.angle, e.velocity, e.shape)\n\treturn nil\n}\n\n\/\/ remove a ship\ntype RemoveShipEvent struct {\n\ttime uint64\n\tshipId string\n}\n\nfunc (e *RemoveShipEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *RemoveShipEvent) Execute(state *GameState) error {\n\tdelete(state.Ships, e.shipId)\n\treturn nil\n}\n\n\/\/ change ship acceleration direction\ntype ChangeAccelerationEvent struct {\n\ttime uint64\n\tshipId string\n\tdirection int8\n}\n\nfunc (e *ChangeAccelerationEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *ChangeAccelerationEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.shipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\ts.Acceleration = e.direction\n\treturn nil\n}\n\n\/\/ change ship rotation direction\ntype ChangeRotationEvent struct {\n\ttime uint64\n\tshipId string\n\tdirection int8\n}\n\nfunc (e *ChangeRotationEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *ChangeRotationEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.shipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\ts.Rotation = e.direction\n\treturn nil\n}\n\n\/\/ fire ship laser!\ntype FireEvent struct {\n\ttime uint64\n\tshipId string\n\tprojectileId string\n\tcreated uint64\n}\n\nfunc (e *FireEvent) Time() uint64 {\n\treturn e.time\n}\n\nfunc (e *FireEvent) Execute(state *GameState) error {\n\ts := state.Ships[e.shipId]\n\tif s == nil {\n\t\treturn GameError{\"Ship doesn't exist for player\"}\n\t}\n\n\tpos := *s.Position \/\/ Clone ship position\n\tprojectile := CreateProjectile(e.projectileId, &pos, s.Angle, e.created, e.shipId)\n\tstate.Projectiles[projectile.Id] = projectile\n\treturn nil\n}\n\n\/\/ remove dead objects\ntype CleanupEvent struct {\n\ttime uint64\n}\n\nfunc (e *CleanupEvent) Time() uint64 {\n\treturn e.time\n}\n\n
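\/\/ Editor's illustrative sketch (not part of the original source): exposing\n\/\/ Time() on the interface lets callers order a mixed queue of events before\n\/\/ applying it, e.g. with the standard library's sort.Slice (queue here is a\n\/\/ hypothetical []GameEvent):\n\/\/\n\/\/\tsort.Slice(queue, func(i, j int) bool {\n\/\/\t\treturn queue[i].Time() < queue[j].Time()\n\/\/\t})\n\/\/\tfor _, ev := range queue {\n\/\/\t\tif err := ev.Execute(state); err != nil {\n\/\/\t\t\t\/\/ handle the failed event\n\/\/\t\t}\n\/\/\t}\n\n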
func (e *CleanupEvent) Execute(state *GameState) error {\n\tdead := []string{}\n\n\tfor k, v := range state.Projectiles {\n\t\tif !v.Alive {\n\t\t\tdead = append(dead, k)\n\t\t}\n\t}\n\n\tfor i := range dead {\n\t\tdelete(state.Projectiles, dead[i])\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"package goat\n\nimport (\n\t\"fmt\"\n)\n\nconst APP = \"goat\"\n\nfunc Manager(killChan chan bool, doneChan chan int, port string) {\n\t\/\/ Launch listeners\n\tgo new(HttpListener).Listen(port)\n\tgo new(UdpListener).Listen(port)\n\n\tfmt.Println(APP, \": HTTP and UDP listeners launched on port \" + port)\n\n\tfor {\n\t\tselect {\n\t\tcase <-killChan:\n\t\t\t\/\/change this to kill workers gracefully and exit\n\t\t\tfmt.Println(\"done\")\n\t\t\tdoneChan <- 0\n\t\t\t\/\/ case freeWorker := <-ioReturn:\n\t\t}\n\t}\n}\nmanager now supports loggingpackage goat\n\nimport (\n\t\"fmt\"\n)\n\nconst APP = \"goat\"\n\nfunc Manager(killChan chan bool, doneChan chan int, port string) {\n\t\/\/ Launch listeners and the log manager\n\tlogChan := make(chan string)\n\tgo new(HttpListener).Listen(port, logChan)\n\tgo new(UdpListener).Listen(port, logChan)\n\tgo LogMng(doneChan, logChan)\n\n\tfmt.Println(APP, \": HTTP and UDP listeners launched on port \" + port)\n\n\tfor {\n\t\tselect {\n\t\tcase <-killChan:\n\t\t\t\/\/change this to kill workers gracefully and exit\n\t\t\tfmt.Println(\"done\")\n\t\t\tdoneChan <- 0\n\t\t\t\/\/ case freeWorker := <-ioReturn:\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package utilcmd\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/jasonpuglisi\/ircutil\"\n)\n\n\/\/ Say sends a message to a target. Function key: inami\/utilcmd.Say\nfunc Say(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendPrivmsg(client, message.Args[0], strings.Join(message.Args[1:],\n\t\t\" \"))\n}\n\n\/\/ Init adds utilcmd's functions to the command map.\nfunc Init(cmdMap ircutil.CmdMap) {\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Say\", Say)\n}\nAdd basic utility commandspackage utilcmd\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/jasonpuglisi\/ircutil\"\n)\n\n\/\/ Init adds utilcmd's functions to the command map.\nfunc Init(cmdMap ircutil.CmdMap) {\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Nick\", Nick)\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Join\", Join)\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Part\", Part)\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Say\", Say)\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Notify\", Notify)\n\tircutil.AddCommand(cmdMap, \"inami\/utilcmd.Do\", Do)\n}\n\n\/\/ Nick updates a nickname. Function key: inami\/utilcmd.Nick\nfunc Nick(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendNick(client, message.Args[0])\n}\n\n\/\/ Join attaches to a channel with an optional password.\n\/\/ Function key: inami\/utilcmd.Join\nfunc Join(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tpass := \"\"\n\tif len(message.Args) > 1 {\n\t\tpass = message.Args[1]\n\t}\n\tircutil.SendJoin(client, message.Args[0], pass)\n}\n\n\/\/ Part detaches from a channel. Function key: inami\/utilcmd.Part\nfunc Part(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tmsg := \"\"\n\tif len(message.Args) > 1 {\n\t\tmsg = strings.Join(message.Args[1:], \" \")\n\t}\n\tircutil.SendPart(client, message.Args[0], msg)\n}\n\n
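\/\/ Editor's illustrative sketch (not part of the original source): a bot\n\/\/ would typically register these handlers once at startup; constructing\n\/\/ CmdMap with make is assumed from ircutil's API.\n\/\/\n\/\/\tcmdMap := make(ircutil.CmdMap)\n\/\/\tInit(cmdMap)\n\/\/\t\/\/ commands such as \"inami\/utilcmd.Say\" can now be dispatched by key\n\n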
\/\/ Say sends a message to a target. Function key: inami\/utilcmd.Say\nfunc Say(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendPrivmsg(client, message.Args[0], strings.Join(message.Args[1:],\n\t\t\" \"))\n}\n\n\/\/ Notify sends a notice to a target. Function key: inami\/utilcmd.Notify\nfunc Notify(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendNotice(client, message.Args[0], strings.Join(message.Args[1:],\n\t\t\" \"))\n}\n\n\/\/ Do performs an action at a target. Function key: inami\/utilcmd.Do\nfunc Do(client *ircutil.Client, command *ircutil.Command,\n\tmessage *ircutil.Message) {\n\tircutil.SendPrivmsg(client, message.Args[0], \"\\x01ACTION \"+\n\t\tstrings.Join(message.Args[1:], \" \")+\"\\x01\")\n}\n<|endoftext|>"} {"text":"package utils\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"net\/http\"\r\n\t\"time\"\r\n)\r\n\r\nconst (\r\n\t\/\/ ISODate - iso date format\r\n\tISODate string = \"2006-01-02\"\r\n\t\/\/ ISODateTime - iso date time format\r\n\tISODateTime string = \"2006-01-02 15:04:05\"\r\n\t\/\/ ISODateTimestamp - iso timestamp format\r\n\tISODateTimestamp string = \"2006-01-02 15:04:05.000\"\r\n\t\/\/ ISODateTimeZ - iso datetime with timezone format\r\n\tISODateTimeZ string = \"2006-01-02 15:04:05Z07:00\"\r\n\t\/\/ ISODateTimestampZ - iso timestamp with timezone format\r\n\tISODateTimestampZ string = \"2006-01-02 15:04:05.000Z07:00\"\r\n\t\/\/ DMY - dd\/MM\/yyyy\r\n\tDMY string = \"02\/01\/2006\"\r\n\t\/\/ DMYTime - dd\/MM\/yyyy HH:mm:ss\r\n\tDMYTime string = \"02\/01\/2006 15:04:05\"\r\n\t\/\/ UTCDate - date at midnight UTC\r\n\tUTCDate string = \"UTCDate\"\r\n\t\/\/ UTCDateTime - ISODateTime at UTC\r\n\tUTCDateTime string = \"UTC\"\r\n\t\/\/ UTCDateTimestamp - ISODateTimestamp at UTC\r\n\tUTCDateTimestamp string = \"UTCTimestamp\"\r\n\t\/\/ DateOffset - time zone offset\r\n\tDateOffset string = \"Z07:00\"\r\n\t\/\/ RSSDateTime - rss date time format\r\n\tRSSDateTime string = \"Mon, _2 Jan 2006 15:04:05 Z07:00\"\r\n\t\/\/ RSSDateTimeTZ - rss date time format with named timezone\r\n\tRSSDateTimeTZ string = \"Mon, _2 Jan 2006 15:04:05 MST\"\r\n)\r\n\r\n\/\/ IsISODate - checks if the string is in iso date format\r\nfunc IsISODate(sval string) bool {\r\n\t_, err := String2date(sval, ISODate)\r\n\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\treturn true\r\n}\r\n\r\n\/\/ IsISODateTime - checks if the string is in iso datetime format\r\nfunc IsISODateTime(sval string) bool {\r\n\t_, err := String2date(sval, ISODateTime)\r\n\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\treturn true\r\n}\r\n\r\n\/\/ DateFromISODateTime - Date From ISODateTime\r\nfunc DateFromISODateTime(sval string) (time.Time, error) {\r\n\treturn String2date(sval, ISODateTime)\r\n}\r\n\r\n\/\/ Date2string - Date to string\r\nfunc Date2string(val time.Time, format string) string {\r\n\tswitch format {\r\n\tcase ISODate, ISODateTime, ISODateTimestamp, ISODateTimeZ, ISODateTimestampZ, DMY, DMYTime:\r\n\t\treturn val.Format(format)\r\n\tcase UTCDate:\r\n\t\treturn val.UTC().Format(ISODate)\r\n\tcase UTCDateTime:\r\n\t\treturn val.UTC().Format(ISODateTimeZ)\r\n\tcase UTCDateTimestamp:\r\n\t\treturn val.UTC().Format(ISODateTimestampZ)\r\n\tcase RSSDateTime:\r\n\t\treturn val.UTC().Format(RSSDateTime)\r\n\tcase RSSDateTimeTZ:\r\n\t\treturn val.Format(RSSDateTimeTZ)\r\n\tdefault:\r\n\t\treturn \"\"\r\n\t}\r\n\r\n}\r\n\r\n\/\/ String2dateNoErr - String to date; panics if the value cannot be parsed\r\nfunc String2dateNoErr(sval string, format string) time.Time {\r\n\tdt, err := String2date(sval, 
format)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn dt\r\n}\r\n\r\n\/\/ String2date - String to date\r\nfunc String2date(sval string, format string) (time.Time, error) {\r\n\tswitch format {\r\n\tcase ISODate, ISODateTime, ISODateTimestamp, ISODateTimeZ, ISODateTimestampZ, DMY, DMYTime, DateOffset:\r\n\t\tloc, err := time.LoadLocation(\"Local\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(format, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase UTCDate:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODate, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase UTCDateTime:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODateTime, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase UTCDateTimestamp:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODateTimestamp, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase RSSDateTime:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(RSSDateTime, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase RSSDateTimeTZ:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(RSSDateTimeTZ, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tdefault:\r\n\t\treturn time.Now(), fmt.Errorf(\"Unknown datetime format \\\"%s\\\"\", format)\r\n\t}\r\n}\r\n\r\n\/\/ Server2ClientDmy - Server2ClientDmy\r\nfunc Server2ClientDmy(r *http.Request, serverTime time.Time) string {\r\n\tt := Server2ClientLocal(r, serverTime)\r\n\treturn Date2string(t, DMY)\r\n}\r\n\r\n\/\/ Server2ClientDmyTime - Server2ClientDmyTime\r\nfunc Server2ClientDmyTime(r *http.Request, serverTime time.Time) string {\r\n\tt := Server2ClientLocal(r, serverTime)\r\n\treturn Date2string(t, DMYTime)\r\n}\r\n\r\n\/\/ Server2ClientLocal - Server2ClientLocal\r\nfunc Server2ClientLocal(r *http.Request, serverTime time.Time) time.Time {\r\n\ttimeOffset := 0\r\n\r\n\tcookie, err := r.Cookie(\"time_zone_offset\")\r\n\tif err != nil && err != http.ErrNoCookie {\r\n\t\treturn serverTime.UTC()\r\n\t} else if err == http.ErrNoCookie {\r\n\t\ttimeOffset = 0\r\n\t} else {\r\n\t\ttimeOffset = String2int(cookie.Value)\r\n\t}\r\n\r\n\treturn serverTime.UTC().Add(time.Duration(-1*timeOffset) * time.Minute)\r\n}\r\nParse RSS datepackage utils\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"net\/http\"\r\n\t\"time\"\r\n)\r\n\r\nconst (\r\n\t\/\/ ISODate - iso date format\r\n\tISODate string = \"2006-01-02\"\r\n\t\/\/ ISODateTime - iso date time format\r\n\tISODateTime string = \"2006-01-02 15:04:05\"\r\n\t\/\/ ISODateTimestamp - iso timestamp format\r\n\tISODateTimestamp string = \"2006-01-02 15:04:05.000\"\r\n\t\/\/ ISODateTimeZ - iso datetime with timezone format\r\n\tISODateTimeZ string = \"2006-01-02 15:04:05Z07:00\"\r\n\t\/\/ 
ISODateTimestampZ - iso timestamp with timezone format\r\n\tISODateTimestampZ string = \"2006-01-02 15:04:05.000Z07:00\"\r\n\t\/\/ DMY - dd\/MM\/yyyy\r\n\tDMY string = \"02\/01\/2006\"\r\n\t\/\/ DMYTime - dd\/MM\/yyyy HH:mm:ss\r\n\tDMYTime string = \"02\/01\/2006 15:04:05\"\r\n\t\/\/ UTCDate - date at midnight UTC\r\n\tUTCDate string = \"UTCDate\"\r\n\t\/\/ UTCDateTime - ISODateTime at UTC\r\n\tUTCDateTime string = \"UTC\"\r\n\t\/\/ UTCDateTimestamp - ISODateTimestamp at UTC\r\n\tUTCDateTimestamp string = \"UTCTimestamp\"\r\n\t\/\/ DateOffset - time zone offset\r\n\tDateOffset string = \"Z07:00\"\r\n\t\/\/ RSSDateTime - rss date time format\r\n\tRSSDateTime string = \"Mon, 02 Jan 2006 15:04:05 Z07:00\"\r\n\t\/\/ RSSDateTime1 - rss date time format\r\n\tRSSDateTime1 string = \"Mon, _2 Jan 2006 15:04:05 Z07:00\"\r\n\t\/\/ RSSDateTime2 - rss date time format 2\r\n\tRSSDateTime2 string = \"Mon, 02 Jan 2006 15:04:05 Z0700\"\r\n\t\/\/ RSSDateTime3 - rss date time format 3\r\n\tRSSDateTime3 string = \"Mon, _2 Jan 2006 15:04:05 Z0700\"\r\n\t\/\/ RSSDateTimeTZ - rss date time format with named timezone\r\n\tRSSDateTimeTZ string = \"Mon, 02 Jan 2006 15:04:05 MST\"\r\n\t\/\/ RSSDateTimeTZ1 - rss date time format with named timezone\r\n\tRSSDateTimeTZ1 string = \"Mon, _2 Jan 2006 15:04:05 MST\"\r\n)\r\n\r\n\/\/ IsISODate - checks if the string is in iso date format\r\nfunc IsISODate(sval string) bool {\r\n\t_, err := String2date(sval, ISODate)\r\n\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\treturn true\r\n}\r\n\r\n\/\/ IsISODateTime - checks if the string is in iso datetime format\r\nfunc IsISODateTime(sval string) bool {\r\n\t_, err := String2date(sval, ISODateTime)\r\n\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\treturn true\r\n}\r\n\r\n\/\/ DateFromISODateTime - Date From ISODateTime\r\nfunc DateFromISODateTime(sval string) (time.Time, error) {\r\n\treturn String2date(sval, ISODateTime)\r\n}\r\n\r\n\/\/ Date2string - Date to string\r\nfunc Date2string(val time.Time, format string) string {\r\n\tswitch format {\r\n\tcase ISODate, ISODateTime, ISODateTimestamp, ISODateTimeZ, ISODateTimestampZ, DMY, DMYTime:\r\n\t\treturn val.Format(format)\r\n\tcase UTCDate:\r\n\t\treturn val.UTC().Format(ISODate)\r\n\tcase UTCDateTime:\r\n\t\treturn val.UTC().Format(ISODateTimeZ)\r\n\tcase UTCDateTimestamp:\r\n\t\treturn val.UTC().Format(ISODateTimestampZ)\r\n\tcase RSSDateTime:\r\n\t\treturn val.UTC().Format(RSSDateTime)\r\n\tcase RSSDateTimeTZ:\r\n\t\treturn val.Format(RSSDateTimeTZ)\r\n\tdefault:\r\n\t\treturn \"\"\r\n\t}\r\n\r\n}\r\n\r\n\/\/ String2dateNoErr - String to date; panics if the value cannot be parsed\r\nfunc String2dateNoErr(sval string, format string) time.Time {\r\n\tdt, err := String2date(sval, format)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn dt\r\n}\r\n\r\n\/\/ String2date - String to date\r\nfunc String2date(sval string, format string) (time.Time, error) {\r\n\tswitch format {\r\n\tcase ISODate, ISODateTime, ISODateTimestamp, ISODateTimeZ, ISODateTimestampZ, DMY, DMYTime, DateOffset:\r\n\t\tloc, err := time.LoadLocation(\"Local\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(format, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase UTCDate:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODate, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, 
nil\r\n\tcase UTCDateTime:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODateTime, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase UTCDateTimestamp:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(ISODateTimestamp, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase RSSDateTime:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(RSSDateTime, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tcase RSSDateTimeTZ:\r\n\t\tloc, err := time.LoadLocation(\"UTC\")\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\r\n\t\tt, err := time.ParseInLocation(RSSDateTimeTZ, sval, loc)\r\n\t\tif err != nil {\r\n\t\t\treturn time.Now(), err\r\n\t\t}\r\n\t\treturn t, nil\r\n\tdefault:\r\n\t\treturn time.Now(), fmt.Errorf(\"Unknown datetime format \\\"%s\\\"\", format)\r\n\t}\r\n}\r\n\r\n\/\/ Server2ClientDmy - Server2ClientDmy\r\nfunc Server2ClientDmy(r *http.Request, serverTime time.Time) string {\r\n\tt := Server2ClientLocal(r, serverTime)\r\n\treturn Date2string(t, DMY)\r\n}\r\n\r\n\/\/ Server2ClientDmyTime - Server2ClientDmyTime\r\nfunc Server2ClientDmyTime(r *http.Request, serverTime time.Time) string {\r\n\tt := Server2ClientLocal(r, serverTime)\r\n\treturn Date2string(t, DMYTime)\r\n}\r\n\r\n\/\/ Server2ClientLocal - Server2ClientLocal\r\nfunc Server2ClientLocal(r *http.Request, serverTime time.Time) time.Time {\r\n\ttimeOffset := 0\r\n\r\n\tcookie, err := r.Cookie(\"time_zone_offset\")\r\n\tif err != nil && err != http.ErrNoCookie {\r\n\t\treturn serverTime.UTC()\r\n\t} else if err == http.ErrNoCookie {\r\n\t\ttimeOffset = 0\r\n\t} else {\r\n\t\ttimeOffset = String2int(cookie.Value)\r\n\t}\r\n\r\n\treturn serverTime.UTC().Add(time.Duration(-1*timeOffset) * time.Minute)\r\n}\r\n\r\n\/\/ ParseRSSDate - try to parse RSS date in multiple formats\r\nfunc ParseRSSDate(sdate string) (time.Time, error) {\r\n\tvar err error\r\n\tvar dt time.Time\r\n\r\n\tformats := []string{\r\n\t\tRSSDateTimeTZ,\r\n\t\tRSSDateTimeTZ1,\r\n\t\tRSSDateTime,\r\n\t\tRSSDateTime1,\r\n\t\tRSSDateTime2,\r\n\t\tRSSDateTime3,\r\n\t}\r\n\r\n\tfor _, format := range formats {\r\n\t\tdt, err = String2date(sdate, format)\r\n\t\tif err == nil {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\r\n\treturn dt.UTC(), err\r\n}\r\n<|endoftext|>"} {"text":"package utils\n\nimport (\n\t\"bytes\"\n\tcrand \"crypto\/rand\"\n\t\"errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/eirka\/eirka-libs\/config\"\n\tlocal \"github.com\/eirka\/eirka-post\/config\"\n)\n\nfunc testPng(size int) *bytes.Buffer {\n\n\toutput := new(bytes.Buffer)\n\n\tmyimage := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{size, size}})\n\n\t\/\/ This loop just fills the image with random data\n\tfor x := 0; x < size; x++ {\n\t\tfor y := 0; y < size; y++ {\n\t\t\tc := color.RGBA{uint8(rand.Intn(255)), uint8(rand.Intn(255)), uint8(rand.Intn(255)), 255}\n\t\t\tmyimage.Set(x, y, 
c)\n\t\t}\n\t}\n\n\tpng.Encode(output, myimage)\n\n\treturn output\n}\n\nfunc testJpeg(size int) *bytes.Buffer {\n\n\toutput := new(bytes.Buffer)\n\n\tmyimage := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{size, size}})\n\n\t\/\/ This loop just fills the image with random data\n\tfor x := 0; x < size; x++ {\n\t\tfor y := 0; y < size; y++ {\n\t\t\tc := color.RGBA{uint8(rand.Intn(255)), uint8(rand.Intn(255)), uint8(rand.Intn(255)), 255}\n\t\t\tmyimage.Set(x, y, c)\n\t\t}\n\t}\n\n\tjpeg.Encode(output, myimage, nil)\n\n\treturn output\n}\n\nfunc testRandom() []byte {\n\tbytes := make([]byte, 20000)\n\n\tif _, err := io.ReadFull(crand.Reader, bytes); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn bytes\n}\n\nfunc formJpegRequest(size int, filename string) *http.Request {\n\n\tvar b bytes.Buffer\n\n\tw := multipart.NewWriter(&b)\n\n\tfw, _ := w.CreateFormFile(\"file\", filename)\n\n\tio.Copy(fw, testJpeg(size))\n\n\tw.Close()\n\n\treq, _ := http.NewRequest(\"POST\", \"\/reply\", &b)\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\treturn req\n}\n\nfunc formRandomRequest(filename string) *http.Request {\n\n\tvar b bytes.Buffer\n\n\tw := multipart.NewWriter(&b)\n\n\tfw, _ := w.CreateFormFile(\"file\", filename)\n\n\tio.Copy(fw, bytes.NewReader(testRandom()))\n\n\tw.Close()\n\n\treq, _ := http.NewRequest(\"POST\", \"\/reply\", &b)\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\treturn req\n}\n\nfunc TestIsAllowedExt(t *testing.T) {\n\n\tassert.False(t, isAllowedExt(\".png.exe\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".exe.png\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\"\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".pdf\"), \"Should not be allowed\")\n\n\tassert.True(t, isAllowedExt(\".jpg\"), \"Should be allowed\")\n\n\tassert.True(t, isAllowedExt(\".JPEG\"), \"Should be allowed\")\n\n}\n\nfunc TestCheckReqGoodExt(t *testing.T) {\n\n\tvar err error\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr = img.checkReqExt()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestCheckReqBadExt(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.crap\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"format not supported\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqBadExtExploit1(t *testing.T) {\n\n\treq := formRandomRequest(\"test.exe.png\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\terr = img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"unknown file type\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqBadExtExploit2(t *testing.T) {\n\n\treq := formRandomRequest(\"test.png.exe\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"format not supported\"), 
\"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqNoExt(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"no file extension\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetMD5(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n}\n\nfunc TestCheckMagicGood(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, img.mime, \"image\/jpeg\", \"Mime type should be the same\")\n\t}\n\n}\n\nfunc TestCheckMagicBad(t *testing.T) {\n\n\treq := formRandomRequest(\"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"unknown file type\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsGoodPng(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000000\n\n\timg := ImageType{}\n\n\timg.image = testPng(400)\n\n\terr := img.getStats()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestGetStatsGoodJpeg(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000000\n\n\timg := ImageType{}\n\n\timg.image = testJpeg(400)\n\n\terr := img.getStats()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestGetStatsBadSize(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000\n\n\timg := ImageType{}\n\n\timg.image = testPng(400)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"image size too large\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsBadMin(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 300000\n\n\timg := ImageType{}\n\n\timg.image = testPng(50)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"image width too small\"), 
\"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsBadMax(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 300000\n\n\timg := ImageType{}\n\n\timg.image = testPng(1200)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"image width too large\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestMakeFilenames(t *testing.T) {\n\n\timg := ImageType{}\n\n\timg.makeFilenames()\n\n\tassert.NotEmpty(t, img.Filename, \"Filename should be returned\")\n\n\tassert.NotEmpty(t, img.Thumbnail, \"Thumbnail name should be returned\")\n\n}\n\nfunc TestSaveFile(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.ProcessFile()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t\tassert.Equal(t, img.Ext, \".jpg\", \"Ext should be the same\")\n\t\tassert.Equal(t, img.mime, \"image\/jpeg\", \"Mime type should be the same\")\n\t}\n\n\tfilesize := img.image.Len()\n\n\terr = img.getStats()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, img.OrigHeight, 300, \"Height should be the same\")\n\t\tassert.Equal(t, img.OrigWidth, 300, \"Width should be the same\")\n\t}\n\n\terr = img.saveFile()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.Filename, \"Filename should be returned\")\n\t\tassert.NotEmpty(t, img.Thumbnail, \"Thumbnail name should be returned\")\n\t}\n\n\tfile, err = os.Open(filepath.Join(local.Settings.Directories.ImageDir, img.Filename))\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tinfo, err = file.Stat()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, info.Name(), img.File, \"Name should be the same\")\n\t\tassert.Equal(t, info.Size(), filesize, \"Size should be the same\")\n\t}\n}\nadd tests and change image functions a bitpackage utils\n\nimport (\n\t\"bytes\"\n\tcrand \"crypto\/rand\"\n\t\"errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/jpeg\"\n\t\"image\/png\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/eirka\/eirka-libs\/config\"\n\tlocal \"github.com\/eirka\/eirka-post\/config\"\n)\n\nfunc testPng(size int) *bytes.Buffer {\n\n\toutput := new(bytes.Buffer)\n\n\tmyimage := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{size, size}})\n\n\t\/\/ This loop just fills the image with random data\n\tfor x := 0; x < size; x++ {\n\t\tfor y := 0; y < size; y++ {\n\t\t\tc := color.RGBA{uint8(rand.Intn(255)), uint8(rand.Intn(255)), uint8(rand.Intn(255)), 255}\n\t\t\tmyimage.Set(x, y, c)\n\t\t}\n\t}\n\n\tpng.Encode(output, myimage)\n\n\treturn output\n}\n\nfunc testJpeg(size int) *bytes.Buffer {\n\n\toutput := new(bytes.Buffer)\n\n\tmyimage := image.NewRGBA(image.Rectangle{image.Point{0, 0}, image.Point{size, size}})\n\n\t\/\/ This loop just fills the image with random data\n\tfor x := 0; x < size; x++ {\n\t\tfor y := 0; y < size; y++ {\n\t\t\tc := color.RGBA{uint8(rand.Intn(255)), uint8(rand.Intn(255)), uint8(rand.Intn(255)), 255}\n\t\t\tmyimage.Set(x, y, 
c)\n\t\t}\n\t}\n\n\tjpeg.Encode(output, myimage, nil)\n\n\treturn output\n}\n\nfunc testRandom() []byte {\n\tbytes := make([]byte, 20000)\n\n\tif _, err := io.ReadFull(crand.Reader, bytes); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn bytes\n}\n\nfunc formJpegRequest(size int, filename string) *http.Request {\n\n\tvar b bytes.Buffer\n\n\tw := multipart.NewWriter(&b)\n\n\tfw, _ := w.CreateFormFile(\"file\", filename)\n\n\tio.Copy(fw, testJpeg(size))\n\n\tw.Close()\n\n\treq, _ := http.NewRequest(\"POST\", \"\/reply\", &b)\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\treturn req\n}\n\nfunc formRandomRequest(filename string) *http.Request {\n\n\tvar b bytes.Buffer\n\n\tw := multipart.NewWriter(&b)\n\n\tfw, _ := w.CreateFormFile(\"file\", filename)\n\n\tio.Copy(fw, bytes.NewReader(testRandom()))\n\n\tw.Close()\n\n\treq, _ := http.NewRequest(\"POST\", \"\/reply\", &b)\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\treturn req\n}\n\nfunc TestIsAllowedExt(t *testing.T) {\n\n\tassert.False(t, isAllowedExt(\".png.exe\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".exe.png\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\"\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".\"), \"Should not be allowed\")\n\n\tassert.False(t, isAllowedExt(\".pdf\"), \"Should not be allowed\")\n\n\tassert.True(t, isAllowedExt(\".jpg\"), \"Should be allowed\")\n\n\tassert.True(t, isAllowedExt(\".JPEG\"), \"Should be allowed\")\n\n}\n\nfunc TestCheckReqGoodExt(t *testing.T) {\n\n\tvar err error\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr = img.checkReqExt()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestCheckReqBadExt(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.crap\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"format not supported\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqBadExtExploit1(t *testing.T) {\n\n\treq := formRandomRequest(\"test.exe.png\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\terr = img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"unknown file type\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqBadExtExploit2(t *testing.T) {\n\n\treq := formRandomRequest(\"test.png.exe\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"format not supported\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestCheckReqNoExt(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.checkReqExt()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"no file extension\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetMD5(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := 
ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n}\n\nfunc TestCheckMagicGood(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, img.mime, \"image\/jpeg\", \"Mime type should be the same\")\n\t}\n\n}\n\nfunc TestCheckMagicBad(t *testing.T) {\n\n\treq := formRandomRequest(\"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.getMD5()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t}\n\n\terr = img.checkMagic()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"unknown file type\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsGoodPng(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000000\n\n\timg := ImageType{}\n\n\timg.image = testPng(400)\n\n\terr := img.getStats()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestGetStatsGoodJpeg(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000000\n\n\timg := ImageType{}\n\n\timg.image = testJpeg(400)\n\n\terr := img.getStats()\n\tassert.NoError(t, err, \"An error was not expected\")\n\n}\n\nfunc TestGetStatsBadSize(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 3000\n\n\timg := ImageType{}\n\n\timg.image = testPng(400)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"image size too large\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsBadMin(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 300000\n\n\timg := ImageType{}\n\n\timg.image = testPng(50)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, errors.New(\"image width too small\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestGetStatsBadMax(t *testing.T) {\n\n\tconfig.Settings.Limits.ImageMaxWidth = 1000\n\tconfig.Settings.Limits.ImageMinWidth = 100\n\tconfig.Settings.Limits.ImageMaxHeight = 1000\n\tconfig.Settings.Limits.ImageMinHeight = 100\n\tconfig.Settings.Limits.ImageMaxSize = 300000\n\n\timg := ImageType{}\n\n\timg.image = testPng(1200)\n\n\terr := img.getStats()\n\tif assert.Error(t, err, \"An error was expected\") {\n\t\tassert.Equal(t, err, 
errors.New(\"image width too large\"), \"Error should match\")\n\t}\n\n}\n\nfunc TestMakeFilenames(t *testing.T) {\n\n\timg := ImageType{}\n\n\timg.makeFilenames()\n\n\tassert.NotEmpty(t, img.Filename, \"Filename should be returned\")\n\n\tassert.NotEmpty(t, img.Thumbnail, \"Thumbnail name should be returned\")\n\n}\n\nfunc TestSaveFile(t *testing.T) {\n\n\treq := formJpegRequest(300, \"test.jpeg\")\n\n\timg := ImageType{}\n\n\timg.File, img.Header, _ = req.FormFile(\"file\")\n\n\terr := img.ProcessFile()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.MD5, \"MD5 should be returned\")\n\t\tassert.Equal(t, img.Ext, \".jpg\", \"Ext should be the same\")\n\t\tassert.Equal(t, img.mime, \"image\/jpeg\", \"Mime type should be the same\")\n\t}\n\n\tfilesize := img.image.Len()\n\n\terr = img.getStats()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, img.OrigHeight, 300, \"Height should be the same\")\n\t\tassert.Equal(t, img.OrigWidth, 300, \"Width should be the same\")\n\t}\n\n\terr = img.saveFile()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.NotEmpty(t, img.Filename, \"Filename should be returned\")\n\t\tassert.NotEmpty(t, img.Thumbnail, \"Thumbnail name should be returned\")\n\t}\n\n\tfile, err := os.Open(filepath.Join(local.Settings.Directories.ImageDir, img.Filename))\n\tassert.NoError(t, err, \"An error was not expected\")\n\n\tinfo, err = file.Stat()\n\tif assert.NoError(t, err, \"An error was not expected\") {\n\t\tassert.Equal(t, info.Name(), img.File, \"Name should be the same\")\n\t\tassert.Equal(t, info.Size(), filesize, \"Size should be the same\")\n\t}\n}\n<|endoftext|>"} {"text":"package log\n\nimport (\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Config defines Logger configuration.\ntype Config struct {\n\tDisable bool `yaml:\"disable\"`\n\tServiceName string `yaml:\"service_name\"`\n\tPath string `yaml:\"path\"`\n\tEncoding string `yaml:\"encoding\"`\n}\n\nfunc (c Config) applyDefaults() Config {\n\tif c.Path == \"\" {\n\t\tc.Path = \"stderr\"\n\t}\n\tif c.Encoding == \"\" {\n\t\tc.Encoding = \"console\"\n\t}\n\treturn c\n}\n\n\/\/ New creates a logger that is not default.\nfunc New(c Config, fields map[string]interface{}) (*zap.Logger, error) {\n\tc = c.applyDefaults()\n\tif c.Disable {\n\t\treturn zap.NewNop(), nil\n\t}\n\tif fields == nil {\n\t\tfields = map[string]interface{}{}\n\t}\n\tif c.ServiceName != \"\" {\n\t\tfields[\"service_name\"] = c.ServiceName\n\t}\n\n\treturn zap.Config{\n\t\tLevel: zap.NewAtomicLevel(),\n\t\tSampling: &zap.SamplingConfig{\n\t\t\tInitial: 100,\n\t\t\tThereafter: 100,\n\t\t},\n\t\tEncoding: c.Encoding,\n\t\tEncoderConfig: zapcore.EncoderConfig{\n\t\t\tMessageKey: \"message\",\n\t\t\tNameKey: \"logger_name\",\n\t\t\tLevelKey: \"level\",\n\t\t\tTimeKey: \"ts\",\n\t\t\tCallerKey: \"caller\",\n\t\t\tStacktraceKey: \"stack\",\n\t\t\tEncodeLevel: zapcore.CapitalLevelEncoder,\n\t\t\tEncodeTime: zapcore.ISO8601TimeEncoder,\n\t\t\tEncodeDuration: zapcore.SecondsDurationEncoder,\n\t\t\tEncodeCaller: zapcore.ShortCallerEncoder,\n\t\t},\n\t\tOutputPaths: []string{c.Path},\n\t\tInitialFields: fields,\n\t}.Build()\n}\nDisable stacktracepackage log\n\nimport (\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Config defines Logger configuration.\ntype Config struct {\n\tDisable bool `yaml:\"disable\"`\n\tServiceName string `yaml:\"service_name\"`\n\tPath string `yaml:\"path\"`\n\tEncoding string 
`yaml:\"encoding\"`\n}\n\nfunc (c Config) applyDefaults() Config {\n\tif c.Path == \"\" {\n\t\tc.Path = \"stderr\"\n\t}\n\tif c.Encoding == \"\" {\n\t\tc.Encoding = \"console\"\n\t}\n\treturn c\n}\n\n\/\/ New creates a logger that is not default.\nfunc New(c Config, fields map[string]interface{}) (*zap.Logger, error) {\n\tc = c.applyDefaults()\n\tif c.Disable {\n\t\treturn zap.NewNop(), nil\n\t}\n\tif fields == nil {\n\t\tfields = map[string]interface{}{}\n\t}\n\tif c.ServiceName != \"\" {\n\t\tfields[\"service_name\"] = c.ServiceName\n\t}\n\n\treturn zap.Config{\n\t\tLevel: zap.NewAtomicLevel(),\n\t\tSampling: &zap.SamplingConfig{\n\t\t\tInitial: 100,\n\t\t\tThereafter: 100,\n\t\t},\n\t\tEncoding: c.Encoding,\n\t\tEncoderConfig: zapcore.EncoderConfig{\n\t\t\tMessageKey: \"message\",\n\t\t\tNameKey: \"logger_name\",\n\t\t\tLevelKey: \"level\",\n\t\t\tTimeKey: \"ts\",\n\t\t\tCallerKey: \"caller\",\n\t\t\tEncodeLevel: zapcore.CapitalLevelEncoder,\n\t\t\tEncodeTime: zapcore.ISO8601TimeEncoder,\n\t\t\tEncodeDuration: zapcore.SecondsDurationEncoder,\n\t\t\tEncodeCaller: zapcore.ShortCallerEncoder,\n\t\t},\n\t\tDisableStacktrace: true,\n\t\tOutputPaths: []string{c.Path},\n\t\tInitialFields: fields,\n\t}.Build()\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nhelloworld tracks how often a user has visited the index page.\n\nThis program demonstrates usage of the Cloud Bigtable API for the App Engine Flex environment and Go.\nInstructions for running this program are in the README.md.\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"cloud.google.com\/go\/bigtable\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\taelog \"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/user\"\n)\n\n\/\/ User-provided constants.\nconst (\n\tproject = \"PROJECT_ID\"\n\tinstance = \"INSTANCE\"\n)\n\nvar (\n\ttableName = \"bigtable-hello\"\n\tfamilyName = \"emails\"\n\n\t\/\/ Client is initialized by main.\n\tclient *bigtable.Client\n)\n\nfunc main() {\n\tctx := context.Background()\n\n\t\/\/ Set up admin client, tables, and column families.\n\t\/\/ NewAdminClient uses Application Default Credentials to authenticate.\n\tadminClient, err := bigtable.NewAdminClient(ctx, project, instance)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create a table admin client. %v\", err)\n\t}\n\ttables, err := adminClient.Tables(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to fetch table list. %v\", err)\n\t}\n\tif !sliceContains(tables, tableName) {\n\t\tif err := adminClient.CreateTable(ctx, tableName); err != nil {\n\t\t\tlog.Fatalf(\"Unable to create table: %v. %v\", tableName, err)\n\t\t}\n\t}\n\ttblInfo, err := adminClient.TableInfo(ctx, tableName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to read info for table: %v. 
%v\", tableName, err)\n\t}\n\tif !sliceContains(tblInfo.Families, familyName) {\n\t\tif err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil {\n\t\t\tlog.Fatalf(\"Unable to create column family: %v. %v\", familyName, err)\n\t\t}\n\t}\n\tadminClient.Close()\n\n\t\/\/ Set up Bigtable data operations client.\n\t\/\/ NewClient uses Application Default Credentials to authenticate.\n\tclient, err = bigtable.NewClient(ctx, project, instance)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create data operations client. %v\", err)\n\t}\n\n\thttp.Handle(\"\/\", appHandler(mainHandler))\n\tappengine.Main() \/\/ Never returns.\n}\n\n\/\/ mainHandler tracks how many times each user has visited this page.\nfunc mainHandler(w http.ResponseWriter, r *http.Request) *appError {\n\tif r.URL.Path != \"\/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn nil\n\t}\n\n\tctx := appengine.NewContext(r)\n\tu := user.Current(ctx)\n\tif u == nil {\n\t\tlogin, err := user.LoginURL(ctx, r.URL.String())\n\t\tif err != nil {\n\t\t\treturn &appError{err, \"Error finding login URL\", http.StatusInternalServerError}\n\t\t}\n\t\thttp.Redirect(w, r, login, http.StatusFound)\n\t\treturn nil\n\t}\n\tlogoutURL, err := user.LogoutURL(ctx, \"\/\")\n\tif err != nil {\n\t\treturn &appError{err, \"Error finding logout URL\", http.StatusInternalServerError}\n\t}\n\n\t\/\/ Display hello page.\n\ttbl := client.Open(tableName)\n\trmw := bigtable.NewReadModifyWrite()\n\trmw.Increment(familyName, u.Email, 1)\n\trow, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw)\n\tif err != nil {\n\t\treturn &appError{err, \"Error applying ReadModifyWrite to row: \" + u.Email, http.StatusInternalServerError}\n\t}\n\tdata := struct {\n\t\tUsername, Logout string\n\t\tVisits uint64\n\t}{\n\t\tUsername: u.Email,\n\t\t\/\/ Retrieve the most recently edited column.\n\t\tVisits: binary.BigEndian.Uint64(row[familyName][0].Value),\n\t\tLogout: logoutURL,\n\t}\n\tvar buf bytes.Buffer\n\tif err := tmpl.Execute(&buf, data); err != nil {\n\t\treturn &appError{err, \"Error writing template\", http.StatusInternalServerError}\n\t}\n\tbuf.WriteTo(w)\n\treturn nil\n}\n\nvar tmpl = template.Must(template.New(\"\").Parse(`\n\n\n
<html><body>\n<p>\n{{with .Username}} Hello {{.}}{{end}}\n{{with .Logout}}<a href=\"{{.}}\">Sign out<\/a>{{end}}\n<\/p>\n<p>
\nYou have visited {{.Visits}}\n<\/p>\n\n<\/body><\/html>`))\n\n\/\/ sliceContains reports whether the provided string is present in the given slice of strings.\nfunc sliceContains(list []string, target string) bool {\n\tfor _, s := range list {\n\t\tif s == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ More info about this method of error handling can be found at: http:\/\/blog.golang.org\/error-handling-and-go\ntype appHandler func(http.ResponseWriter, *http.Request) *appError\n\ntype appError struct {\n\tError error\n\tMessage string\n\tCode int\n}\n\nfunc (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif e := fn(w, r); e != nil {\n\t\tctx := appengine.NewContext(r)\n\t\taelog.Errorf(ctx, \"%v\", e.Error)\n\t\thttp.Error(w, e.Message, e.Code)\n\t}\n}\ndoc: refer to the App Engine flexible env correctly\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nhelloworld tracks how often a user has visited the index page.\n\nThis program demonstrates usage of the Cloud Bigtable API for App Engine flexible environment and Go.\nInstructions for running this program are in the README.md.\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"cloud.google.com\/go\/bigtable\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\taelog \"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/user\"\n)\n\n\/\/ User-provided constants.\nconst (\n\tproject = \"PROJECT_ID\"\n\tinstance = \"INSTANCE\"\n)\n\nvar (\n\ttableName = \"bigtable-hello\"\n\tfamilyName = \"emails\"\n\n\t\/\/ Client is initialized by main.\n\tclient *bigtable.Client\n)\n\nfunc main() {\n\tctx := context.Background()\n\n\t\/\/ Set up admin client, tables, and column families.\n\t\/\/ NewAdminClient uses Application Default Credentials to authenticate.\n\tadminClient, err := bigtable.NewAdminClient(ctx, project, instance)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create a table admin client. %v\", err)\n\t}\n\ttables, err := adminClient.Tables(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to fetch table list. %v\", err)\n\t}\n\tif !sliceContains(tables, tableName) {\n\t\tif err := adminClient.CreateTable(ctx, tableName); err != nil {\n\t\t\tlog.Fatalf(\"Unable to create table: %v. %v\", tableName, err)\n\t\t}\n\t}\n\ttblInfo, err := adminClient.TableInfo(ctx, tableName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to read info for table: %v. %v\", tableName, err)\n\t}\n\tif !sliceContains(tblInfo.Families, familyName) {\n\t\tif err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil {\n\t\t\tlog.Fatalf(\"Unable to create column family: %v. 
%v\", familyName, err)\n\t\t}\n\t}\n\tadminClient.Close()\n\n\t\/\/ Set up Bigtable data operations client.\n\t\/\/ NewClient uses Application Default Credentials to authenticate.\n\tclient, err = bigtable.NewClient(ctx, project, instance)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create data operations client. %v\", err)\n\t}\n\n\thttp.Handle(\"\/\", appHandler(mainHandler))\n\tappengine.Main() \/\/ Never returns.\n}\n\n\/\/ mainHandler tracks how many times each user has visited this page.\nfunc mainHandler(w http.ResponseWriter, r *http.Request) *appError {\n\tif r.URL.Path != \"\/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn nil\n\t}\n\n\tctx := appengine.NewContext(r)\n\tu := user.Current(ctx)\n\tif u == nil {\n\t\tlogin, err := user.LoginURL(ctx, r.URL.String())\n\t\tif err != nil {\n\t\t\treturn &appError{err, \"Error finding login URL\", http.StatusInternalServerError}\n\t\t}\n\t\thttp.Redirect(w, r, login, http.StatusFound)\n\t\treturn nil\n\t}\n\tlogoutURL, err := user.LogoutURL(ctx, \"\/\")\n\tif err != nil {\n\t\treturn &appError{err, \"Error finding logout URL\", http.StatusInternalServerError}\n\t}\n\n\t\/\/ Display hello page.\n\ttbl := client.Open(tableName)\n\trmw := bigtable.NewReadModifyWrite()\n\trmw.Increment(familyName, u.Email, 1)\n\trow, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw)\n\tif err != nil {\n\t\treturn &appError{err, \"Error applying ReadModifyWrite to row: \" + u.Email, http.StatusInternalServerError}\n\t}\n\tdata := struct {\n\t\tUsername, Logout string\n\t\tVisits uint64\n\t}{\n\t\tUsername: u.Email,\n\t\t\/\/ Retrieve the most recently edited column.\n\t\tVisits: binary.BigEndian.Uint64(row[familyName][0].Value),\n\t\tLogout: logoutURL,\n\t}\n\tvar buf bytes.Buffer\n\tif err := tmpl.Execute(&buf, data); err != nil {\n\t\treturn &appError{err, \"Error writing template\", http.StatusInternalServerError}\n\t}\n\tbuf.WriteTo(w)\n\treturn nil\n}\n\nvar tmpl = template.Must(template.New(\"\").Parse(`\n\n\n
<html><body>\n<p>\n{{with .Username}} Hello {{.}}{{end}}\n{{with .Logout}}<a href=\"{{.}}\">Sign out<\/a>{{end}}\n<\/p>\n<p>
\nYou have visited {{.Visits}}\n<\/p>\n\n<\/body><\/html>`))\n\n\/\/ sliceContains reports whether the provided string is present in the given slice of strings.\nfunc sliceContains(list []string, target string) bool {\n\tfor _, s := range list {\n\t\tif s == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ More info about this method of error handling can be found at: http:\/\/blog.golang.org\/error-handling-and-go\ntype appHandler func(http.ResponseWriter, *http.Request) *appError\n\ntype appError struct {\n\tError error\n\tMessage string\n\tCode int\n}\n\nfunc (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif e := fn(w, r); e != nil {\n\t\tctx := appengine.NewContext(r)\n\t\taelog.Errorf(ctx, \"%v\", e.Error)\n\t\thttp.Error(w, e.Message, e.Code)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2020, 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Binary wrap is a test helper program for \/\/elisp:binary_test, which see.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/phst\/runfiles\"\n)\n\nfunc main() {\n\tlog.Println(\"Args:\", os.Args)\n\tlog.Println(\"Environment:\", os.Environ())\n\tvar manifestFile string\n\tflag.StringVar(&manifestFile, \"manifest\", \"\", \"\")\n\tflag.Parse()\n\tif manifestFile == \"\" {\n\t\tlog.Fatal(\"--manifest is empty\")\n\t}\n\tworkspaceDir, err := runfiles.Path(\"phst_rules_elisp\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tgotArgs := flag.Args()\n\twantArgs := []string{\n\t\t\"--quick\", \"--batch\",\n\t\t\"--directory=\" + workspaceDir,\n\t\t\"--option\",\n\t\t\"elisp\/binary.cc\",\n\t\t\" \\t\\n\\r\\f äα𝐴🐈'\\\\\\\"\",\n\t\t\"\/:\/tmp\/output.dat\",\n\t}\n\tif diff := cmp.Diff(gotArgs, wantArgs); diff != \"\" {\n\t\tlog.Fatalf(\"positional arguments: -got +want:\\n%s\", diff)\n\t}\n\tjsonData, err := ioutil.ReadFile(manifestFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"can’t read manifest: %s\", err)\n\t}\n\tvar gotManifest map[string]interface{}\n\tif err := json.Unmarshal(jsonData, &gotManifest); err != nil {\n\t\tlog.Fatalf(\"can’t decode manifest: %s\", err)\n\t}\n\twantManifest := map[string]interface{}{\n\t\t\"root\": \"RUNFILES_ROOT\",\n\t\t\"tags\": []interface{}{\"local\", \"mytag\"},\n\t\t\"loadPath\": []interface{}{\"phst_rules_elisp\"},\n\t\t\"inputFiles\": []interface{}{\"phst_rules_elisp\/elisp\/binary.cc\", \"phst_rules_elisp\/elisp\/binary.h\"},\n\t\t\"outputFiles\": []interface{}{\"\/tmp\/output.dat\"},\n\t}\n\tif diff := cmp.Diff(gotManifest, wantManifest); diff != \"\" {\n\t\tlog.Fatalf(\"manifest: -got +want:\\n%s\", diff)\n\t}\n}\nFix test for manifest-based runfiles\/\/ Copyright 2020, 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ 
https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Binary wrap is a test helper program for \/\/elisp:binary_test, which see.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/phst\/runfiles\"\n)\n\nfunc main() {\n\tlog.Println(\"Args:\", os.Args)\n\tlog.Println(\"Environment:\", os.Environ())\n\tvar manifestFile string\n\tflag.StringVar(&manifestFile, \"manifest\", \"\", \"\")\n\tflag.Parse()\n\tif manifestFile == \"\" {\n\t\tlog.Fatal(\"--manifest is empty\")\n\t}\n\trunfilesLib, err := runfiles.Path(\"phst_rules_elisp\/elisp\/runfiles\/runfiles.elc\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ The load path setup depends on whether we use manifest-based or\n\t\/\/ directory-based runfiles.\n\tvar loadPathArgs []string\n\tif dir, err := runfiles.Path(\"phst_rules_elisp\"); err == nil {\n\t\t\/\/ Directory-based runfiles.\n\t\tloadPathArgs = []string{\"--directory=\" + dir}\n\t} else {\n\t\t\/\/ Manifest-based runfiles.\n\t\tloadPathArgs = []string{\n\t\t\t\"--load=\" + runfilesLib,\n\t\t\t\"--funcall=elisp\/runfiles\/install-handler\",\n\t\t\t\"--directory=\/bazel-runfile:phst_rules_elisp\",\n\t\t}\n\t}\n\tgotArgs := flag.Args()\n\twantArgs := append(\n\t\tappend([]string{\"--quick\", \"--batch\"}, loadPathArgs...),\n\t\t\"--option\",\n\t\t\"elisp\/binary.cc\",\n\t\t\" \\t\\n\\r\\f äα𝐴🐈'\\\\\\\"\",\n\t\t\"\/:\/tmp\/output.dat\",\n\t)\n\tif diff := cmp.Diff(gotArgs, wantArgs); diff != \"\" {\n\t\tlog.Fatalf(\"positional arguments: -got +want:\\n%s\", diff)\n\t}\n\tjsonData, err := ioutil.ReadFile(manifestFile)\n\tif err != nil {\n\t\tlog.Fatalf(\"can’t read manifest: %s\", err)\n\t}\n\tvar gotManifest map[string]interface{}\n\tif err := json.Unmarshal(jsonData, &gotManifest); err != nil {\n\t\tlog.Fatalf(\"can’t decode manifest: %s\", err)\n\t}\n\twantManifest := map[string]interface{}{\n\t\t\"root\": \"RUNFILES_ROOT\",\n\t\t\"tags\": []interface{}{\"local\", \"mytag\"},\n\t\t\"loadPath\": []interface{}{\"phst_rules_elisp\"},\n\t\t\"inputFiles\": []interface{}{\"phst_rules_elisp\/elisp\/binary.cc\", \"phst_rules_elisp\/elisp\/binary.h\"},\n\t\t\"outputFiles\": []interface{}{\"\/tmp\/output.dat\"},\n\t}\n\tif diff := cmp.Diff(gotManifest, wantManifest); diff != \"\" {\n\t\tlog.Fatalf(\"manifest: -got +want:\\n%s\", diff)\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nvar (\n\tport = flag.String(\"p\", \":12345\", \"HTTP listen address\")\n\tfetcherPort = flag.String(\"f\", \":8000\", \"DFK Fetcher port\")\n\tindexContent = `\n\n\n\n\t\n\t