hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (sequencelengths 1..10) | max_stars_count (int64 1..191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24, ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (sequencelengths 1..10) | max_issues_count (int64 1..116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24, ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (sequencelengths 1..10) | max_forks_count (int64 1..105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24, ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83460a0306ce0c4d051de8ff7ec33c1611162381 | 18,428 | py | Python | stock_analysis/test_file.py | ztaylor2/stock-analysis | 57edd3a7b33a61cf3b7f68dab6b7640ccdc65a02 | ["MIT"] | 15 | 2017-11-03T17:30:02.000Z | 2022-01-14T06:15:06.000Z | stock_analysis/test_file.py | seekdestiny/stock-analysis | 57edd3a7b33a61cf3b7f68dab6b7640ccdc65a02 | ["MIT"] | 3 | 2019-12-26T16:39:17.000Z | 2021-06-01T21:56:41.000Z | stock_analysis/test_file.py | seekdestiny/stock-analysis | 57edd3a7b33a61cf3b7f68dab6b7640ccdc65a02 | ["MIT"] | 4 | 2018-03-01T06:35:08.000Z | 2021-04-02T16:39:12.000Z |
"""Tests for the Stock Analysis app."""
from __future__ import unicode_literals
from pyramid.httpexceptions import HTTPNotFound, HTTPFound, HTTPBadRequest
from datetime import datetime
import os
import pytest
""" UNIT TESTS FOR MODELS """
stock_details = {
'company': 'Yahoo',
'exchange': 'NYSE',
'ticker': 'YHOO',
'price': '18.55',
'dollar_change': '.50',
'pct_change': '2.32',
'open_price': '18.05',
'PE': '18.95',
'low': '18.05'
}
def test_(db_session):
"""Test that Portfolio constructed when stock is entered."""
from stock_analysis.models import Portfolio
assert len(db_session.query(Portfolio).all()) == 0
new_portfolio = Portfolio(
username='foobar',
stocks=stock_details['ticker']
)
db_session.add(new_portfolio)
db_session.commit()
assert len(db_session.query(Portfolio).all()) == 1
def test_to_dict_puts_all_properties_in_a_dictionary():
"""Test that all properties of a stock are in to_dict dictionary."""
assert all(prop in stock_details for prop in ['company', 'exchange',
'ticker', 'price', 'dollar_change',
'open_price', 'pct_change', 'PE', 'low'])
def test_to_html_dict_puts_all_properties_in_a_dictionary(test_entry):
"""Test that all properties of a stock are in to_html_dict dictionary."""
assert all(prop in stock_details for prop in ['company', 'exchange',
'ticker', 'price', 'dollar_change', 'pct_change',
'open_price', 'PE', 'low'])
""" UNIT TESTS FOR VIEW FUNCTIONS """
# Use the testapp fixture instead of dummy_request to authenticate
# your request when testing this view.
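# For example (sketch only, assuming a `testapp` fixture that wraps the WSGI app
# with an authenticated session):
#   def test_portfolio_view_returns_list(testapp):
#       response = testapp.get('/portfolio')
#       assert response.status_code == 200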
# def test_portfolio_view_returns_list(dummy_request, db_session):
# """Test that the Portfolio view function returns a list of stocks."""
# from stock_analysis.views.default import portfolio_view
# # dummy_request.authenticated_userid = 'shinners'
# dummy_request.dbsession = db_session
# response = portfolio_view(dummy_request)
# assert 'stocks' in response
# assert isinstance(response['stocks'], list)
# def test_portfolio_view_returns_stocks_in_list(testapp, add_stock):
# """Test that Portfolio view function returns entries as dicitonaries."""
# from stock_analysis.views.default import portfolio_view
# response = portfolio_view(testapp)
# assert add_stock[0].to_html_dict() in response['stocks']
# def test_portfolio_view_returns_all_stocks_in_db(testapp):
# """Test that Portfolio view function returns all entries in database."""
# from stock_analysis.views.default import portfolio_view
# from stock_analysis.models import Portfolio
# response = portfolio_view(testapp)
# query = testapp.dbsession.query(Portfolio)
# assert len(response['stocks']) == query.count()
# def test_detail_view_returns_one_entry_detail(testapp, add_stock):
# """Test that the detail view function returns the data of one entry."""
# from stock_analysis.views.default import detail_view
# testapp.matchdict['id'] = 1
# response = detail_view(testapp)
# assert add_stock[0].to_html_dict() == response['stock']
# def test_detail_view_returns_correct_stock_detail(testapp):
# """Test that the detail view function returns the correct stock data."""
# from stock_analysis.views.default import detail_view
# testapp.matchdict['company'] = 'Yahoo'
# response = detail_view(testapp)
# assert response['stock']['company'] == 'Yahoo'
# def test_detail_view_raises_httpnotfound_if_not_found(testapp):
# """Test that detail_view raises HTTPNotFound if symbol is not found."""
# from stock_analysis.views.default import detail_view
# testapp.matchdict['company'] = 'asdfaasdffrew'
# with pytest.raises(HTTPNotFound):
# detail_view(testapp)
# def test_get_symbol_returns_proper_stock_data(testapp):
# """Test that the _get_stock function returns proper stock data."""
# from stock_analysis.views.default import portfolio_view
# # from stock_analysis.models import Portfolio
# stock_details = {
# 'company': 'Yahoo',
# 'exchange': 'NYSE',
# 'ticker': 'YHOO',
# 'price': '18.55',
# 'dollar_change': '.50',
# 'pct_change': '2.32',
# 'open_price': '18.05',
# 'PE': '18.95',
# 'low': '18.05'
# }
# testapp.method = 'POST'
# testapp.POST = stock_details
# # _get_symbol(testapp)
# assert testapp.dbsession.query(portfolio_view).count() == 1
# def test_portfolio_view_post_creates_new_entry_with_given_info(testapp):
# """Test that new stock created uses POST info on portfolio_view POST."""
# from stock_analysis.views.default import portfolio_view
# from stock_analysis.models import Portfolio
# stock_details = {
# 'company': 'Yahoo',
# 'exchange': 'NYSE',
# 'ticker': 'YHOO',
# 'price': '18.55',
# 'dollar_change': '.50',
# 'pct_change': '2.32',
# 'open_price': '18.05',
# 'PE': '18.95',
# 'low': '18.05'
# }
# testapp.method = 'POST'
# testapp.POST = stock_details
# portfolio_view(testapp)
# entry = testapp.dbsession.query(Portfolio).get(1)
# assert entry.company == stock_details['company']
# # assert entry.exchange == stock_details['exchange']
# assert entry.ticker == stock_details['ticker']
# assert entry.price == stock_details['price']
# assert entry.dollar_change == stock_details['dollar_change']
# assert entry.dollar_change == stock_details['pct_change']
# assert entry.open_price == stock_details['open_price']
# assert entry.PE == stock_details['PE']
# # assert entry.low == stock_details['low']
# def test_portfolio_view_post_has_302_status_code(testapp):
# """Test that portfolio_view POST has 302 status code."""
# from stock_analysis.views.default import portfolio_view
# stock_details = {
# 'company': 'Yahoo',
# 'exchange': 'NYSE',
# 'ticker': 'YHOO',
# 'price': '18.55',
# 'dollar_change': '.50',
# 'pct_change': '2.32',
# 'open_price': '18.05',
# 'PE': '18.95',
# 'low': '18.05'
# }
# testapp.method = 'POST'
# testapp.POST = stock_details
# response = portfolio_view(testapp)
# assert response.status_code == 302
# def test_portfolio_view_post_redirects_to_portfolio_view_with_httpfound(testapp):
# """Test that portfolio_view POST redirects to portfolio view with httpfound."""
# from stock_analysis.views.default import portfolio_view
# stock_details = {
# 'company': 'Yahoo',
# 'exchange': 'NYSE',
# 'ticker': 'YHOO',
# 'price': '18.55',
# 'dollar_change': '.50',
# 'pct_change': '2.32',
# 'open_price': '18.05',
# 'PE': '18.95',
# 'low': '18.05'
# }
# testapp.method = 'POST'
# testapp.POST = stock_details
# response = portfolio_view(testapp)
# assert isinstance(response, HTTPFound)
# assert response.location == testapp.route_url('home')
# def test_portfolio_view_post_incompelete_data_is_bad_request(testapp):
# """Test that portfolio_view POST with incomplete data is invalid."""
# from stock_analysis.views.default import portfolio_view
# stock_details = {
# 'company': 'Yahoo',
# 'exchange': 'NYSE',
# 'ticker': 'YHOO',
# 'price': '18.55',
# 'dollar_change': '.50',
# 'pct_change': '2.32',
# 'open_price': '18.05',
# 'PE': '18.95',
# 'low': '18.05'
# }
# testapp.method = 'POST'
# testapp.POST = stock_details
# with pytest.raises(HTTPBadRequest):
# portfolio_view(testapp)
# def test_login_returns_only_home_page_for_unauthenticated_user(testapp):
# """Test that the login function returns only home page for unauth GET."""
# from stock_analysis.views.default import login_view
# data = {
# 'username': 'shinners',
# 'password': 'flerg'
# }
# testapp.method = 'POST'
# testapp.POST = data
# response = login_view(testapp)
# assert 'Username/password combination invalid.' == response.pop('error')
# def test_login_post_incomplete_data_is_bad_request(testapp, fill_the_db):
# """Test that login POST with incomplete data is invalid."""
# from stock_analysis.views.default import login_view
# data = {
# 'username': 'shinners',
# 'password': ''
# }
# testapp.method = 'POST'
# testapp.POST = data
# with pytest.raises(HTTPBadRequest):
# login_view(testapp)
# def test_login_post_incorrect_data_returns_dict_with_error(testapp, fill_the_db):
# """Test that login POST with incorrect data is invalid."""
# from stock_analysis.views.default import login_view
# data = {
# 'username': 'shinners',
# 'password': 'chris'
# }
# testapp.method = 'POST'
# testapp.POST = data
# response = login_view(testapp)
# assert 'error' in response
# assert 'The username and/or password are incorrect.' == response['error']
# def test_login_post_correct_data_returns_302_status_code(testapp):
# """Test that login POST with correct data gets 302 status code."""
# from stock_analysis.views.default import login_view
# data = {
# 'username': 'shinners',
# 'password': 'chris'
# }
# testapp.method = 'POST'
# testapp.POST = data
# response = login_view(testapp)
# assert response.status_code == 302
# def test_login_post_correct_data_redirects_to_portfolio_with_httpfound(testapp):
# """Test that login POST with correct data redirects to portfolio page."""
# from stock_analysis.views.default import login_view
# data = {
# 'username': 'shinners',
# 'password': 'chris'
# }
# testapp.method = 'POST'
# testapp.POST = data
# response = login_view(testapp)
# assert isinstance(response, HTTPFound)
# assert response.location == testapp.route_url('profile')
def test_logout_returns_302_status_code(dummy_request):
"""Test that logout gets 302 status code."""
from stock_analysis.views.default import logout
response = logout(dummy_request)
assert response.status_code == 302
def test_logout_redirects_to_home_with_httpfound(dummy_request):
"""Test that logout redirects to home page."""
from stock_analysis.views.default import logout
response = logout(dummy_request)
assert isinstance(response, HTTPFound)
assert response.location == dummy_request.route_url('home')
# """ FUNCTIONAL TESTS FOR ROUTES """
def test_home_route_gets_200_status_code(testapp, fill_the_db):
"""Test that the home route gets 200 status code for unauth user."""
response = testapp.get("/")
assert response.status_code == 200
def test_home_route_has_login_option(testapp):
"""Test that the home route has a login option."""
response = testapp.get("/")
assert 'Login' in str(response.html.find_all('a')[1])
def test_detail_route_for_valid_id_gets_200_status_code(testapp):
"""Test that a valid detail route gets 200 status code."""
response = testapp.get('/detail')
assert response.status_code == 200
def test_detail_route_has_correct_entry(testapp):
"""Test that the detail route shows correct stock data."""
response = testapp.get('/detail')
print(response.html)
assert 'Stock Ticker' in str(response.html)
def test_detail_route_has_no_login_option(testapp):
"""Test that the detail route has not login option for unauth user."""
response = testapp.get('/detail')
assert not response.html.find('a', 'login')
def test_portfolio_get_route_unauth_gets_403_status_code(testapp):
"""Test that the create GET route gets 403 status code for unauth user."""
assert testapp.get("/portfolio", status=403)
def test_portfolio_post_route_unauth_gets_403_status_code(testapp):
"""Test that the create POST route gets 403 status code for unauth user."""
assert testapp.post("/portfolio", status=403)
def test_logout_route_unauth_gets_302_status_code(testapp):
"""Test that the logout route gets 302 status code for unauth user."""
assert testapp.get("/logout", status=302)
def test_login_get_route_unauth_gets_200_status_code(testapp):
"""Test that the login GET route gets 200 status code."""
response = testapp.get("/login")
assert response.status_code == 200
def test_login_get_route_unauth_has_login_form(testapp):
"""Test that the login GET route gets 200 status code."""
response = testapp.get("/login")
assert len(response.html.find_all('input')) == 3
def test_login_get_route_unauth_has_login_form2(testapp):
"""Test that the login GET route gets 200 status code."""
response = testapp.get("/login")
assert 'LOGIN' in str(response.html.find_all('input')[2])
def test_login_post_route_unauth_incompelete_data_gives_invalid_response(testapp, fill_the_db):
"""Test that POST of incomplete data to login route gives invalid."""
data = {
'username': 'shinners',
'password': ''
}
response = testapp.post("/login", data)
assert 'Username/password combination invalid' in str(response.html)
def test_login_post_route_unauth_wrong_data_has_302_status_code(testapp, fill_the_db):
"""Test that POST of wrong data to login route gets a 302 status code."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
assert response.status_code == 302
def test_login_post_route_unauth_wrong_data_has_error_message(testapp, fill_the_db):
"""Test that POST of wrong data to login route has an error message."""
data = {
'username': 'shinners',
'password': 'psas'
}
response = testapp.post("/login", data)
assert 'Username/password combination invalid' in str(response.html)
def test_login_post_route_unauth_correct_data_has_302_status_code(testapp, fill_the_db):
"""Test that POST of correct data to login route has 302 status code."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
assert response.status_code == 302
def test_logout_route_auth_gets_302_status_code(testapp):
"""Test that the logout route gets 302 status code for auth user."""
response = testapp.get("/logout")
assert response.status_code == 302
def test_login_post_route_correct_data_redirects_to_portfolio(testapp, fill_the_db):
"""Test that POST of correct data to login route redirects to portfolio page."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
assert response.location.endswith('portfolio')
def test_login_post_route_correct_data_gives_redirect_status_code(testapp, fill_the_db):
"""Test that POST of correct data to login route gives redirect status code."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
assert response.status_code == 302
def test_logout_route_auth_redirects_to_home(testapp):
"""Test that the logout route redirects to home page."""
response = testapp.get("/logout")
home = testapp.app.routes_mapper.get_route('home').path
assert response.location.endswith(home)
def test_login_post_route_unauth_correct_data_home_has_logout_tab(testapp, fill_the_db):
"""Test that POST of correct data to login route has home page with logout tab."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
next_page = response.follow()
assert 'Logout' in str(next_page.html.find_all('a')[3])
def test_logout_route_auth_home_has_login_option(testapp):
"""Test that the logout route has home page with login."""
response = testapp.get("/logout")
next_page = response.follow()
assert 'Home page' in str(next_page.html)
assert 'Login' in str(next_page.html)
def test_logout_route_auth_home_has_login_option_2(testapp):
"""Test that the logout route has home page with login."""
response = testapp.get("/logout")
next_page = response.follow()
assert 'Login' in str(next_page.html.find_all('a')[1])
def test_login_post_route_unauth_correct_data_adds_auth_tkt_cookie(testapp, fill_the_db):
"""Test that POST of correct data to login route adds auth_tkt cookie."""
data = {
'username': 'shinners',
'password': 'chris'
}
testapp.post("/login", data)
assert 'auth_tkt' in testapp.cookies
def test_login_get_route_auth_has_302_status_code(testapp):
"""Test that GET to login route has 302 status code."""
response = testapp.get("/login")
assert response.status_code == 200
def test_login_post_route_auth_has_302_status_code(testapp, fill_the_db):
"""Test that POST to login route has 302 status code."""
data = {
'username': 'shinners',
'password': 'chris'
}
response = testapp.post("/login", data)
assert response.status_code == 302
def test_login_post_route_auth_keeps_auth_tkt_cookie(testapp, fill_the_db):
"""Test that POST of correct data to login route adds auth_tkt cookie."""
"""Test that POST to login route adds auth_tkt cookie."""
testapp.get('/logout')
assert 'auth_tkt' not in testapp.cookies
data = {
'username': 'shinners',
'password': 'chris'
}
testapp.post("/login", data)
assert 'auth_tkt' in testapp.cookies
def test_home_route_auth_gets_200_status_code(testapp):
"""Test that the home route gets 200 status code."""
response = testapp.get("/")
assert response.status_code == 200
def test_detail_route_auth_for_valid_id_gets_200_status_code(testapp):
"""Test that the detail route of a valid gets 200 status code."""
response = testapp.get("/detail")
assert response.status_code == 200
def test_logout_route_auth_removes_auth_tkt_cookie(testapp):
"""Test that the logout route removes the auth_tkt cookie."""
testapp.get("/logout")
assert 'auth_tkt' not in testapp.cookies
| 35.302682 | 99 | 0.674029 |
299d0827c01c5140f974b69bc3db8af150b95bb5 | 4,758 | py | Python | scripts/mdns.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | ["MIT"] | null | null | null | scripts/mdns.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | ["MIT"] | 3 | 2020-03-30T14:07:54.000Z | 2020-03-30T22:59:29.000Z | scripts/mdns.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | ["MIT"] | null | null | null |
import hashlib
import json
import time
import sys
import requests
from zeroconf import ServiceBrowser, Zeroconf
class MyListener(object):
"""
This class is used for the mDNS browsing discovery device, including calling the remove_service and add_service
properties to ServiceBrowser, and also contains broadcasts for querying and updating existing devices.
Dictionary
all_info_dict:Qualified device information in the current network [keys:info.name,val:info]
"""
def __init__(self):
self.all_del_sub = []
self.all_info_dict = {}
self.all_sub_num = 0
self.new_sub = False
def remove_service(self, zeroconf, type, name):
"""
This function is called for ServiceBrowser.
This function is triggered when ServiceBrowser discovers that some device has logged out
"""
print("inter remove_service()")
if name not in self.all_info_dict:
return
self.all_sub_num -= 1
del self.all_info_dict[name]
self.all_del_sub.append(name)
print("self.all_info_dict[name]", self.all_info_dict)
print("Service %s removed" % (name))
def add_service(self, zeroconf, type, name):
"""
This function is called for ServiceBrowser.This function is triggered when ServiceBrowser finds a new device
When a subdevice is found, the device information is stored into the all_info_dict
"""
self.new_sub = True
self.all_sub_num += 1
info = zeroconf.get_service_info(type, name)
if info.properties[b'type'] == b'diy_plug':
self.all_info_dict[name] = info
if name in self.all_del_sub:
self.all_del_sub.remove(name)
print("Service %s added, service info: %s" % (name, info))
def flash_all_sub_info(self,):
"""
Update all found subdevice information
"""
info_list = list(self.all_info_dict.keys())
for x in info_list:
current_info = all_info_dict[x]
name = current_info["name"]
type = current_info["type"]
info = zeroconf.get_service_info(type=type, name=name)
current_info["info"] = info
self.all_info_dict[x] = current_info["info"]
def main():
# ToDo: test for arguments (out of bounds)
firmFile = sys.argv[1]
sha256_hash = hashlib.sha256()
with open("/root/files/" + firmFile, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
sha256_hash.update(byte_block)
fHash = sha256_hash.hexdigest()
ota_data = {
'downloadUrl': 'http://192.168.254.1/files/' + firmFile,
'sha256sum': fHash}
zeroconf = Zeroconf()
listener = MyListener()
browser = ServiceBrowser(zeroconf, "_ewelink._tcp.local.",listener= listener)
print("Waiting for device (mDNS...)")
while True:
if listener.all_sub_num>0:
dict=listener.all_info_dict.copy()
for x in dict.keys():
info=dict[x]
info=zeroconf.get_service_info(info.type,x)
if info!= None:
nodeId = x[8:18]
ipAddr = parseAddress(info.address)
port = str(info.port)
#startFlash(nodeId, ipAddr, port)
break
time.sleep(0.5)
print("Tasmotizing: ", nodeId)
baseURL = "http://" + ipAddr + ":" + port + "/zeroconf"
data = {"deviceid": nodeId, "data":{}}
print(" Turn on switch")
url = baseURL + "/switch"
datasw = {"deviceid": nodeId, "data":{"switch":"on"}}
r = requests.post(url = url, json = datasw)
## check info to know about OTA status
url = baseURL + "/info"
print(" Check OTA: ", url)
r = requests.post(url = url, json = data)
rJ = json.loads(r.json()['data'])
if rJ['otaUnlock']:
print(" OTA already unlocked")
else:
url = baseURL + "/ota_unlock"
print(" Unlocking OTA ", url)
r = requests.post(url = url, json = data)
## need to verify return here.
print(" Sending binary URL: ", ota_data)
otaD = data
otaD['data'] = ota_data
url = baseURL + "/ota_flash"
r = requests.post(url = url, json = otaD)
print(" r = ", r.json())
def parseAddress(address):
add_list = []
for i in range(4):
add_list.append(int(address.hex()[(i*2):(i+1)*2], 16))
add_str = str(add_list[0]) + "." + str(add_list[1]) + "." + str(add_list[2])+ "." + str(add_list[3])
return add_str
if __name__ == "__main__":
main() | 35.244444 | 116 | 0.588903 |
570109eb1b0b526b78c82465503a5810ee1663cb | 2,502 | py | Python | nipype/interfaces/semtools/diffusion/maxcurvature.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | ["Apache-2.0"] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/semtools/diffusion/maxcurvature.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | ["Apache-2.0"] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/semtools/diffusion/maxcurvature.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | ["Apache-2.0"] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z |
# -*- coding: utf-8 -*-
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class maxcurvatureInputSpec(CommandLineInputSpec):
image = File(desc="FA Image", exists=True, argstr="--image %s")
output = traits.Either(
traits.Bool,
File(),
hash_files=False,
desc="Output File",
argstr="--output %s")
sigma = traits.Float(desc="Scale of Gradients", argstr="--sigma %f")
verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ")
class maxcurvatureOutputSpec(TraitedSpec):
output = File(desc="Output File", exists=True)
class maxcurvature(SEMLikeCommandLine):
"""title: MaxCurvature-Hessian (DTIProcess)
category: Diffusion
description: This program computes the Hessian of the FA image (--image). We use this scalar image as a registration input when doing DTI atlas building. For most adult FA we use a sigma of 2 whereas for neonate or primate images and sigma of 1 or 1.5 is more appropriate. For really noisy images, 2.5 - 4 can be considered. The final image (--output) shows the main feature of the input image.
version: 1.1.0
documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess
license: Copyright (c) Casey Goodlett. All rights reserved.
See http://www.ia.unc.edu/dev/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
contributor: Casey Goodlett
acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler.
"""
input_spec = maxcurvatureInputSpec
output_spec = maxcurvatureOutputSpec
_cmd = " maxcurvature "
_outputs_filenames = {'output': 'output.nii'}
_redirect_x = False
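    # Example usage (sketch only; assumes the dtiprocess `maxcurvature` binary is on PATH):
    #   mc = maxcurvature()
    #   mc.inputs.image = 'FA.nii'
    #   mc.inputs.output = 'FAhessian.nii'
    #   mc.inputs.sigma = 2.0
    #   mc.run()  # doctest: +SKIP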
| 43.894737 | 421 | 0.733813 |
51a7e9d639b3997cc869cd14a27e9e263a7dc805 | 2,490 | py | Python | api/content/signals.py | luoyangC/island_django | e40a16f1465edc1376b00aa8f953b285f9802731 | ["Apache-2.0"] | null | null | null | api/content/signals.py | luoyangC/island_django | e40a16f1465edc1376b00aa8f953b285f9802731 | ["Apache-2.0"] | 6 | 2020-05-05T07:13:16.000Z | 2021-09-22T18:58:16.000Z | api/content/signals.py | luoyangC/island_django | e40a16f1465edc1376b00aa8f953b285f9802731 | ["Apache-2.0"] | null | null | null |
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from content.models import Article
from action.models import Like, Fav, Comment, Reply
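# These signal receivers keep the denormalized counters on Article (fav_nums,
# like_nums, comment_nums) in sync whenever a Fav, Like, Comment or Reply is
# created or deleted. Note: this module must be imported (typically from the
# app's AppConfig.ready()) for the receivers to be registered.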
@receiver(post_save, sender=Fav)
def add_fav(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.article.id)
_article.fav_nums += 1
_article.save()
@receiver(post_delete, sender=Fav)
def del_fav(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.article.id)
if _article.fav_nums > 0:
_article.fav_nums -= 1
else:
_article.fav_nums = 0
_article.save()
@receiver(post_save, sender=Like)
def add_like(sender, instance=None, **kwargs):
if instance and instance.like_type == 'article':
_article = Article.objects.get(id=instance.like_id)
_article.like_nums += 1
_article.save()
@receiver(post_delete, sender=Like)
def del_like(sender, instance=None, **kwargs):
if instance and instance.like_type == 'article':
_article = Article.objects.get(id=instance.like_id)
if _article.like_nums > 0:
_article.like_nums -= 1
else:
_article.like_nums = 0
_article.save()
@receiver(post_save, sender=Comment)
def add_comment(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.article.id)
_article.comment_nums += 1
_article.save()
@receiver(post_delete, sender=Comment)
def del_comment(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.article.id)
if _article.comment_nums > 0:
_article.comment_nums -= 1
else:
_article.comment_nums = 0
_article.save()
@receiver(post_save, sender=Reply)
def add_reply(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.comment.article.id)
_article.comment_nums += 1
_article.save()
@receiver(post_delete, sender=Reply)
def del_reply(sender, instance=None, **kwargs):
if instance:
_article = Article.objects.get(id=instance.comment.article.id)
if _article.comment_nums > 0:
_article.comment_nums -= 1
else:
_article.comment_nums = 0
_article.save()
| 30.740741 | 71 | 0.640161 |
a653a6560a881ca6ce83946e4e639af153cdfca6 | 4,777 | py | Python | stability/stability-iterative-android-tests/iterative/Launch_Exit_Repeatedly.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | ["BSD-3-Clause"] | null | null | null | stability/stability-iterative-android-tests/iterative/Launch_Exit_Repeatedly.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | ["BSD-3-Clause"] | null | null | null | stability/stability-iterative-android-tests/iterative/Launch_Exit_Repeatedly.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<[email protected]>
import unittest
import os
import sys
import commands
import shutil
import time
import subprocess
reload(sys)
sys.setdefaultencoding('utf-8')
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
def setUp():
global device
#device = 'E6OKCY411012'
device = os.environ.get('DEVICE_ID')
if not device:
print 'Get env error\n'
sys.exit(1)
class TestStabilityIterativeFunctions(unittest.TestCase):
def test_launch_exit_repeatedly(self):
setUp()
runtime = 14400
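        # Soak-test budget in seconds (14400 s = 4 hours); the launch/force-stop loop
        # below keeps cycling through the app list until this time has elapsed.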
pre_time = time.time()
testName = "test_launch_exit_repeatedly"
sysmon_path = ConstPath + '/sysmon.sh'
sysmon_cmd = sysmon_path + ' ' + testName + \
' ' + str(runtime) + ' org.xwalk.*'
subprocess.Popen(args=sysmon_cmd, shell=True)
i = 0
while True:
i = i + 1
apps_list = {'tct_fileapi_w3c_tests': 'TctFileapiW3cTests',
'tct_fullscreen_nonw3c_tests': 'TctFullscreenNonw3cTests',
'tct_mediacapture_w3c_tests': 'TctMediacaptureW3cTests',
'tct_websocket_w3c_tests': 'TctWebsocketW3cTests',
'gallery': 'Gallery',
'hangonman': 'Hangonman',
'hexgl': 'Hexgl',
'sysapps': 'Sysapps',
'memorygame': 'Memorygame'
}
elapsed_time = time.time() - pre_time
if elapsed_time >= runtime:
print i, elapsed_time, 'Process finished'
break
else:
for name, pkg in apps_list.items():
# print '%s\t%s' % (name, pkg)
print i, elapsed_time, 'Continue'
pmstatus = commands.getstatusoutput(
'adb -s ' +
device +
' shell pm list packages |grep org.xwalk.' +
name)
if pmstatus[0] == 0:
launchstatus = commands.getstatusoutput(
'adb -s ' +
device +
' shell am start -n org.xwalk.' +
name +
'/.' +
pkg +
'Activity')
self.assertNotIn('Error', launchstatus[1])
commands.getstatusoutput(
'adb -s ' +
device +
' shell am force-stop org.xwalk.' +
name)
stopresult = commands.getstatusoutput(
'adb -s ' +
device +
' shell ps |grep org.xwalk.' +
name)
self.assertNotIn('org.xwalk.' + name, stopresult[1])
else:
print 'Please install apk ' + name + ' frist'
sys.exit(1)
if __name__ == '__main__':
unittest.main()
| 40.142857 | 83 | 0.55516 |
a3ba97a93c960cbcd0df7919306b95fc4f28144f | 100 | py | Python | inspect_platform.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | ["MIT"] | 23 | 2020-07-29T14:44:29.000Z | 2022-01-07T05:29:16.000Z | inspect_platform.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | ["MIT"] | 14 | 2020-09-09T02:38:24.000Z | 2022-03-04T05:19:25.000Z | inspect_platform.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | ["MIT"] | 2 | 2021-06-05T23:30:08.000Z | 2021-06-06T19:58:59.000Z |
import os
from sys import platform
print("sys.platform: " + platform)
print("os.name: " + os.name)
| 16.666667 | 34 | 0.7 |
be9210a06c6699eee39ff691f9988557b99ea616 | 3,879 | py | Python | superset/datasets/commands/export.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | ["Apache-2.0"] | 2 | 2021-12-21T15:57:16.000Z | 2022-01-31T02:22:02.000Z | superset/datasets/commands/export.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | ["Apache-2.0"] | 10 | 2022-01-05T01:31:07.000Z | 2022-03-16T01:09:46.000Z | superset/datasets/commands/export.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | ["Apache-2.0"] | 2 | 2021-12-21T13:41:18.000Z | 2021-12-26T22:16:43.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import json
import logging
from typing import Iterator, Tuple
import yaml
from werkzeug.utils import secure_filename
from superset.commands.export.models import ExportModelsCommand
from superset.connectors.sqla.models import SqlaTable
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.datasets.dao import DatasetDAO
from superset.utils.dict_import_export import EXPORT_VERSION
logger = logging.getLogger(__name__)
JSON_KEYS = {"params", "template_params", "extra"}
class ExportDatasetsCommand(ExportModelsCommand):
dao = DatasetDAO
not_found = DatasetNotFoundError
@staticmethod
def _export(
model: SqlaTable, export_related: bool = True
) -> Iterator[Tuple[str, str]]:
database_slug = secure_filename(model.database.database_name)
dataset_slug = secure_filename(model.table_name)
file_name = f"datasets/{database_slug}/{dataset_slug}.yaml"
payload = model.export_to_dict(
recursive=True,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
for key in JSON_KEYS:
if payload.get(key):
try:
payload[key] = json.loads(payload[key])
except json.decoder.JSONDecodeError:
logger.info("Unable to decode `%s` field: %s", key, payload[key])
for key in ("metrics", "columns"):
for attributes in payload.get(key, []):
if attributes.get("extra"):
try:
attributes["extra"] = json.loads(attributes["extra"])
except json.decoder.JSONDecodeError:
logger.info(
"Unable to decode `extra` field: %s", attributes["extra"]
)
payload["version"] = EXPORT_VERSION
payload["database_uuid"] = str(model.database.uuid)
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_name, file_content
# include database as well
if export_related:
file_name = f"databases/{database_slug}.yaml"
payload = model.database.export_to_dict(
recursive=False,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
if payload.get("extra"):
try:
payload["extra"] = json.loads(payload["extra"])
except json.decoder.JSONDecodeError:
logger.info("Unable to decode `extra` field: %s", payload["extra"])
payload["version"] = EXPORT_VERSION
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_name, file_content
| 38.029412 | 87 | 0.646816 |
5c196b7661c634a0fb4335cadae1836297423d05 | 8,790 | py | Python | python/gui/filedialog.py | trago/fringeproc | 6a2441bce129a56296f50b9c333609cc3036eb7e | ["Unlicense"] | 3 | 2015-09-17T14:19:34.000Z | 2018-02-08T07:03:23.000Z | python/gui/filedialog.py | trago/fringeproc | 6a2441bce129a56296f50b9c333609cc3036eb7e | ["Unlicense"] | null | null | null | python/gui/filedialog.py | trago/fringeproc | 6a2441bce129a56296f50b9c333609cc3036eb7e | ["Unlicense"] | null | null | null |
# Copyright (c) 2012, Julio C. Estrada
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# + Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# + Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cv2
import os
import numpy as np
from PyQt4.QtGui import QFileDialog, QVBoxLayout
from PyQt4.QtCore import Qt
from actiondialog import ActionDialog
from process import Process, QMutexLocker, QReadLocker
class OpenFile(Process):
"""
Process to open a file.
This is a process to open a data file. This data file can be an image or
a matrix of float values. The input of this process is the name of the file
to load. The output of this process is the data loaded from the file as
a *numpy array*.
"""
file_error = 0
file_ok = 1
file_no_name = -1
def __init__(self, parent=None):
super(OpenFile, self).__init__(parent)
def run(self):
self.openFile()
def openFile(self):
fname = self._input
if(fname!=''):
ftype = os.path.splitext(fname)[1]
try:
if ftype == '.flt':
self._output=self._openFlt(fname)
else:
self._output=self._openImage(fname)
except IOError:
self.state = self.file_error
else:
self.state = self.file_no_name
return
self.state = self.file_ok
def _readFltFile(self, fname):
"""
readFltFile(fname)
"""
iter = 0
data = None
try:
with open(fname) as f:
try:
M=0
N=0
i=0; j=0
for line in f:
if iter == 0:
M = float(line)
elif iter == 1:
N = float(line)
elif iter == 2:
data = np.zeros((M,N), dtype=float)
else:
data[i,j%N] = float(line)
j += 1
if j%N == 0:
i += 1
iter += 1
except ValueError:
print 'Error: Data file %s is wrong or it does not exists!' % fname
exit(1)
except IOError:
return None
return data
def _openFlt(self, fname):
""" _openFlt(fname, flag='new')
Opens a file having floating point data.
It opens a text file with extension .flt having floating point data and loads
        these into an array to be used as image data. The format for this text file
is one value by line and the first two lines have the number of rows
and number of columns of the image.
Parameters:
* fname: Is the file name to be opened.
:type: str
* flag: if flag='new' indicates that a PixmapItem is going to be created,
otherwise the image data is set to the current PixmapItem.
:type: str
Author: Julio C. Estrada <[email protected]>
"""
image = self._readFltFile(fname)
if(image == None):
raise IOError, "Image file can not be opened"
return image
def _openImage(self, fname):
""" _openImage(fname, flag='new')
Opens the image file and loads its data.
It opens an image file in format png, jpg, tif or bmp and loads its data
creating the PixmapItem or setting the image data to an already created
PixmapItem.
Parameters:
        * fname: Is the file name to be opened.
:type: str
* flag: if flag='new' indicates that a PixmapItem is going to be created,
otherwise the image data is set to the current PixmapItem.
:type: str
Author: Julio C. Estrada <[email protected]>
"""
image = cv2.imread(fname,0)
if(image != None):
return image
else:
raise IOError, "Image file can not be opened"
class SaveFile(OpenFile):
"""
Saves data in a file.
    This is a process that saves data in a file. The data to save is an image
    or a matrix of float values. The input data must be the *name of the file*
    and the *data to save*. The output of this process is a flag that says whether
    the file has been saved successfully or not.
"""
def __init__(self, parent=None):
super(SaveFile, self).__init__(parent)
def run(self):
self.saveFile()
def saveFile(self):
fname = self._input[0]
if(fname!=''):
ftype = os.path.splitext(fname)[1]
try:
if ftype == '.flt':
self._output=self._saveFlt(fname)
else:
self._output=self._saveImage(fname)
except IOError:
self.state = self.file_error
else:
self.state = self.file_no_name
return
self.state = self.file_ok
def _writeFltFile(self, fname, data):
try:
f = open(fname,'w')
M = str(data.shape[0])
N = str(data.shape[1])
f.write(M+'\n')
f.write(N+'\n')
for i in xrange(0,data.shape[0]):
for j in xrange(0,data.shape[1]):
val = str(data[i,j])
f.write(val+'\n')
except IOError:
print 'Error: I can not write in oput file.'
return False
return True
def _saveFlt(self, fname):
data = self.getInput()[1]
self._output = self._writeFltFile(fname, data)
if self._output is True:
self.state = self.file_ok
else:
self.state = self.file_error
def _saveImage(self, fname):
data = self.getInput()[1]
image = cv2.normalize(data, 0, 255, cv2.NORM_MINMAX)
self._output = cv2.imwrite(fname, image)
if self._output is True:
self.state = self.file_ok
else:
self.state = self.file_error
class OpenDialog(ActionDialog):
def __init__(self, parent=None, caption = ""):
super(OpenDialog, self).__init__(parent)
self._filedlg = QFileDialog(None, Qt.Widget)
self._filedlg.setFileMode(QFileDialog.ExistingFile)
nameFilter = "Images (*.png *.tif *.tiff *.jpg *.jpeg *.png *.bmp)" +\
";;Float images (*.flt)"
self._filedlg.setNameFilter(nameFilter)
layout = QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0,0,0,0)
layout.addWidget(self._filedlg)
self.setLayout(layout)
self.setWindowTitle(caption)
self._filedlg.finished.connect(self.fileDlgDone)
def fileDlgDone(self, result):
self._process = OpenFile()
if result == 1:
files = self._filedlg.selectedFiles()
self._process.setInput(files[0])
self.done(result)
class SaveDialog(OpenDialog):
def __init__(self, parent=None, caption = ""):
super(SaveDialog, self).__init__(parent)
self._filedlg.setFileMode(QFileDialog.AnyFile)
def fileDlgDone(self, result):
self._process = SaveFile()
if result == 1:
files = self._filedlg.selectedFiles()
self._process.setInput((files[0], self._input))
self.done(result) | 34.470588 | 87 | 0.568601 |
ac0614953cb64ace12f03a420dc1855dc983faf1 | 34 | py | Python | cct/qtgui2/main/scripting/wizard/sequencewizard/__init__.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | ["BSD-3-Clause"] | 1 | 2015-11-04T16:37:39.000Z | 2015-11-04T16:37:39.000Z | cct/qtgui2/main/scripting/wizard/sequencewizard/__init__.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | ["BSD-3-Clause"] | null | null | null | cct/qtgui2/main/scripting/wizard/sequencewizard/__init__.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | ["BSD-3-Clause"] | 1 | 2020-03-05T02:50:43.000Z | 2020-03-05T02:50:43.000Z |
from .wizard import SequenceWizard
| 34 | 34 | 0.882353 |
6bb59e0a7ec370c58ce11ff8e19a22ac49c55b32 | 10,070 | py | Python | watson_developer_cloud/alchemy_language_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null | watson_developer_cloud/alchemy_language_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null | watson_developer_cloud/alchemy_language_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null |
# Copyright 2015 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The AlchemyAPI Language service
(http://www.alchemyapi.com/products/alchemylanguage)
"""
from .watson_developer_cloud_service import WatsonDeveloperCloudService
class AlchemyLanguageV1(WatsonDeveloperCloudService):
default_url = 'https://gateway-a.watsonplatform.net/calls'
def __init__(self, url=default_url, **kwargs):
WatsonDeveloperCloudService.__init__(self, 'alchemy_api', url, **kwargs)
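    # Typical usage (sketch only; assumes a valid AlchemyAPI key is accepted as `api_key`):
    #   alchemy = AlchemyLanguageV1(api_key='YOUR_API_KEY')
    #   print(alchemy.keywords(url='http://www.example.com/', max_keywords=5))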
def author(self, html=None, url=None):
return self._alchemy_html_request('GetAuthor', html=html, url=url)
def authors(self, html=None, url=None):
return self._alchemy_html_request('GetAuthors', html=html, url=url)
def keywords(self, html=None, text=None, url=None, strict_extract_mode=False, sentiment=False,
show_source_text=False, max_keywords=50):
params = {'keywordExtractMode': 'strict' if strict_extract_mode else 'normal',
'sentiment': sentiment,
'showSourceText': show_source_text,
'maxRetrieve': max_keywords}
return self._alchemy_html_request('GetRankedKeywords', html=html, text=text, url=url, params=params)
def concepts(self, html=None, text=None, url=None, max_items=8, linked_data=True, show_source_text=False):
params = {'maxRetrieve': max_items,
'linkedData': linked_data,
'showSourceText': show_source_text}
return self._alchemy_html_request('GetRankedConcepts', html=html, text=text, url=url, params=params)
def entities(self, html=None, text=None, url=None, disambiguate=True, linked_data=True, coreference=True,
quotations=False, sentiment=False, show_source_text=False, max_items=50):
params = {'disambiguate': disambiguate,
'linkedData': linked_data,
'coreference': coreference,
'quotations': quotations,
'sentiment': sentiment,
'showSourceText': show_source_text,
'maxRetrieve': max_items}
return self._alchemy_html_request('GetRankedNamedEntities', html=html, text=text, url=url, params=params)
def relations(self, html=None, text=None, url=None, sentiment=False, keywords=False, entities=False,
require_entities=False, sentiment_excludes_entities=True, disambiguate=True, linked_data=True,
coreference=True, show_source_text=False, max_items=50):
params = {'sentiment': sentiment,
'keywords': keywords,
'entities': entities,
'requireEntities': require_entities,
'sentimentExcludesEntities': sentiment_excludes_entities,
'disambiguate': disambiguate,
'linkedData': linked_data,
'coreference': coreference,
'showSourceText': show_source_text,
'maxRetrieve': max_items}
return self._alchemy_html_request('GetRelations', html=html, text=text, url=url, params=params)
def language(self, html=None, text=None, url=None):
return self._alchemy_html_request('GetLanguage', html=html, text=text, url=url)
def text(self, html=None, url=None, use_metadata=True, extract_links=False):
params = {'useMetadata': use_metadata,
'extractLinks': extract_links}
return self._alchemy_html_request('GetText', html=html, url=url, params=params)
def raw_text(self, html=None, url=None):
return self._alchemy_html_request('GetRawText', html=html, url=url)
def category(self, html=None, text=None, url=None, show_source_text=False):
params = {'showSourceText': show_source_text}
return self._alchemy_html_request('GetCategory', html=html, text=text, url=url, params=params)
def title(self, html=None, url=None, use_metadata=True):
params = {'useMetadata': use_metadata}
return self._alchemy_html_request('GetTitle', html=html, url=url, params=params)
def feeds(self, html=None, url=None):
return self._alchemy_html_request('GetFeedLinks', html=html, url=url)
def microformats(self, html=None, url=None):
return self._alchemy_html_request('GetMicroformatData', html=html, url=url)
def publication_date(self, html=None, url=None):
return self._alchemy_html_request('GetPubDate', html=html, url=url)
def taxonomy(self, html=None, text=None, url=None, show_source_text=False, source_text_type=None,
constraint_query=None, xpath_query=None, base_url=None):
"""
source_text_type ->
where to obtain the text that will be processed by this API call.
AlchemyAPI supports multiple modes of text extraction:
web page cleaning (removes ads, navigation links, etc.), raw text extraction
(processes all web page text, including ads / nav links), visual constraint queries, and XPath queries.
Possible values:
cleaned_or_raw : cleaning enabled, fallback to raw when cleaning produces no text (default)
cleaned : operate on 'cleaned' web page text (web page cleaning enabled)
raw : operate on raw web page text (web page cleaning disabled)
cquery : operate on the results of a visual constraints query
Note: The 'constraint_query' argument must also be set to a valid visual constraints
query.
xpath : operate on the results of an XPath query
Note: The 'xpath' http argument must also be set to a valid XPath query.
constraint_query ->
a visual constraints query to apply to the web page.
xpath ->
an XPath query to apply to the web page.
base_url ->
rel-tag output base http url (must be uri-argument encoded)
"""
params = {'showSourceText': show_source_text,
'sourceText': source_text_type,
'cquery': constraint_query,
'xpath': xpath_query,
'base_url': base_url}
return self._alchemy_html_request('GetRankedTaxonomy', html=html, text=text, url=url, params=params)
# Some of these options don't appear in the API documentation but are supported by the previous AlchemyAPI SDK
def combined(self, html=None, text=None, url=None, extract=None, disambiguate=True, linked_data=True,
coreference=True, quotations=False, sentiment=False, show_source_text=False, max_items=50,
base_url=None):
"""
Combined call for page-image, entity, keyword, title, author, taxonomy, concept.
INPUT:
extract ->
List or comma separated string
Possible values: page-image, entity, keyword, title, author, taxonomy, concept
default : entity, keyword, taxonomy, concept
disambiguate ->
disambiguate detected entities
Possible values:
True : enabled (default)
False : disabled
linked_data ->
include Linked Data content links with disambiguated entities
Possible values :
True : enabled (default)
False : disabled
coreference ->
resolve he/she/etc coreferences into detected entities
Possible values:
True : enabled (default)
False : disabled
quotations ->
enable quotations extraction
Possible values:
True : enabled
False : disabled (default)
sentiment ->
enable entity-level sentiment analysis
Possible values:
True : enabled
False : disabled (default)
show_source_text ->
include the original 'source text' the entities were extracted from within the API response
Possible values:
True : enabled
False : disabled (default)
max_items ->
maximum number of named entities to extract
default : 50
base_url ->
rel-tag output base http url
OUTPUT:
The response, already converted from JSON to a Python object.
"""
if isinstance(extract, list):
extract = ','.join(extract)
params = {'extract': extract,
'disambiguate': disambiguate,
'linkedData': linked_data,
'coreference': coreference,
'quotations': quotations,
'sentiment': sentiment,
'showSourceText': show_source_text,
'maxRetrieve': max_items,
'baseUrl': base_url}
return self._alchemy_html_request('GetCombinedData', html=html, text=text, url=url, params=params)
def sentiment(self, html=None, text=None, url=None):
return self._alchemy_html_request('GetTextSentiment', html=html, text=text, url=url)
def targeted_sentiment(self, targets, html=None, text=None, url=None):
if isinstance(targets, list):
targets = '|'.join(targets)
params = {'targets': targets}
return self._alchemy_html_request('GetTargetedSentiment', html=html, text=text, url=url, params=params)
| 49.121951 | 119 | 0.630983 |
c8d24a6f7788231824b6f5dec00308f3351c374b | 2,390 | py | Python | docs/Lessons/topics/sql/scrollCanv.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | ["Apache-2.0"] | 666 | 2016-11-14T18:17:40.000Z | 2022-03-29T03:53:22.000Z | docs/Lessons/topics/sql/scrollCanv.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | ["Apache-2.0"] | 598 | 2016-10-20T21:04:09.000Z | 2022-03-15T22:44:49.000Z | docs/Lessons/topics/sql/scrollCanv.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | ["Apache-2.0"] | 95 | 2017-01-19T12:23:58.000Z | 2022-03-06T18:16:21.000Z |
import tkinter as tk
class Example(tk.Frame):
def __init__(self, root):
tk.Frame.__init__(self, root)
self.canvas = tk.Canvas(root, borderwidth=0, background="#ffffff")
self.vsb = tk.Scrollbar(root, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.hsb = tk.Scrollbar(root, orient="horizontal", command=self.canvas.xview)
self.canvas.configure(xscrollcommand=self.hsb.set)
self.hsb.pack(side="bottom", fill="x")
self.canvas.pack(side="left", fill="both", expand=True)
self.frame = tk.Frame(self.canvas, background="#ffffff")
self.canvas.create_window((4,4), window=self.frame, anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.OnFrameConfigure)
self.populate()
def populate(self):
data = []
for looper in range(20):
data.append([])
data[looper] = []
for loop in range(10):
data[looper].append(looper*loop)
# loop through each row
for rowNum in range(len(data)):
# then the cells in that row
for cellNum in range(5):
# get a name and val ("" if no val)
name = "c" + str(rowNum) + "-" + str(cellNum)
if cellNum >= len(data[rowNum]) : val = ""
else: val = data[rowNum][cellNum]
lab = tk.Label(self.frame)
if rowNum == 0:
lab.configure( relief=tk.RIDGE,text=val )
else:
lab.configure( relief=tk.RIDGE,text=val)
lab.bind("<Enter>", lambda e: e.widget.config(background='red'))
lab.bind("<Leave>", lambda e: e.widget.config(background='white'))
lab.grid ( row=rowNum, column=cellNum, sticky=tk.N+tk.E+tk.S+tk.W )
tk.Grid.columnconfigure(self.frame, cellNum, weight=1)
tk.Grid.rowconfigure(self.frame, rowNum, weight=1)
def OnFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
if __name__ == "__main__":
root=tk.Tk()
Example(root).pack(side="top", fill="both", expand=True)
root.mainloop()
| 37.936508 | 91 | 0.573222 |
7fd7d312de80f4ce46aa3d7cc60d85d0ad9cadd3 | 2,488 | py | Python | citicup-model/production/model/average_model.py | TianChenjiang/Networker | 07ae61860470de3786162cbdfe7f8ed58d1726ce | ["MIT"] | null | null | null | citicup-model/production/model/average_model.py | TianChenjiang/Networker | 07ae61860470de3786162cbdfe7f8ed58d1726ce | ["MIT"] | 6 | 2020-01-28T22:59:04.000Z | 2022-01-21T23:29:23.000Z | citicup-model/production/model/average_model.py | TianChenjiang/Networker | 07ae61860470de3786162cbdfe7f8ed58d1726ce | ["MIT"] | 1 | 2021-03-21T03:19:29.000Z | 2021-03-21T03:19:29.000Z |
from model.files import pro, code_set, JSPATH
import datetime
from model.deeplearning_model import deep_predict
from model.lightgbm_model import lgb_predict
import json
import pandas as pd
from werkzeug.exceptions import NotFound
import bisect
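# Equal-weight ensemble: the final risk probability is the average of the
# deep-learning and LightGBM model outputs.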
def get_result(code, forecast_close_line, price_df):
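    # Simple ensemble: average the deep-learning and LightGBM risk probabilities with equal weights.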
deep_prob = deep_predict(code, forecast_close_line, price_df)
lgb_prob = lgb_predict(code, forecast_close_line, price_df)
return 0.5 * deep_prob + 0.5 * lgb_prob
def predict(code):
if not JSPATH.is_file():
predict_all_init()
        raise NotFound('Risk probability file does not exist! Please wait 5 minutes and try again!')
with open(JSPATH, 'r') as fp:
prob_dict = json.load(fp)
if code not in prob_dict:
raise NotFound('Company ts_code not in our system!')
prob_list = sorted(prob_dict.items(), key=lambda x: x[1], reverse=True)
rank, prob = [(i, prob) for i, (curr_code, prob) in enumerate(prob_list) if curr_code == code][0]
total = len(prob_list)
return prob, rank, total
def predict_given_forecast_close_line(code, forecast_close_line):
if code not in code_set:
raise NotFound('Company not in our dataset')
price_df = pro.daily(ts_code=code, start_date='20190101', end_date=datetime.datetime.now().strftime('%Y%m%d'))
return get_result(code, forecast_close_line, price_df)
# Scheduled task: precompute risk probabilities for every company and cache them as JSON.
def predict_all_init():
forecast_line_df = pd.read_csv('forecast.csv', index_col=0)
today = datetime.datetime.now()
appended_data = []
    for i in range(100):  # the market is closed on non-trading days, so some days may have no price data
day = today - datetime.timedelta(days=i)
df = pro.daily(trade_date=day.strftime('%Y%m%d'))
appended_data.append(df)
appended_data = pd.concat(appended_data)
prob_dict = dict()
for code in code_set:
price_df = appended_data[appended_data['ts_code'] == code]
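        # Skip companies with fewer than 48 recent trading days of price data.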
if len(price_df) < 48:
continue
price_df = price_df.sort_values(by='trade_date', ascending=False)
price_df = price_df.reset_index(drop=True)
try:
forecast_close_line = forecast_line_df.loc[code][0]
except KeyError:
print(code)
continue
forecast_close_line = float(forecast_close_line)
prob = get_result(code, forecast_close_line, price_df)
prob_dict[code] = prob
with open(JSPATH, 'w') as fp:
json.dump(prob_dict, fp)
if __name__ == '__main__':
predict_all_init()
| 32.311688 | 114 | 0.680466 |
84da16d60f215ebf2de5ae35e9e9227036e844c7 | 1,581 | py | Python | authors/apps/profiles/utilities.py | arthurarty/ah-backend-poseidon | d2b561e83ed1e9a585853f4a4e2e37805e86c35c | [
"BSD-3-Clause"
] | 1 | 2019-01-22T18:00:44.000Z | 2019-01-22T18:00:44.000Z | authors/apps/profiles/utilities.py | arthurarty/ah-backend-poseidon | d2b561e83ed1e9a585853f4a4e2e37805e86c35c | [
"BSD-3-Clause"
] | 24 | 2018-11-27T10:11:13.000Z | 2021-06-10T21:01:15.000Z | authors/apps/profiles/utilities.py | andela/ah-backend-poseidon | d2b561e83ed1e9a585853f4a4e2e37805e86c35c | [
"BSD-3-Clause"
] | 4 | 2019-05-29T12:08:52.000Z | 2020-05-23T11:48:42.000Z | from .exceptions import NotificationDoesNotExist, NotificaionForbidden
from .models import Notification, Profile
from django.dispatch import receiver
from ..comments.models import Comment, Article
from django.db.models.signals import post_save
from ..authentication.models import User
def return_notification(request, notification_id):
try:
query = Notification.objects.get(pk=notification_id)
except Notification.DoesNotExist:
raise NotificationDoesNotExist
if not query.user_id == request.user.pk:
raise NotificaionForbidden
return query
def save_notification_db(follower, title, body):
notify = Notification(user=follower, type=title, body=body)
notify.save()
# notification for creation of an article by an author the user follows
@receiver(post_save, sender=Article)
def create_notification(*args, **kwargs):
user = kwargs['instance'].author
body = 'article has been created by ' + user.username
for i in user.profile.followers():
follower = User.objects.get(pk=i.user_id)
save_notification_db(follower, 'New article created', body)
# notification for a comment on a favourited article.
@receiver(post_save, sender=Comment)
def create_notification_comment(*args, **kwargs):
article_slug = kwargs['instance'].slug_id
article = Article.objects.get(slug=article_slug)
body = 'user has posted new comment'
for i in article.is_favourite_by():
user = User.objects.get(username=i)
save_notification_db(user, 'new comment on favourated article', body)
| 35.931818 | 77 | 0.73561 |
e6f22e805b6ab86eb5fc6ce63307faa5bf2ae4a4 | 3,769 | py | Python | xlsxwriter/test/worksheet/test_cond_format09.py | eddiechapman/XlsxWriter | c636117ab30e64e4b7b824c9105595c42887c2c9 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-03-27T11:14:47.000Z | 2021-03-27T11:14:47.000Z | xlsxwriter/test/worksheet/test_cond_format09.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/worksheet/test_cond_format09.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write('A1', 10)
worksheet.write('A2', 20)
worksheet.write('A3', 30)
worksheet.write('A4', 40)
worksheet.conditional_format('A1:A4',
{'type': 'blanks',
})
worksheet.conditional_format('A1:A4',
{'type': 'no_blanks',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'errors',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'no_errors',
'format': None,
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="containsBlanks" priority="1">
<formula>LEN(TRIM(A1))=0</formula>
</cfRule>
<cfRule type="notContainsBlanks" priority="2">
<formula>LEN(TRIM(A1))>0</formula>
</cfRule>
<cfRule type="containsErrors" priority="3">
<formula>ISERROR(A1)</formula>
</cfRule>
<cfRule type="notContainsErrors" priority="4">
<formula>NOT(ISERROR(A1))</formula>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 35.895238 | 171 | 0.405678 |
a075d89037174b7b5c317979bf4f0a6e3a7446da | 5,164 | py | Python | squeaknode/twitter/twitter_forwarder.py | azernik/squeaknode | 3d29338d5674f55d1c4bc97a370053d43810559f | [
"MIT"
] | null | null | null | squeaknode/twitter/twitter_forwarder.py | azernik/squeaknode | 3d29338d5674f55d1c4bc97a370053d43810559f | [
"MIT"
] | null | null | null | squeaknode/twitter/twitter_forwarder.py | azernik/squeaknode | 3d29338d5674f55d1c4bc97a370053d43810559f | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Jonathan Zernik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
from typing import List
from typing import Optional
from squeaknode.core.twitter_account_entry import TwitterAccountEntry
from squeaknode.node.squeak_controller import SqueakController
from squeaknode.twitter.twitter_stream import TwitterStream
logger = logging.getLogger(__name__)
class TwitterForwarder:
def __init__(
self,
retry_s: int,
):
self.retry_s = retry_s
self.lock = threading.Lock()
self.current_task: Optional[TwitterForwarderTask] = None
def start_processing(self, squeak_controller: SqueakController):
with self.lock:
if self.current_task is not None:
self.current_task.stop_processing()
self.current_task = TwitterForwarderTask(
squeak_controller,
self.retry_s,
)
self.current_task.start_processing()
def stop_processing(self):
with self.lock:
if self.current_task is not None:
self.current_task.stop_processing()
class TwitterForwarderTask:
def __init__(
self,
squeak_controller: SqueakController,
retry_s: int,
):
self.squeak_controller = squeak_controller
self.retry_s = retry_s
self.stopped = threading.Event()
self.tweet_stream = None
def start_processing(self):
logger.info("Starting twitter forwarder task.")
threading.Thread(
target=self.process_forward_tweets,
daemon=True,
).start()
def stop_processing(self):
logger.info("Stopping twitter forwarder task.")
self.stopped.set()
if self.tweet_stream is not None:
self.tweet_stream.cancel_fn()
def process_forward_tweets(self):
while not self.stopped.is_set():
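            # Keep (re)subscribing to the filtered tweet stream until stopped;
            # any error tears the stream down and retries after retry_s seconds.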
try:
bearer_token = self.get_bearer_token()
handles = self.get_twitter_handles()
if not bearer_token:
return
if not handles:
return
logger.info("Starting forward tweets with bearer token: {} and twitter handles: {}".format(
bearer_token,
handles,
))
twitter_stream = TwitterStream(bearer_token, handles)
self.tweet_stream = twitter_stream.get_tweets()
if self.stopped.is_set():
return
for tweet in self.tweet_stream.result_stream:
self.handle_tweet(tweet)
# TODO: use more specific error.
except Exception:
logger.exception(
"Unable to subscribe tweet stream. Retrying in {} seconds...".format(
self.retry_s,
),
)
self.stopped.wait(self.retry_s)
def get_bearer_token(self) -> str:
return self.squeak_controller.get_twitter_bearer_token() or ''
def get_twitter_handles(self) -> List[str]:
twitter_accounts = self.squeak_controller.get_twitter_accounts()
handles = [account.handle for account in twitter_accounts]
return handles
def is_tweet_a_match(self, tweet: dict, account: TwitterAccountEntry) -> bool:
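        # A tweet matches an account when one of its stream matching rules is tagged with that account's handle.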
for rule in tweet['matching_rules']:
if rule['tag'] == account.handle:
return True
return False
def forward_tweet(self, tweet: dict, account: TwitterAccountEntry) -> None:
self.squeak_controller.make_squeak(
profile_id=account.profile_id,
content_str=tweet['data']['text'],
replyto_hash=None,
)
def handle_tweet(self, tweet: dict):
logger.info(
"Got tweet: {}".format(tweet))
twitter_accounts = self.squeak_controller.get_twitter_accounts()
for account in twitter_accounts:
if self.is_tweet_a_match(tweet, account):
self.forward_tweet(tweet, account)
| 36.366197 | 107 | 0.640589 |
2c45edc6ec3742428d3a313ed18c064f133fd58a | 21,707 | py | Python | linen_examples/wmt/input_pipeline.py | isabella232/flax | 39a04e82d6f97ef90c59425599018f2b9df8b6ea | [
"Apache-2.0"
] | null | null | null | linen_examples/wmt/input_pipeline.py | isabella232/flax | 39a04e82d6f97ef90c59425599018f2b9df8b6ea | [
"Apache-2.0"
] | null | null | null | linen_examples/wmt/input_pipeline.py | isabella232/flax | 39a04e82d6f97ef90c59425599018f2b9df8b6ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for a WMT dataset."""
import os
import tempfile
import time
from absl import logging
import jax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_text as tftxt
from sentencepiece import SentencePieceTrainer
AUTOTUNE = tf.data.experimental.AUTOTUNE
# -----------------------------------------------------------------------------
# Raw TFDS dataset.
# -----------------------------------------------------------------------------
def raw_wmt_datasets(dataset_name='wmt17_translate/de-en',
eval_dataset_name=None,
reverse_translation=False,
shard_idx=0,
shard_count=1,
data_dir=None):
"""Load raw WMT datasets and normalize feature keys.
Args:
dataset_name: str: TFDS WMT dataset name.
eval_dataset_name: Optional[str]: separate dataset name for evaluation.
e.g. for specifying the standard academic WMT14 test set.
reverse_translation: bool: whether to reverse the translation direction.
      e.g. for 'de-en' this translates from English to German.
    shard_idx: int: for multihost training, index of this host.
    shard_count: int: for multihost training, number of total hosts.
data_dir: str: location of TFDS data directory.
Returns:
training tf.dataset, evaluation tf.dataset, and training features_info
source and target language features are mapped to 'inputs' and 'targets'
keys.
"""
builder = tfds.builder(dataset_name, data_dir=data_dir)
shard_spec = (f'[{int(100 * shard_idx / shard_count)}%'
f':{int(100 * (shard_idx + 1) / shard_count)}%]')
logging.info('Training on TFDS dataset %s with split %s',
dataset_name, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=True)
if eval_dataset_name is None:
logging.info('Evaluating on TFDS dataset %s with split %s',
dataset_name, 'validation' + shard_spec)
eval_data = builder.as_dataset(split='validation' + shard_spec,
shuffle_files=False)
else:
eval_dataset, *eval_split = eval_dataset_name.split(':')
if not eval_split:
eval_split = 'validation'
else:
eval_split = eval_split[0]
logging.info('Evaluating on TFDS dataset %s with split %s',
eval_dataset, eval_split + shard_spec)
eval_builder = tfds.builder(eval_dataset, data_dir=data_dir)
eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,
shuffle_files=False)
features_info = builder.info
# standardize on 'inputs' and 'targets' features.
input_lang = features_info.supervised_keys[0]
target_lang = features_info.supervised_keys[1]
if reverse_translation:
input_lang, target_lang = target_lang, input_lang
def to_features_dict(x):
return {'inputs': x[input_lang], 'targets': x[target_lang]}
train_data = train_data.map(to_features_dict, num_parallel_calls=AUTOTUNE)
eval_data = eval_data.map(to_features_dict, num_parallel_calls=AUTOTUNE)
return train_data, eval_data, features_info
# -----------------------------------------------------------------------------
# Tokenization.
# -----------------------------------------------------------------------------
def dump_chars_to_textfile(dataset,
maxchars=1e7,
data_keys=('inputs', 'targets')):
"""Write part of a TFDS sentence dataset to lines in a text file.
Args:
dataset: tf.dataset containing string-data.
maxchars: int: approximate number of characters to save from dataset.
data_keys: Tuple[str]: what keys in dataset to dump from.
Returns:
name of temp file with dataset bytes, exact number of characters dumped.
"""
char_count = 0
ds_iter = dataset.as_numpy_iterator()
with tempfile.NamedTemporaryFile(delete=False,
prefix='/tmp/ds_chars') as outfp:
while char_count < maxchars:
example = next(ds_iter)
for k in data_keys:
line = example[k] + b'\n'
char_count += len(line)
outfp.write(line)
return outfp.name, char_count
def train_sentencepiece(dataset,
vocab_size,
maxchars=1e7,
character_coverage=1.0,
model_path='wmt_model.model',
model_type='unigram',
data_keys=('inputs', 'targets')):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
dataset: tf.dataset
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
    character_coverage: amount of characters covered by the model; good
      defaults are 0.9995 for languages with a rich character set like Japanese
      or Chinese, and 1.0 for other languages with a small character set.
model_path: str: path of model file to save vocab model to.
model_type: str: type of sentencepiece vocab to train.
data_keys: Tuple[str]: keys of dataset to use for training.
Returns:
path to the trained sentencepiece vocabulary model.
"""
abs_model_path = os.path.abspath(os.path.expanduser(model_path))
fname, _ = dump_chars_to_textfile(dataset,
maxchars=maxchars,
data_keys=data_keys)
with tempfile.NamedTemporaryFile(delete=False,
prefix='/tmp/sp_tmp') as model_fp:
pass # we just want a prefix'd tmp-filename
argstr = ' '.join(
[f'--input={fname}',
f'--vocab_size={vocab_size}',
f'--character_coverage={character_coverage}',
f'--model_prefix={model_fp.name}',
f'--model_type={model_type}'])
SentencePieceTrainer.Train(argstr)
if jax.host_id() == 0:
# Use an intermediate filename that is renamed to the target name to address
# create and fill delays.
copy_rename_path = abs_model_path + '.rntmp'
tf.io.gfile.copy(model_fp.name + '.model', copy_rename_path, overwrite=True)
tf.io.gfile.rename(copy_rename_path, abs_model_path, overwrite=True)
logging.info('copied %s to %s', model_fp.name+'.model', abs_model_path)
else:
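    # Other hosts wait for host 0 to finish copying the trained vocab file into place.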
while not tf.io.gfile.exists(abs_model_path):
time.sleep(1)
time.sleep(1)
return abs_model_path
def load_sentencepiece_tokenizer(
model_path, add_bos=False, add_eos=True, reverse=False):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
with tf.io.gfile.GFile(model_path, 'rb') as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=add_bos, add_eos=add_eos, reverse=reverse)
return sp_tokenizer
# -----------------------------------------------------------------------------
# Dynamic to static shape transforms.
# -----------------------------------------------------------------------------
def bin_and_batch(dataset,
n_devices,
batch_size=256,
bucket_length=32,
buckets=None,
drop_remainder=True):
"""Dynamic batching by length-bucketing.
Sorts data into a small number of batch x length "buckets" that have roughly
constant token count.
Args:
dataset: tf.data dataset
n_devices: int: number of local devices
batch_size: int: target batch size
bucket_length: int: target length for target batch size
buckets: List[Tuple[int, int]]: pairs of bucket-length, batch-size
boundaries to define custom length-buckets.
drop_remainder: bool: whether or not to drop the last odd-shaped batch
produced by bucketing a finite input data stream.
Returns:
tf.data dataset with dynamically batched examples.
"""
  # Create heuristic buckets if none are specified.
if buckets is None:
logging.info('Heuristically bucketing based on shapes of examples.')
bucket_boundaries = [
bucket_length // 4, bucket_length // 2, bucket_length,
bucket_length * 2, bucket_length * 4, bucket_length * 8,
bucket_length * 16
]
bucket_batch_sizes = [
batch_size * 4, batch_size * 2, batch_size,
batch_size // 2, batch_size // 4, batch_size // 8,
batch_size // 16
]
# TF.data's bucket_by_sequence_length pads to (bucket_boundary - 1):
# we add 1 here to pad to the correct specified length.
bucket_boundaries = [b + 1 for b in bucket_boundaries]
# Make batch sizes divisible by n_devices.
bucket_batch_sizes = [
max(b // n_devices, 1) * n_devices for b in bucket_batch_sizes
]
buckets = (bucket_boundaries, bucket_batch_sizes)
logging.info('Bucketing with buckets %s.', str(buckets))
def example_length(example):
"""The length function used by bucket_by_sequence_length to bucket."""
return tf.maximum(tf.shape(example['inputs'])[0],
tf.shape(example['targets'])[0])
boundaries, batch_sizes = buckets
# bucket_by_sequence_length expects a final dummy 1 batch_size.
batch_sizes.append(1)
dataset = dataset.apply(
tf.data.experimental.bucket_by_sequence_length(
example_length,
boundaries,
batch_sizes,
pad_to_bucket_boundary=True,
drop_remainder=drop_remainder))
return dataset
def pack_dataset(dataset, length, keys=None):
"""Creates a 'packed' version of a dataset on-the-fly.
Adapted from the mesh-tf implementation.
This is meant to replace the irritation of having to create a separate
"packed" version of a dataset to train efficiently on TPU.
Each example in the output dataset represents several examples in the
input dataset.
For each key in the input dataset, two additional keys are created:
<key>_segmentation: an int32 tensor identifying the parts
representing the original example.
<key>_position: an int32 tensor identifying the position within the original
example.
Example:
Two input examples get combined to form an output example.
The input examples are:
{"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
{"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
The output example is:
{
"inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
"inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
"inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
"targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
"targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
"targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
}
0 represents padding in both the inputs and the outputs.
Sequences in the incoming examples are truncated to length "length", and the
sequences in the output examples all have fixed (padded) length "length".
Args:
dataset: a tf.data.Dataset
length: an integer, or a dict from feature-key to integer
keys: a list of strings (e.g. ["inputs", "targets"])
Returns:
a tf.data.Dataset
"""
shapes = tf.nest.map_structure(lambda spec: spec.shape, dataset.element_spec)
if keys is None:
keys = list(shapes.keys())
for k in keys:
if k not in shapes:
raise ValueError('Key %s not found in dataset. Available keys are %s'
% (k, shapes.keys()))
if not shapes[k].is_compatible_with(tf.TensorShape([None])):
raise ValueError('Tensors to be packed must be one-dimensional.')
# make sure that the length dictionary contains all keys as well as the
# keys suffixed by "_segmentation" and "_position"
length_dict = {}
for k in keys:
for suffix in ['', '_segmentation', '_position']:
length_dict[k + suffix] = length if isinstance(length, int) else length[k]
length = length_dict
# trim to length
dataset = dataset.map(lambda x: {k: x[k][:length[k]] for k in keys},
num_parallel_calls=AUTOTUNE)
# Setting batch_size=length ensures that the concatenated sequences (if they
# have length >=1) are sufficient to fill at least one packed example.
batch_size = max(length.values())
dataset = dataset.padded_batch(
batch_size, padded_shapes={k: [-1] for k in keys})
dataset = _pack_with_tf_ops(dataset, keys, length)
# Set the Tensor shapes correctly since they get lost in the process.
def my_fn(x):
return {k: tf.reshape(v, [length[k]]) for k, v in x.items()}
return dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
def _pack_with_tf_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
Helper for pack_dataset() Uses tf.while_loop.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings
length: an dict from feature-key to integer
Returns:
a dataset.
"""
empty_example = {}
for k in keys:
empty_example[k] = tf.zeros([0], dtype=tf.int32)
empty_example[k + '_position'] = tf.zeros([0], dtype=tf.int32)
keys_etc = empty_example.keys()
def write_packed_example(partial, outputs):
new_partial = empty_example.copy()
new_outputs = {}
for k in keys_etc:
new_outputs[k] = outputs[k].write(
outputs[k].size(),
tf.pad(partial[k],
[[0, length[k] - tf.size(partial[k])]]))
return new_partial, new_outputs
def map_fn(x):
"""Internal function to flat_map over.
Consumes a batch of input examples and produces a variable number of output
examples.
Args:
x: a single example
Returns:
a tf.data.Dataset
"""
partial = empty_example.copy()
i = tf.zeros([], dtype=tf.int32)
dynamic_batch_size = tf.shape(x[keys[0]])[0]
outputs = {}
for k in keys:
outputs[k] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length[k]])
outputs[k + '_position'] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length[k]])
def cond_fn(i, partial, outputs):
del partial, outputs
return i < dynamic_batch_size
def body_fn(i, partial, outputs):
"""Body function for while_loop.
Args:
i: integer scalar
partial: dictionary of Tensor (partially-constructed example)
outputs: dictionary of TensorArray
Returns:
A triple containing the new values of the inputs.
"""
can_append = True
one_example = {}
for k in keys:
val = tf.cast(x[k][i], tf.int32)
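        # Drop the trailing zero-padding so only real tokens get packed.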
val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
one_example[k] = val
for k in keys:
can_append = tf.logical_and(
can_append,
tf.less_equal(
tf.size(partial[k]) + tf.size(one_example[k]), length[k]))
def false_fn():
return write_packed_example(partial, outputs)
def true_fn():
return partial, outputs
partial, outputs = tf.cond(can_append, true_fn, false_fn)
new_partial = {}
for k in keys:
new_seq = one_example[k][:length[k]]
new_seq_len = tf.size(new_seq)
new_partial[k] = tf.concat([partial[k], new_seq], 0)
new_partial[k + '_position'] = tf.concat(
[partial[k + '_position'],
tf.range(new_seq_len, dtype=tf.int32)], 0)
partial = new_partial
return i+1, partial, outputs
i, partial, outputs = \
tf.while_loop(
cond_fn, body_fn, (i, partial, outputs),
shape_invariants=(
tf.TensorShape([]),
{k: tf.TensorShape([None]) for k in keys_etc},
{k: tf.TensorShape(None) for k in keys_etc},
)
)
partial, outputs = write_packed_example(partial, outputs)
packed = {k: outputs[k].stack() for k in keys_etc}
for k in keys:
packed[k + '_segmentation'] = (
tf.cumsum(
tf.cast(tf.equal(packed[k + '_position'], 0), tf.int32), axis=1) *
tf.cast(tf.not_equal(packed[k], 0), tf.int32))
return packed
dataset = dataset.map(map_fn, num_parallel_calls=AUTOTUNE)
return dataset.unbatch()
# -----------------------------------------------------------------------------
# Main dataset prep routines.
# -----------------------------------------------------------------------------
def preprocess_wmt_data(dataset,
training,
n_devices,
dynamic_batching=False,
pack_examples=True,
shuffle_buffer_size=1024,
max_length=512,
batch_size=256,
bucket_length=32,
drop_remainder=True,
prefetch_size=AUTOTUNE):
"""Shuffle and batch/pack the given dataset."""
def length_filter(max_len):
def filter_fn(x):
source, target = x['inputs'], x['targets']
l = tf.maximum(tf.shape(source)[0], tf.shape(target)[0])
return tf.less(l, max_len + 1)
return filter_fn
if max_length > 0:
dataset = dataset.filter(length_filter(max_length))
if training:
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.repeat()
if pack_examples and dynamic_batching:
raise ValueError(
"Can't use both dynamic batching and packed-examples simultaneously.")
if pack_examples:
dataset = pack_dataset(dataset, max_length)
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
elif dynamic_batching:
dataset = bin_and_batch(
dataset,
n_devices,
batch_size=batch_size,
bucket_length=bucket_length,
drop_remainder=drop_remainder)
else: # simple (static-shape) padded batching
dataset = dataset.padded_batch(
batch_size,
padded_shapes={'inputs': max_length, 'targets': max_length},
padding_values={'inputs': 0, 'targets': 0},
drop_remainder=drop_remainder)
if prefetch_size:
dataset = dataset.prefetch(prefetch_size)
return dataset
def get_wmt_datasets(n_devices,
dataset_name='wmt17_translate/de-en',
eval_dataset_name=None,
reverse_translation=True,
shard_idx=0,
shard_count=1,
data_dir=None,
vocab_path=None,
target_vocab_size=2**15, # 32000
max_corpus_chars=10**7,
batch_size=256,
bucket_length=32,
dynamic_batching=False,
pack_examples=True,
max_length=256,
max_eval_length=256):
"""Load and return dataset of batched examples for use during training."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
if vocab_path is None:
vocab_path = os.path.expanduser('~/wmt_sentencepiece_model')
train_data, eval_data, _ = raw_wmt_datasets(
dataset_name=dataset_name,
eval_dataset_name=eval_dataset_name,
reverse_translation=reverse_translation,
shard_idx=shard_idx,
shard_count=shard_count,
data_dir=data_dir)
try:
sp_tokenizer = load_sentencepiece_tokenizer(vocab_path, add_eos=True)
except tf.errors.NotFoundError:
logging.info('SentencePiece vocab not found, building one from data.')
abs_vocab_path = train_sentencepiece(
train_data,
target_vocab_size,
maxchars=max_corpus_chars,
character_coverage=1.0,
model_path=vocab_path,
data_keys=('inputs', 'targets'))
sp_tokenizer = load_sentencepiece_tokenizer(abs_vocab_path, add_eos=True)
# Encode strings with sentencepiece tokenizer.
def tokenize(data):
return {'inputs': sp_tokenizer.tokenize(data['inputs']),
'targets': sp_tokenizer.tokenize(data['targets'])}
train_data = train_data.map(tokenize, num_parallel_calls=AUTOTUNE)
eval_data = eval_data.map(tokenize, num_parallel_calls=AUTOTUNE)
train_batches = preprocess_wmt_data(
train_data,
training=True,
dynamic_batching=dynamic_batching,
pack_examples=pack_examples,
n_devices=n_devices,
batch_size=batch_size,
bucket_length=bucket_length,
max_length=max_length)
eval_batches = preprocess_wmt_data(
eval_data,
training=False,
dynamic_batching=dynamic_batching,
pack_examples=False,
n_devices=n_devices,
batch_size=batch_size,
bucket_length=bucket_length,
max_length=max_eval_length)
predict_batches = preprocess_wmt_data(
eval_data,
training=False,
dynamic_batching=dynamic_batching,
pack_examples=False,
n_devices=n_devices,
batch_size=batch_size,
bucket_length=bucket_length,
max_length=max_eval_length,
drop_remainder=False)
return train_batches, eval_batches, predict_batches, sp_tokenizer
| 37.620451 | 80 | 0.633436 |
623619850eed02c3beaf5bd487e2d9b83bcc7d77 | 1,080 | py | Python | sandbox/apps/python/multigrid/jacobi3D/polymage_defect.py | rcodin/polymage | 653487be125dec4950d1c65da4f736fa05fb938f | [
"Apache-2.0"
] | 10 | 2016-07-22T06:53:11.000Z | 2021-02-19T06:22:00.000Z | sandbox/apps/python/multigrid/jacobi3D/polymage_defect.py | rcodin/polymage | 653487be125dec4950d1c65da4f736fa05fb938f | [
"Apache-2.0"
] | null | null | null | sandbox/apps/python/multigrid/jacobi3D/polymage_defect.py | rcodin/polymage | 653487be125dec4950d1c65da4f736fa05fb938f | [
"Apache-2.0"
] | 2 | 2017-11-21T20:29:36.000Z | 2021-05-21T01:52:05.000Z | from __init__ import *
import sys
from polymage_common import set_ghosts
sys.path.insert(0, ROOT)
from compiler import *
from constructs import *
def defect(U_, F_, l, name, pipe_data):
if U_ == None:
return F_
z = pipe_data['z']
y = pipe_data['y']
x = pipe_data['x']
invhh = pipe_data['invhh']
extent = pipe_data['extent']
interior = pipe_data['interior']
ghosts = pipe_data['ghosts']
inner_box = interior[l]['inner_box']
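    # Defect (residual) W = F - A*U, where A*U applies the 7-point Laplacian
    # stencil scaled by 1/h^2 at grid level l.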
W_ = Function(([z, y, x], [extent[l], extent[l], extent[l]]),
Double, str(name))
W_.defn = [ Case(inner_box,
F_(z , y , x ) \
- (U_(z , y , x ) * 6.0 \
- U_(z-1, y , x ) \
- U_(z+1, y , x ) \
- U_(z , y-1, x ) \
- U_(z , y+1, x ) \
- U_(z , y , x-1) \
- U_(z , y , x+1) \
) * invhh[l]) ]
set_ghosts(W_, ghosts[l], 0.0)
return W_
| 25.116279 | 65 | 0.422222 |
0e9226ebeb9ac9de73c16b46f793333e83198b72 | 7,787 | py | Python | src/morphforgecontrib/simulation/channels/util.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforgecontrib/simulation/channels/util.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | src/morphforgecontrib/simulation/channels/util.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from morphforgecontrib.simulation.channels import inftauinterpolated
import numpy as np
from morphforgecontrib.simulation.channels.hh_style import StdChlAlphaBeta
from morphforgecontrib.simulation.channels.hh_style import StdChlAlphaBetaBeta
from morphforge.units import qty
from morphforge import units
class ChannelConverter(object):
@classmethod
def AlphaBetaToInterpolateInfTauFunctorConvertor(cls, chl_functor, new_id=None, new_name=None, clone_id_suffix="_AsInfTau", clone_name_suffix="_AsInfTau", voltage_interpolation_values=None, ):
# Create a new functor:
def newFunctor(env, _voltage_interpolation_values=voltage_interpolation_values):
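            # Sample the original channel's alpha/beta rate functions at the
            # interpolation voltages and re-express each state variable as
            # interpolated inf/tau curves for the new channel.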
old_chl = chl_functor(env)
assert isinstance(old_chl, (StdChlAlphaBeta,
StdChlAlphaBetaBeta)) # or issubclass(StdChlAlphaBetaBeta, old_chl)
# New Name
if new_name is not None:
chl_name = new_name
else:
chl_name = old_chl.name + clone_name_suffix
# Interpolation voltages:
# voltage_interpolation_values=voltage_interpolation_values
if _voltage_interpolation_values is None:
_voltage_interpolation_values = np.linspace(-80, 60, 10) * qty('mV')
# Copy the state variables
new_state_vars = {}
for state_var in old_chl.get_state_variables():
alpha, beta = old_chl.get_alpha_beta_at_voltage(statevar=state_var, V=_voltage_interpolation_values)
inf, tau = InfTauCalculator.alpha_beta_to_inf_tau(alpha, beta)
V = _voltage_interpolation_values.rescale('mV').magnitude
inf = inf.rescale(units.dimensionless).magnitude
tau = tau.rescale('ms').magnitude
new_state_vars[state_var] = InfTauInterpolation(V=V, inf=inf, tau=tau)
chl = env.Channel(
MM_InfTauInterpolatedChannel,
name=chl_name,
ion=old_chl.ion,
equation=old_chl.eqn,
conductance=old_chl.conductance,
reversalpotential=old_chl.reversalpotential,
statevars_new=new_state_vars,
)
return chl
return newFunctor
# V1 = self.state1.plotinf.lineplot.index.get_data().tolist()
# inf1 = self.state1.plotinf.lineplot.value.get_data().tolist()
# tau1 = self.state1.plottau.lineplot.value.get_data().tolist()
#
# V2 = self.state2.plotinf.lineplot.index.get_data().tolist()
# inf2 = self.state2.plotinf.lineplot.value.get_data().tolist()
# tau2 = self.state2.plottau.lineplot.value.get_data().tolist()
#
# #V1 = self.state1.plotinf.mx.tolist()
# #inf1 = self.state1.plotinf.my.tolist()
# #tau1 = self.state1.plottau.my.tolist()
# #V2 = self.state2.plotinf.mx.tolist()
# #inf2 = self.state2.plotinf.my.tolist()
# #tau2 = self.state2.plottau.my.tolist()
#
# ks_vars = {
# self.state_var_name1: InfTauInterpolation(V=V1, inf=inf1, tau=tau1),
# self.state_var_name2: InfTauInterpolation(V=V2, inf=inf2, tau=tau2),
# }
#
# #inf_data1 = zip(self.state1.plotinf.mx.tolist(), self.state1.plotinf.my.tolist())
# #tau_data1 = zip(self.state1.plottau.mx.tolist(), self.state1.plottau.my.tolist())
#
# #inf_data2 = zip(self.state2.plotinf.mx.tolist(), self.state2.plotinf.my.tolist())
# #tau_data2 = zip(self.state2.plottau.mx.tolist(), self.state2.plottau.my.tolist())
# #
# #ks_vars = {
# # self.state_var_name1: { 'inf': inf_data1, 'tau': tau_data1, },
# # self.state_var_name2: { 'inf': inf_data2, 'tau': tau_data2, },
# #
# # }
# ks = env.Channel(MM_InfTauInterpolatedChannel,
# name=self.chlname,
# ion='None',
# equation=self.eqn,
# conductance = '%2.2f:mS/cm2' % gbar,
# reversalpotential = '%2.2f:mV' % vrev,
# statevars_new = ks_vars)
#
#
#
#
# ca_state_vars = { "m": {"alpha": [4.05, 0.0, 1.0, -15.32, -13.57], "beta1": [0.093 * 10.63, 0.093, -1, 10.63, 1], "beta2":[1.28, 0, 1, 5.39, 12.11] } }
# caChannels = env.Channel(
# StdChlCalciumAlphaBetaBeta,
# name="CaChl", ion="ca",
# equation="m*m",
# permeability = qty("1.425:cm/s") * 0.1 * 0.15,
# intracellular_concentration = qty("100:nMol"),
# extracellular_concentration = qty("10:uMol"),
# temperature = qty("300:K"),
# beta2threshold = qty("-25:mV"),
# statevars=ca_state_vars,
# )
# return caChannels
#
#
#
#
#
#
# state_names = chl.statevars.keys()
# assert len(state_names) == 2
# state_name1 = state_names[0]
# state_name2 = state_names[1]
#
# [intV, tauV], [intV, infV] = convertAlphaBetaToInfTauInterpolated(chl, state_name1, 10)
# state1=HHGeneralStatePanel(initial_tau= [intV, tauV], initial_inf=[intV, infV])
#
# [intV, tauV], [intV, infV] = convertAlphaBetaToInfTauInterpolated(chl, state_name2, 10)
# state2=HHGeneralStatePanel(initial_tau= [intV, tauV], initial_inf=[intV, infV])
#
# return HHChannelPaneInfTau2(sim_config=sim_config,
# general_pane=general,
# state_pane1=state1,
# state_pane2=state2,
# eqn = chl.eqn,
# state_var_name1 = state_name1,
# state_var_name2 = state_name2,
# chlname = chlname
# )
#
| 44.497143 | 197 | 0.577629 |
f9bee3927790f0c9594d70d9c339fb9e4cf3d7c7 | 1,466 | py | Python | Raspberry_Pi/compare_fp.py | sbcshop/PiFinger | f4825aacade25afbde484f1cd28880223fb89672 | [
"MIT"
] | 3 | 2021-02-26T01:43:22.000Z | 2021-12-09T16:38:02.000Z | Raspberry_Pi/compare_fp.py | sbcshop/PiFinger | f4825aacade25afbde484f1cd28880223fb89672 | [
"MIT"
] | null | null | null | Raspberry_Pi/compare_fp.py | sbcshop/PiFinger | f4825aacade25afbde484f1cd28880223fb89672 | [
"MIT"
] | null | null | null | from oled_091 import SSD1306
from os import path
from time import sleep
DIR_PATH = path.abspath(path.dirname(__file__))
DefaultFont = path.join(DIR_PATH, "Fonts/GothamLight.ttf")
display = SSD1306()
# Import FingerprintSensor class from fingerprint module
from fingerprint import FingerprintSensor
# Set the serial comport for the connected sensor
COM_PORT = "/dev/ttyS0" #
# Create class object
fp = FingerprintSensor()
display = SSD1306()
# Initialise the sensor serial with the COM port and a baud rate of
# 9600, with the "use_thread" argument set to False
fp.connect_sensor(port=COM_PORT, baud_rate=9600, use_thread=False)
# Use unlock_with_fingerprint function of FingerprintSensor to send
# fingerprint unlock command to the sensor
fp.unlock_with_fingerprint()
display.DirImage(path.join(DIR_PATH, "Images/SB.png"))
display.DrawRect()
display.ShowImage()
sleep(1)
display.PrintText("Place your Finger", FontSize=14)
display.ShowImage()
# Wait for the sensor to compare and send the success message
while True:
rec = fp.read_rx()
if rec:
print(rec)
# If the sensor sends "Matched" string exit the loop
if "Matched!" in rec:
display.PrintText(rec, cords=(2, 2), FontSize=14)
display.ShowImage()
break
else:
display.PrintText(rec, cords=(2, 2), FontSize=14)
display.ShowImage()
# Disconnect the serial fingerprint sensor
fp.disconnect_sensor()
| 29.32 | 72 | 0.721692 |
e7e97332c38f23c403998751e6bad8d216b5eccc | 2,527 | py | Python | accelbyte_py_sdk/api/lobby/models/model_user_accept_friend_request.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/lobby/models/model_user_accept_friend_request.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/lobby/models/model_user_accept_friend_request.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:12:33.456547+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelUserAcceptFriendRequest(Model):
"""Model user accept friend request
Properties:
friend_id: (friendId) REQUIRED str
"""
# region fields
friend_id: str # REQUIRED
# endregion fields
# region with_x methods
def with_friend_id(self, value: str) -> ModelUserAcceptFriendRequest:
self.friend_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "friend_id") and self.friend_id:
result["friendId"] = str(self.friend_id)
elif include_empty:
result["friendId"] = str()
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
friend_id: str,
) -> ModelUserAcceptFriendRequest:
instance = cls()
instance.friend_id = friend_id
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelUserAcceptFriendRequest:
instance = cls()
if not dict_:
return instance
if "friendId" in dict_ and dict_["friendId"] is not None:
instance.friend_id = str(dict_["friendId"])
elif include_empty:
instance.friend_id = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"friendId": "friend_id",
}
# endregion static methods
| 28.077778 | 109 | 0.649387 |
6753c919d390bf184cef9defbfba8f1e3ff83c78 | 132 | py | Python | Section 2/TokenizeInputData.py | PacktPublishing/Clean-Data-Tips-Tricks-and-Techniques | 4ca6a8025e3825f1e3083a9162117c9d888571e9 | [
"MIT"
] | 11 | 2018-11-10T16:42:59.000Z | 2021-10-01T23:23:52.000Z | Section 2/TokenizeInputData.py | PacktPublishing/Clean-Data-Tips-Tricks-and-Techniques | 4ca6a8025e3825f1e3083a9162117c9d888571e9 | [
"MIT"
] | null | null | null | Section 2/TokenizeInputData.py | PacktPublishing/Clean-Data-Tips-Tricks-and-Techniques | 4ca6a8025e3825f1e3083a9162117c9d888571e9 | [
"MIT"
] | 7 | 2018-10-31T13:07:56.000Z | 2021-03-13T19:48:38.000Z | from nltk.tokenize import word_tokenize
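# Note: word_tokenize relies on NLTK's 'punkt' tokenizer models (download once with nltk.download('punkt')).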
text1 = "It's true that the chicken was smart."
tokens = word_tokenize(text1)
print(tokens)
| 26.4 | 47 | 0.787879 |
15e40208aa229506c9b539f2b22806f08d671fb1 | 117 | py | Python | Clase 8 otro ejemplo/test/test_multiplicar.py | JoselynRuiz/Programacion-III | 9c09378e1e933372d0dfba2d2261f04c5aee86b3 | [
"MIT"
] | null | null | null | Clase 8 otro ejemplo/test/test_multiplicar.py | JoselynRuiz/Programacion-III | 9c09378e1e933372d0dfba2d2261f04c5aee86b3 | [
"MIT"
] | null | null | null | Clase 8 otro ejemplo/test/test_multiplicar.py | JoselynRuiz/Programacion-III | 9c09378e1e933372d0dfba2d2261f04c5aee86b3 | [
"MIT"
] | null | null | null | from unittest import TestCase
class TestMultiplicar(TestCase):
def test_multiplicar(self):
self.fail()
| 16.714286 | 32 | 0.726496 |
d22d9fe5307e3678f17236c4d3837f3bad6ba3e0 | 3,174 | py | Python | python/pysoarlib/IdentifierExtensions.py | amininger/vim-soar-debugger | c1a4cfa8b0e4760470c5619325f522f2f5021c25 | [
"MIT"
] | 10 | 2019-05-31T19:00:27.000Z | 2022-01-15T10:07:51.000Z | python/pysoarlib/IdentifierExtensions.py | amininger/vim-soar-debugger | c1a4cfa8b0e4760470c5619325f522f2f5021c25 | [
"MIT"
] | 2 | 2020-04-21T12:36:01.000Z | 2020-04-28T13:18:03.000Z | python/pysoarlib/IdentifierExtensions.py | amininger/vim-soar-debugger | c1a4cfa8b0e4760470c5619325f522f2f5021c25 | [
"MIT"
] | 4 | 2020-04-21T14:51:27.000Z | 2022-01-15T10:07:35.000Z | """ Defines additional helper methods for the Identifier class for accessing child values
This module is not intended to be imported directly,
Importing the pysoarlib module will cause these to be added to the Identifier class
Note that the methods will use CamelCase, so get_child_str => GetChildStr
"""
_INTEGER_VAL = "int"
_FLOAT_VAL = "double"
_STRING_VAL = "string"
def get_child_str(self, attribute):
""" Given id and attribute, returns value for WME as string (self ^attribute value) """
wme = self.FindByAttribute(attribute, 0)
if wme == None or len(wme.GetValueAsString()) == 0:
return None
return wme.GetValueAsString()
def get_child_int(self, attribute):
""" Given id and attribute, returns integer value for WME (self ^attribute value) """
wme = self.FindByAttribute(attribute, 0)
if wme == None or wme.GetValueType() != _INTEGER_VAL:
return None
return wme.ConvertToIntElement().GetValue()
def get_child_float(self, attribute):
""" Given id and attribute, returns float value for WME (self ^attribute value) """
wme = self.FindByAttribute(attribute, 0)
if wme == None or wme.GetValueType() != _FLOAT_VAL:
return None
return wme.ConvertToFloatElement().GetValue()
def get_child_id(self, attribute):
""" Given id and attribute, returns identifier value of WME (self ^attribute child_id) """
wme = self.FindByAttribute(attribute, 0)
if wme == None or not wme.IsIdentifier():
return None
return wme.ConvertToIdentifier()
def get_all_child_ids(self, attribute=None):
""" Given id and attribute, returns a list of child identifiers from all WME's matching (self ^attribute child_id)
If no attribute is specified, all child identifiers are returned
"""
child_ids = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if not wme.IsIdentifier():
continue
if attribute == None or wme.GetAttribute() == attribute:
child_ids.append(wme.ConvertToIdentifier())
return child_ids
def get_all_child_values(self, attribute=None):
""" Given id and attribute, returns a list of strings of non-identifier values from all WME's matching (self ^attribute value)
If no attribute is specified, all child values (non-identifiers) are returned
"""
child_values = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if wme.IsIdentifier():
continue
if attribute == None or wme.GetAttribute() == attribute:
child_values.append(wme.GetValueAsString())
return child_values
def get_all_child_wmes(self):
""" Returns a list of (attr, val) tuples representing all wmes rooted at this identifier
val will either be an Identifier or a string, depending on its type """
wmes = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if wme.IsIdentifier():
wmes.append( (wme.GetAttribute(), wme.ConvertToIdentifier()) )
else:
wmes.append( (wme.GetAttribute(), wme.GetValueAsString()) )
return wmes
| 39.675 | 130 | 0.687146 |
50089aa7630077ad0b4162fb733b4b56c73d0d6d | 14,286 | py | Python | tasks/pipeline.py | psviderski/datadog-agent | dd41dd75294f4605409aea7545c26e61f77d7abb | [
"Apache-2.0"
] | null | null | null | tasks/pipeline.py | psviderski/datadog-agent | dd41dd75294f4605409aea7545c26e61f77d7abb | [
"Apache-2.0"
] | null | null | null | tasks/pipeline.py | psviderski/datadog-agent | dd41dd75294f4605409aea7545c26e61f77d7abb | [
"Apache-2.0"
] | null | null | null | import io
import os
import re
import traceback
from collections import defaultdict
from invoke import task
from invoke.exceptions import Exit
from tasks.utils import DEFAULT_BRANCH
from .libs.common.color import color_message
from .libs.common.gitlab import Gitlab
from .libs.pipeline_notifications import (
base_message,
find_job_owners,
get_failed_jobs,
get_failed_tests,
send_slack_message,
)
from .libs.pipeline_tools import (
cancel_pipelines_with_confirmation,
get_running_pipelines_on_same_ref,
trigger_agent_pipeline,
wait_for_pipeline,
)
from .libs.types import SlackMessage, TeamMessage
# Tasks to trigger pipelines
ALLOWED_REPO_BRANCHES = {"stable", "beta", "nightly", "none"}
def check_deploy_pipeline(gitlab, project_name, git_ref, release_version_6, release_version_7, repo_branch):
"""
Run checks to verify a deploy pipeline is valid:
- it targets a valid repo branch
- it has matching Agent 6 and Agent 7 tags (depending on release_version_* values)
"""
# Check that the target repo branch is valid
if repo_branch not in ALLOWED_REPO_BRANCHES:
print(
"--repo-branch argument '{}' is not in the list of allowed repository branches: {}".format(
repo_branch, ALLOWED_REPO_BRANCHES
)
)
raise Exit(code=1)
#
# If git_ref matches v7 pattern and release_version_6 is not empty, make sure Gitlab has v6 tag.
# If git_ref matches v6 pattern and release_version_7 is not empty, make sure Gitlab has v7 tag.
# v7 version pattern should be able to match 7.12.24-rc2 and 7.12.34
#
v7_pattern = r'^7\.(\d+\.\d+)(-.+|)$'
v6_pattern = r'^6\.(\d+\.\d+)(-.+|)$'
match = re.match(v7_pattern, git_ref)
if release_version_6 and match:
# release_version_6 is not empty and git_ref matches v7 pattern, construct v6 tag and check.
tag_name = "6." + "".join(match.groups())
gitlab_tag = gitlab.find_tag(project_name, tag_name)
if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name:
print("Cannot find GitLab v6 tag {} while trying to build git ref {}".format(tag_name, git_ref))
raise Exit(code=1)
print("Successfully cross checked v6 tag {} and git ref {}".format(tag_name, git_ref))
else:
match = re.match(v6_pattern, git_ref)
if release_version_7 and match:
# release_version_7 is not empty and git_ref matches v6 pattern, construct v7 tag and check.
tag_name = "7." + "".join(match.groups())
gitlab_tag = gitlab.find_tag(project_name, tag_name)
if ("name" not in gitlab_tag) or gitlab_tag["name"] != tag_name:
print("Cannot find GitLab v7 tag {} while trying to build git ref {}".format(tag_name, git_ref))
raise Exit(code=1)
print("Successfully cross checked v7 tag {} and git ref {}".format(tag_name, git_ref))
@task
def clean_running_pipelines(ctx, git_ref=DEFAULT_BRANCH, here=False, use_latest_sha=False, sha=None):
"""
Fetch running pipelines on a target ref (+ optionally a git sha), and ask the user if they
should be cancelled.
"""
project_name = "DataDog/datadog-agent"
gitlab = Gitlab()
gitlab.test_project_found(project_name)
if here:
git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
print("Fetching running pipelines on {}".format(git_ref))
if not sha and use_latest_sha:
sha = ctx.run("git rev-parse {}".format(git_ref), hide=True).stdout.strip()
print("Git sha not provided, using the one {} currently points to: {}".format(git_ref, sha))
elif not sha:
print("Git sha not provided, fetching all running pipelines on {}".format(git_ref))
pipelines = get_running_pipelines_on_same_ref(gitlab, project_name, git_ref, sha)
print(
"Found {} running pipeline(s) matching the request.".format(len(pipelines)),
"They are ordered from the newest one to the oldest one.\n",
sep='\n',
)
cancel_pipelines_with_confirmation(gitlab, project_name, pipelines)
@task
def trigger(
ctx, git_ref=DEFAULT_BRANCH, release_version_6="nightly", release_version_7="nightly-a7", repo_branch="nightly"
):
"""
DEPRECATED: Trigger a deploy pipeline on the given git ref. Use pipeline.run with the --deploy option instead.
The --release-version-6 and --release-version-7 options indicate which release.json entries are used.
To not build Agent 6, set --release-version-6 "". To not build Agent 7, set --release-version-7 "".
The --repo-branch option indicates which branch of the staging repository the packages will be deployed to.
Example:
inv pipeline.trigger --git-ref 7.22.0 --release-version-6 "6.22.0" --release-version-7 "7.22.0" --repo-branch "stable"
"""
print(
color_message(
"WARNING: the pipeline.trigger invoke task is deprecated and will be removed in the future.\n"
+ " Use pipeline.run with the --deploy option instead.",
"orange",
)
)
run(
ctx,
git_ref=git_ref,
release_version_6=release_version_6,
release_version_7=release_version_7,
repo_branch=repo_branch,
deploy=True,
all_builds=True,
kitchen_tests=True,
)
@task
def run(
ctx,
git_ref=DEFAULT_BRANCH,
here=False,
release_version_6="nightly",
release_version_7="nightly-a7",
repo_branch="nightly",
deploy=False,
all_builds=True,
kitchen_tests=True,
):
"""
Run a pipeline on the given git ref, or on the current branch if --here is given.
By default, this pipeline will run all builds & tests, including all kitchen tests, but is not a deploy pipeline.
Use --deploy to make this pipeline a deploy pipeline, which will upload artifacts to the staging repositories.
Use --no-all-builds to not run builds for all architectures (only a subset of jobs will run. No effect on pipelines on the default branch).
Use --no-kitchen-tests to not run all kitchen tests on the pipeline.
The --release-version-6 and --release-version-7 options indicate which release.json entries are used.
To not build Agent 6, set --release-version-6 "". To not build Agent 7, set --release-version-7 "".
The --repo-branch option indicates which branch of the staging repository the packages will be deployed to (useful only on deploy pipelines).
If other pipelines are already running on the git ref, the script will prompt the user to confirm if these previous
pipelines should be cancelled.
Examples
Run a pipeline on my-branch:
inv pipeline.run --git-ref my-branch
Run a pipeline on the current branch:
inv pipeline.run --here
Run a pipeline without kitchen tests on the current branch:
inv pipeline.run --here --no-kitchen-tests
Run a deploy pipeline on the 7.28.0 tag, uploading the artifacts to the stable branch of the staging repositories:
inv pipeline.run --deploy --git-ref 7.28.0 --release-version-6 "6.28.0" --release-version-7 "7.28.0" --repo-branch "stable"
"""
project_name = "DataDog/datadog-agent"
gitlab = Gitlab()
gitlab.test_project_found(project_name)
if deploy:
# Check the validity of the deploy pipeline
check_deploy_pipeline(
gitlab, project_name, git_ref, release_version_6, release_version_7, repo_branch,
)
# Force all builds and kitchen tests to be run
if not all_builds:
print(
color_message(
"WARNING: ignoring --no-all-builds option, RUN_ALL_BUILDS is automatically set to true on deploy pipelines",
"orange",
)
)
all_builds = True
if not kitchen_tests:
print(
color_message(
"WARNING: ignoring --no-kitchen-tests option, RUN_KITCHEN_TESTS is automatically set to true on deploy pipelines",
"orange",
)
)
kitchen_tests = True
if here:
git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
pipelines = get_running_pipelines_on_same_ref(gitlab, project_name, git_ref)
if pipelines:
print(
"There are already {} pipeline(s) running on the target git ref.".format(len(pipelines)),
"For each of them, you'll be asked whether you want to cancel them or not.",
"If you don't need these pipelines, please cancel them to save CI resources.",
"They are ordered from the newest one to the oldest one.\n",
sep='\n',
)
cancel_pipelines_with_confirmation(gitlab, project_name, pipelines)
pipeline_id = trigger_agent_pipeline(
gitlab,
project_name,
git_ref,
release_version_6,
release_version_7,
repo_branch,
deploy=deploy,
all_builds=all_builds,
kitchen_tests=kitchen_tests,
)
wait_for_pipeline(gitlab, project_name, pipeline_id)
@task
def follow(ctx, id=None, git_ref=None, here=False):
"""
Follow a pipeline's progress in the CLI.
Use --here to follow the latest pipeline on your current branch.
Use --git-ref to follow the latest pipeline on a given tag or branch.
Use --id to follow a specific pipeline.
Examples:
inv pipeline.follow --git-ref my-branch
inv pipeline.follow --here
inv pipeline.follow --id 1234567
"""
project_name = "DataDog/datadog-agent"
gitlab = Gitlab()
gitlab.test_project_found(project_name)
if id is not None:
wait_for_pipeline(gitlab, project_name, id)
elif git_ref is not None:
wait_for_pipeline_from_ref(gitlab, project_name, git_ref)
elif here:
git_ref = ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
wait_for_pipeline_from_ref(gitlab, project_name, git_ref)
def wait_for_pipeline_from_ref(gitlab, project_name, ref):
pipeline = Gitlab().last_pipeline_for_ref(project_name, ref)
if pipeline is not None:
wait_for_pipeline(gitlab, project_name, pipeline['id'])
else:
print("No pipelines found for {ref}".format(ref=ref))
raise Exit(code=1)
# Tasks to trigger pipeline notifications
GITHUB_SLACK_MAP = {
"@DataDog/agent-platform": "#agent-platform",
"@DataDog/container-integrations": "#container-integration",
"@DataDog/integrations-tools-and-libraries": "#intg-tools-libs",
"@DataDog/agent-network": "#network-agent",
"@DataDog/agent-security": "#security-and-compliance-agent",
"@DataDog/agent-apm": "#apm-agent",
"@DataDog/infrastructure-integrations": "#infrastructure-integrations",
"@DataDog/processes": "#processes",
"@DataDog/agent-core": "#agent-core",
"@DataDog/container-app": "#container-app",
"@Datadog/metrics-aggregation": "#metrics-aggregation",
"@Datadog/serverless": "#serverless-agent",
"@DataDog/agent-all": "#datadog-agent-pipelines",
}
UNKNOWN_OWNER_TEMPLATE = """The owner `{owner}` is not mapped to any slack channel.
Please check for typos in the JOBOWNERS file and/or add them to the Github <-> Slack map.
"""
def generate_failure_messages(base):
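    # Build one Slack message per owning team from the failed jobs of the current
    # pipeline, plus a global message for "@DataDog/agent-all". Jobs owned by
    # "@DataDog/multiple" are expanded into per-test failures, and jobs owned by
    # "@DataDog/do-not-notify" are skipped.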
project_name = "DataDog/datadog-agent"
all_teams = "@DataDog/agent-all"
failed_jobs = get_failed_jobs(project_name, os.getenv("CI_PIPELINE_ID"))
# Generate messages for each team
messages_to_send = defaultdict(lambda: TeamMessage(base))
messages_to_send[all_teams] = SlackMessage(base, jobs=failed_jobs)
failed_job_owners = find_job_owners(failed_jobs)
for owner, jobs in failed_job_owners.items():
if owner == "@DataDog/multiple":
for job in jobs:
for test in get_failed_tests(project_name, job):
messages_to_send[all_teams].add_test_failure(test, job)
for owner in test.owners:
messages_to_send[owner].add_test_failure(test, job)
elif owner == "@DataDog/do-not-notify":
# Jobs owned by @DataDog/do-not-notify do not send team messages
pass
elif owner == all_teams:
# Jobs owned by @DataDog/agent-all will already be in the global
# message, do not overwrite the failed jobs list
pass
else:
messages_to_send[owner].failed_jobs = jobs
return messages_to_send
@task
def notify_failure(_, notification_type="merge", print_to_stdout=False):
"""
Send failure notifications for the current pipeline. CI-only task.
Use the --print-to-stdout option to test this locally, without sending
real slack messages.
"""
header = ""
if notification_type == "merge":
header = ":host-red: :merged: datadog-agent merge"
elif notification_type == "deploy":
header = ":host-red: :rocket: datadog-agent deploy"
base = base_message(header)
try:
messages_to_send = generate_failure_messages(base)
except Exception as e:
buffer = io.StringIO()
print(base, file=buffer)
print("Found exception when generating notification:", file=buffer)
traceback.print_exc(limit=-1, file=buffer)
print("See the job log for the full exception traceback.", file=buffer)
messages_to_send = {
"@DataDog/agent-all": SlackMessage(buffer.getvalue()),
}
# Print traceback on job log
print(e)
traceback.print_exc()
# Send messages
for owner, message in messages_to_send.items():
channel = GITHUB_SLACK_MAP.get(owner, "#datadog-agent-pipelines")
        if owner not in GITHUB_SLACK_MAP:
message.base_message += UNKNOWN_OWNER_TEMPLATE.format(owner=owner)
message.coda = "If there is something wrong with the notification please contact #agent-platform"
if print_to_stdout:
print("Would send to {channel}:\n{message}".format(channel=channel, message=str(message)))
else:
send_slack_message(channel, str(message)) # TODO: use channel variable
| 38.096 | 145 | 0.666807 |
b34c2d49fd98275f79a55a1bee988d1b09ff2e17 | 1,142 | py | Python | venv/lib/python3.7/site-packages/djangosecure/fileslib.py | Justin-Bee/SER401_Trynkit | 3bf826121b2115f67271d88aa253075d71e81a90 | [
"Apache-2.0"
] | 4 | 2017-01-15T08:46:56.000Z | 2021-03-11T21:35:48.000Z | venv/lib/python3.7/site-packages/djangosecure/fileslib.py | Justin-Bee/SER401-Trynkit | 3bf826121b2115f67271d88aa253075d71e81a90 | [
"Apache-2.0"
] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | djangosecure/fileslib.py | rafahsolis/djangosecure | 698169e8f9cd2665fa4db27f27b9dcf0f09ad54b | [
"Apache-2.0"
] | 2 | 2019-10-10T17:06:33.000Z | 2020-06-16T05:34:13.000Z | # -*-coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
# from django.utils.translation import ugettext as _
def check_or_create_dir(directorio, logger=None):
"""
Check if some path exists, creating it if not
    :param directorio: directory path to check or create
    :param logger: logging logger instance
    :return: True if the path exists or was created, False on errors
    """
    # TODO: modify so that instead of a loop it takes a mkdir -p style parameter
if not directorio:
return
if not os.path.isdir(directorio):
try:
os.makedirs(directorio)
if logger:
logger.info('Created directory: ' + directorio)
else:
print('Created directory: ' + directorio)
except IOError:
if logger:
logger.info('Error creating directory: ' + directorio + ' Check user permissions')
else:
print('Error creating directory: ' + directorio + ' Check user permissions')
return False
return True
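# Illustrative usage (the path below is only an example):
#
#   if check_or_create_dir('/srv/myapp/logs', logger=None):
#       ...  # safe to write into the directory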
def set_perms(path, perms):
raise NotImplementedError('Not implemented yet')
| 30.864865 | 98 | 0.626095 |
4b7b978f42688b99f8622d1cfd80d14252969ecb | 25,606 | py | Python | tests/test_factory.py | g4brielvs/faker | bf206c96d347f948cc189e0f321e8dd0b55d0149 | [
"MIT"
] | null | null | null | tests/test_factory.py | g4brielvs/faker | bf206c96d347f948cc189e0f321e8dd0b55d0149 | [
"MIT"
] | null | null | null | tests/test_factory.py | g4brielvs/faker | bf206c96d347f948cc189e0f321e8dd0b55d0149 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
import re
import unittest
import string
import sys
from ipaddress import ip_address, ip_network
import six
from faker import Generator, Faker
from faker.generator import random
from faker.utils import text, decorators
class BarProvider(object):
def foo_formatter(self):
return 'barfoo'
class FooProvider(object):
def foo_formatter(self):
return 'foobar'
def foo_formatter_with_arguments(self, param='', append=''):
return 'baz' + param + append
class FactoryTestCase(unittest.TestCase):
def setUp(self):
self.generator = Generator()
self.provider = FooProvider()
self.generator.add_provider(self.provider)
def test_add_provider_gives_priority_to_newly_added_provider(self):
self.generator.add_provider(BarProvider())
self.assertEqual('barfoo', self.generator.format('foo_formatter'))
def test_get_formatter_returns_callable(self):
formatter = self.generator.get_formatter('foo_formatter')
self.assertTrue(hasattr(formatter, '__call__')
or isinstance(formatter, (classmethod, staticmethod)))
def test_get_formatter_returns_correct_formatter(self):
self.assertEqual(self.provider.foo_formatter,
self.generator.get_formatter('foo_formatter'))
def test_get_formatter_throws_exception_on_incorrect_formatter(self):
with self.assertRaises(AttributeError) as exc:
self.generator.get_formatter('barFormatter')
        self.assertEqual(exc.exception.args[0], 'Unknown formatter "barFormatter"')
faker = Faker('it_IT')
with self.assertRaises(AttributeError) as exc:
faker.get_formatter('barFormatter')
        self.assertEqual(exc.exception.args[0], 'Unknown formatter "barFormatter" with locale "it_IT"')
def test_invalid_locale(self):
with self.assertRaises(AttributeError):
Faker('foo_Bar')
def test_format_calls_formatter_on_provider(self):
self.assertEqual('foobar', self.generator.format('foo_formatter'))
def test_format_transfers_arguments_to_formatter(self):
result = self.generator.format('foo_formatter_with_arguments',
'foo', append='!')
self.assertEqual('bazfoo!', result)
def test_parse_returns_same_string_when_it_contains_no_curly_braces(self):
self.assertEqual('fooBar#?', self.generator.parse('fooBar#?'))
def test_parse_returns_string_with_tokens_replaced_by_formatters(self):
result = self.generator.parse(
'This is {{foo_formatter}} a text with "{{ foo_formatter }}"')
self.assertEqual('This is foobar a text with " foobar "', result)
def test_magic_call_calls_format(self):
self.assertEqual('foobar', self.generator.foo_formatter())
def test_magic_call_calls_format_with_arguments(self):
self.assertEqual('bazfoo',
self.generator.foo_formatter_with_arguments('foo'))
def test_documentor(self):
from faker.cli import print_doc
output = six.StringIO()
print_doc(output=output)
print_doc('address', output=output)
print_doc('faker.providers.person.it_IT', output=output)
assert output.getvalue()
with self.assertRaises(AttributeError):
self.generator.get_formatter('barFormatter')
def test_command(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = six.StringIO()
command = Command(['faker', 'address'])
command.execute()
assert sys.stdout.getvalue()
finally:
sys.stdout = orig_stdout
def test_command_custom_provider(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = six.StringIO()
command = Command(['faker', 'foo', '-i', 'tests.mymodule.en_US'])
command.execute()
assert sys.stdout.getvalue()
finally:
sys.stdout = orig_stdout
def test_cli_seed(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = six.StringIO()
base_args = ['faker', 'address']
target_args = ['--seed', '967']
commands = [Command(base_args + target_args), Command(base_args + target_args)]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
cli_output[1] = cli_output[1][len(cli_output[0]):]
self.assertEqual(cli_output[0][:10], cli_output[1][:10])
finally:
sys.stdout = orig_stdout
def test_cli_seed_with_repeat(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = six.StringIO()
base_args = ['faker', 'address', '-r', '3']
target_args = ['--seed', '967']
commands = [Command(base_args + target_args), Command(base_args + target_args)]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
cli_output[1] = cli_output[1][len(cli_output[0]):]
self.assertEqual(cli_output[0], cli_output[1])
finally:
sys.stdout = orig_stdout
def test_cli_verbosity(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = six.StringIO()
base_args = ['faker', 'address', '--seed', '769']
target_args = ['-v']
commands = [Command(base_args), Command(base_args + target_args)]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
simple_output, verbose_output = cli_output
self.assertNotEqual(simple_output, verbose_output)
finally:
sys.stdout = orig_stdout
def test_slugify(self):
slug = text.slugify("a'b/c")
self.assertEqual(slug, 'abc')
slug = text.slugify("àeìöú")
self.assertEqual(slug, 'aeiou')
slug = text.slugify("àeì.öú")
self.assertEqual(slug, 'aeiou')
slug = text.slugify("àeì.öú", allow_dots=True)
self.assertEqual(slug, 'aei.ou')
slug = text.slugify("àeì.öú", allow_unicode=True)
self.assertEqual(slug, 'àeìöú')
slug = text.slugify("àeì.öú", allow_unicode=True, allow_dots=True)
self.assertEqual(slug, 'àeì.öú')
@decorators.slugify
def fn(s):
return s
slug = fn("a'b/c")
self.assertEqual(slug, 'abc')
@decorators.slugify_domain
def fn(s):
return s
slug = fn("a'b/.c")
self.assertEqual(slug, 'ab.c')
@decorators.slugify_unicode
def fn(s):
return s
slug = fn("a'b/.cé")
self.assertEqual(slug, 'abcé')
def test_random_element(self):
from faker.providers import BaseProvider
provider = BaseProvider(self.generator)
choices = ('a', 'b', 'c', 'd')
pick = provider.random_element(choices)
self.assertTrue(pick in choices)
choices = {'a': 5, 'b': 2, 'c': 2, 'd': 1}
pick = provider.random_element(choices)
self.assertTrue(pick in choices)
choices = {'a': 0.5, 'b': 0.2, 'c': 0.2, 'd': 0.1}
pick = provider.random_element(choices)
self.assertTrue(pick in choices)
def test_binary(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
for _ in range(999):
length = random.randint(0, 2 ** 10)
binary = provider.binary(length)
self.assertTrue(isinstance(binary, (bytes, bytearray)))
self.assertTrue(len(binary) == length)
for _ in range(999):
self.generator.seed(_)
binary1 = provider.binary(_)
self.generator.seed(_)
binary2 = provider.binary(_)
self.assertTrue(binary1 == binary2)
def test_language_code(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
for _ in range(99):
language_code = provider.language_code()
self.assertTrue(isinstance(language_code, six.string_types))
self.assertTrue(re.match(r'^[a-z]{2,3}$', language_code))
def test_locale(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
for _ in range(99):
locale = provider.locale()
self.assertTrue(re.match(r'^[a-z]{2,3}_[A-Z]{2}$', locale))
def test_password(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
def in_string(char, _str):
return char in _str
for _ in range(999):
password = provider.password()
self.assertTrue(any([in_string(char, password) for char in "!@#$%^&*()_+"]))
self.assertTrue(any([in_string(char, password) for char in string.digits]))
self.assertTrue(any([in_string(char, password) for char in string.ascii_uppercase]))
self.assertTrue(any([in_string(char, password) for char in string.ascii_lowercase]))
with self.assertRaises(AssertionError):
provider.password(length=2)
def test_prefix_suffix_always_string(self):
# Locales known to contain `*_male` and `*_female`.
for locale in ("bg_BG", "dk_DK", "en", "ru_RU", "tr_TR"):
f = Faker(locale=locale)
for x in range(20): # Probabilistic testing.
self.assertIsInstance(f.prefix(), six.string_types)
self.assertIsInstance(f.suffix(), six.string_types)
def test_no_words_sentence(self):
from faker.providers.lorem import Provider
provider = Provider(self.generator)
paragraph = provider.paragraph(0)
self.assertEqual(paragraph, '')
def test_words_valueerror(self):
f = Faker()
self.assertRaises(ValueError, f.text, max_nb_chars=4)
def test_no_words_paragraph(self):
from faker.providers.lorem import Provider
provider = Provider(self.generator)
sentence = provider.sentence(0)
self.assertEqual(sentence, '')
def test_ext_word_list(self):
fake = Faker()
my_word_list = [
'danish',
'cheesecake',
'sugar',
'Lollipop',
'wafer',
'Gummies',
'Jelly',
'pie',
]
word = fake.word(ext_word_list=my_word_list)
self.assertIn(word, my_word_list)
def test_no_words(self):
fake = Faker()
words = fake.words(0)
self.assertEqual(words, [])
def test_some_words(self):
fake = Faker()
num_words = 5
words = fake.words(num_words)
self.assertTrue(isinstance(words, list))
self.assertEqual(len(words), num_words)
for word in words:
self.assertTrue(isinstance(word, six.string_types))
self.assertTrue(re.match(r'^[a-z].*$', word))
def test_words_ext_word_list(self):
fake = Faker()
my_word_list = [
'danish',
'cheesecake',
'sugar',
'Lollipop',
'wafer',
'Gummies',
'Jelly',
'pie',
]
num_words = 5
words = fake.words(5, ext_word_list=my_word_list)
self.assertTrue(isinstance(words, list))
self.assertEqual(len(words), num_words)
for word in words:
self.assertTrue(isinstance(word, six.string_types))
self.assertIn(word, my_word_list)
def test_words_ext_word_list_unique(self):
fake = Faker()
my_word_list = [
'danish',
'cheesecake',
'sugar',
'Lollipop',
'wafer',
'Gummies',
'Jelly',
'pie',
]
num_words = 5
words = fake.words(5, ext_word_list=my_word_list, unique=True)
self.assertTrue(isinstance(words, list))
self.assertEqual(len(words), num_words)
checked_words = []
for word in words:
self.assertTrue(isinstance(word, six.string_types))
self.assertIn(word, my_word_list)
# Check that word is unique
self.assertTrue(word not in checked_words)
checked_words.append(word)
def test_unique_words(self):
fake = Faker()
num_words = 20
words = fake.words(num_words, unique=True)
self.assertTrue(isinstance(words, list))
self.assertEqual(len(words), num_words)
checked_words = []
for word in words:
self.assertTrue(isinstance(word, six.string_types))
# Check that word is only letters. No numbers, symbols, etc
self.assertTrue(re.match(r'^[a-zA-Z].*$', word))
# Check that word list is unique
self.assertTrue(word not in checked_words)
checked_words.append(word)
def test_random_pystr_characters(self):
from faker.providers.python import Provider
provider = Provider(self.generator)
characters = provider.pystr()
self.assertEqual(len(characters), 20)
characters = provider.pystr(max_chars=255)
self.assertEqual(len(characters), 255)
characters = provider.pystr(max_chars=0)
self.assertEqual(characters, '')
characters = provider.pystr(max_chars=-10)
self.assertEqual(characters, '')
characters = provider.pystr(min_chars=10, max_chars=255)
self.assertTrue((len(characters) >= 10))
def test_random_pyfloat(self):
from faker.providers.python import Provider
provider = Provider(self.generator)
self.assertTrue(0 <= abs(provider.pyfloat(left_digits=1)) < 10)
self.assertTrue(0 <= abs(provider.pyfloat(left_digits=0)) < 1)
x = abs(provider.pyfloat(right_digits=0))
self.assertTrue(x - int(x) == 0)
with self.assertRaises(ValueError,
msg='A float number cannot have 0 digits '
'in total'):
provider.pyfloat(left_digits=0, right_digits=0)
def test_us_ssn_valid(self):
from faker.providers.ssn.en_US import Provider
provider = Provider(self.generator)
for i in range(1000):
ssn = provider.ssn()
self.assertEqual(len(ssn), 11)
self.assertNotEqual(ssn[0], '9')
self.assertNotEqual(ssn[0:3], '666')
self.assertNotEqual(ssn[0:3], '000')
self.assertNotEqual(ssn[4:6], '00')
self.assertNotEqual(ssn[7:11], '0000')
def test_nl_BE_ssn_valid(self):
provider = Faker('nl_BE').provider('faker.providers.ssn')
for i in range(1000):
ssn = provider.ssn()
self.assertEqual(len(ssn), 11)
gen_seq = ssn[6:9]
gen_chksum = ssn[9:11]
gen_seq_as_int = int(gen_seq)
gen_chksum_as_int = int(gen_chksum)
# Check that the sequence nr is between 1 inclusive and 998 inclusive
self.assertGreater(gen_seq_as_int, 0)
self.assertLessEqual(gen_seq_as_int, 998)
# validate checksum calculation
# Since the century is not part of ssn, try both below and above year 2000
ssn_below = int(ssn[0:9])
chksum_below = 97 - (ssn_below % 97)
ssn_above = ssn_below + 2000000000
chksum_above = 97 - (ssn_above % 97)
results = [chksum_above, chksum_below]
self.assertIn(gen_chksum_as_int, results)
def test_email(self):
factory = Faker()
for _ in range(99):
email = factory.email()
self.assertTrue('@' in email)
def test_ipv4(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4()
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))
for _ in range(99):
address = provider.ipv4(network=True)
self.assertTrue(len(address) >= 9)
self.assertTrue(len(address) <= 18)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$').search(address))
address = provider.ipv4(private=True)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))
address = provider.ipv4(private=False)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))
def test_ipv4_network_class(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
klass = provider.ipv4_network_class()
self.assertIn(klass, 'abc')
def test_ipv4_private(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4_private()
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(ip_address(address).is_private)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))
for _ in range(99):
address = provider.ipv4_private(network=True)
address = six.text_type(address)
self.assertTrue(len(address) >= 9)
self.assertTrue(len(address) <= 18)
self.assertTrue(ip_network(address)[0].is_private)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$').search(address))
def test_ipv4_private_class_a(self):
from faker.providers.internet import Provider, _IPv4Constants
provider = Provider(self.generator)
class_network = _IPv4Constants._network_classes['a']
class_min = class_network.network_address
class_max = class_network.broadcast_address
for _ in range(99):
address = provider.ipv4_private(address_class='a')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(ip_address(address).is_private)
self.assertTrue(ip_address(address) >= class_min)
self.assertTrue(ip_address(address) <= class_max)
def test_ipv4_private_class_b(self):
from faker.providers.internet import Provider, _IPv4Constants
provider = Provider(self.generator)
class_network = _IPv4Constants._network_classes['b']
class_min = class_network.network_address
class_max = class_network.broadcast_address
for _ in range(99):
address = provider.ipv4_private(address_class='b')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(ip_address(address).is_private)
self.assertTrue(ip_address(address) >= class_min)
self.assertTrue(ip_address(address) <= class_max)
def test_ipv4_private_class_c(self):
from faker.providers.internet import Provider, _IPv4Constants
provider = Provider(self.generator)
class_network = _IPv4Constants._network_classes['c']
class_min = class_network.network_address
class_max = class_network.broadcast_address
for _ in range(99):
address = provider.ipv4_private(address_class='c')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertTrue(ip_address(address).is_private)
self.assertTrue(ip_address(address) >= class_min)
self.assertTrue(ip_address(address) <= class_max)
def test_ipv4_public(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4_public()
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertFalse(ip_address(address).is_private, address)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}$').search(address))
for _ in range(99):
address = provider.ipv4_public(network=True)
address = six.text_type(address)
self.assertTrue(len(address) >= 9)
self.assertTrue(len(address) <= 18)
# Hack around ipaddress module
# As 192.0.0.0 is net addr of many 192.0.0.0/* nets
# ipaddress considers them as private
if ip_network(address).network_address != ip_address('192.0.0.0'):
self.assertFalse(ip_network(address)[0].is_private, address)
self.assertTrue(
re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$').search(address))
def test_ipv4_public_class_a(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4_public(address_class='a')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertFalse(ip_address(address).is_private, address)
def test_ipv4_public_class_b(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4_public(address_class='b')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertFalse(ip_address(address).is_private, address)
def test_ipv4_public_class_c(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv4_public(address_class='c')
address = six.text_type(address)
self.assertTrue(len(address) >= 7)
self.assertTrue(len(address) <= 15)
self.assertFalse(ip_address(address).is_private, address)
def test_ipv6(self):
from faker.providers.internet import Provider
provider = Provider(self.generator)
for _ in range(99):
address = provider.ipv6()
self.assertTrue(len(address) >= 3) # ::1
self.assertTrue(len(address) <= 39)
self.assertTrue(
re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{1,4}$').search(address))
for _ in range(99):
address = provider.ipv6(network=True)
self.assertTrue(len(address) >= 4) # ::/8
self.assertTrue(len(address) <= 39 + 4)
self.assertTrue(
re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$').search(
address))
def test_random_sample_unique(self):
from faker.providers import BaseProvider
provider = BaseProvider(self.generator)
# Too many items requested
with self.assertRaises(ValueError):
provider.random_sample('abcde', 6)
# Same length
sample = provider.random_sample('abcd', 4)
self.assertEqual(sorted(sample), list('abcd'))
sample = provider.random_sample('abcde', 5)
self.assertEqual(sorted(sample), list('abcde'))
# Length = 3
sample = provider.random_sample('abcde', 3)
self.assertEqual(len(sample), 3)
self.assertTrue(set(sample).issubset(set('abcde')))
# Length = 1
sample = provider.random_sample('abcde', 1)
self.assertEqual(len(sample), 1)
self.assertTrue(set(sample).issubset(set('abcde')))
# Length = 0
sample = provider.random_sample('abcde', 0)
self.assertEqual(sample, [])
def test_random_number(self):
from faker.providers import BaseProvider
provider = BaseProvider(self.generator)
number = provider.random_number(10, True)
self.assertEqual(len(str(number)), 10)
def test_instance_seed_chain(self):
factory = Faker()
names = ['Real Name0', 'Real Name1', 'Real Name2', 'Real Name0', 'Real Name2']
anonymized = [factory.seed_instance(name).name() for name in names]
self.assertEqual(anonymized[0], anonymized[3])
self.assertEqual(anonymized[2], anonymized[4])
if __name__ == '__main__':
unittest.main() # pragma: no cover
| 35.514563 | 97 | 0.599742 |
be396374442be330e4430c5c700ed4d24e175ed6 | 619 | py | Python | metf_python_client/examples/03-BlynkUnitTest.py | dontsovcmc/metf-python-client | 7d9100bfdb0dc14857fb8cd1f49b50439cba18f0 | [
"MIT"
] | null | null | null | metf_python_client/examples/03-BlynkUnitTest.py | dontsovcmc/metf-python-client | 7d9100bfdb0dc14857fb8cd1f49b50439cba18f0 | [
"MIT"
] | null | null | null | metf_python_client/examples/03-BlynkUnitTest.py | dontsovcmc/metf-python-client | 7d9100bfdb0dc14857fb8cd1f49b50439cba18f0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from metf_python_client import METFClient, HIGH, LOW, OUTPUT
from metf_python_client.boards.nodemcu import LED_BUILTIN_AUX
ESP_HOST = '192.168.3.49'
class TestMethods(unittest.TestCase):
def test_blynk(self):
        # Blink the built-in NodeMCU LED
# https://lowvoltage.github.io/2017/07/09/Onboard-LEDs-NodeMCU-Got-Two
api = METFClient(ESP_HOST)
pin = LED_BUILTIN_AUX
api.pinMode(pin, OUTPUT)
api.digitalWrite(pin, LOW)
api.delay(1000)
api.digitalWrite(pin, HIGH)
if __name__ == '__main__':
unittest.main()
| 22.107143 | 78 | 0.673667 |
8e041c0b8bff9af136b7b62833dca827e752528d | 576 | py | Python | fairmotion/utils/constants.py | CristianNajeraL/fairmotion | 60955cf208af5a1b5bbc46152c29dfa8f60fd9d2 | [
"BSD-3-Clause"
] | 419 | 2020-08-28T20:06:33.000Z | 2022-03-26T19:33:54.000Z | fairmotion/utils/constants.py | CristianNajeraL/fairmotion | 60955cf208af5a1b5bbc46152c29dfa8f60fd9d2 | [
"BSD-3-Clause"
] | 18 | 2020-09-08T21:13:19.000Z | 2022-03-08T17:33:30.000Z | fairmotion/utils/constants.py | CristianNajeraL/fairmotion | 60955cf208af5a1b5bbc46152c29dfa8f60fd9d2 | [
"BSD-3-Clause"
] | 65 | 2020-08-31T18:31:49.000Z | 2022-03-19T06:26:13.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
EPSILON = np.finfo(float).eps
EYE_R = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], float)
EYE_T = np.array(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
float,
)
ZERO_P = np.array([0.0, 0.0, 0.0], float)
ZERO_R = np.zeros((3, 3))
def eye_T():
return EYE_T.copy()
def eye_R():
return EYE_R.copy()
def zero_p():
return ZERO_P.copy()
def zero_R():
return ZERO_R.copy()
| 14.769231 | 76 | 0.508681 |
f6ebee3151d7ebc21ad0f6173e8ad2b71bfe2910 | 79 | py | Python | tests/perfs/test_ozone_perf_measure_L2.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/perfs/test_ozone_perf_measure_L2.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/perfs/test_ozone_perf_measure_L2.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z |
import tests.perfs.test_ozone_perf_measure as tperf
tperf.build_model("L2")
| 15.8 | 51 | 0.810127 |
002986f93736843311f40e8e7caefcbc2a67468d | 1,518 | py | Python | pysimplegui/DemoPrograms/Demo_OpenCV_Webcam.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
] | null | null | null | pysimplegui/DemoPrograms/Demo_OpenCV_Webcam.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
] | 2 | 2020-06-06T00:30:56.000Z | 2021-06-10T22:30:37.000Z | pysimplegui/DemoPrograms/Demo_OpenCV_Webcam.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import PySimpleGUI as sg
import cv2
import numpy as np
"""
Demo program that displays a webcam using OpenCV
"""
def main():
sg.theme('Black')
# define the window layout
layout = [[sg.Text('OpenCV Demo', size=(40, 1), justification='center', font='Helvetica 20')],
[sg.Image(filename='', key='image')],
[sg.Button('Record', size=(10, 1), font='Helvetica 14'),
sg.Button('Stop', size=(10, 1), font='Any 14'),
sg.Button('Exit', size=(10, 1), font='Helvetica 14'), ]]
# create the window and show it without the plot
window = sg.Window('Demo Application - OpenCV Integration',
layout, location=(800, 400))
# ---===--- Event LOOP Read and display frames, operate the GUI --- #
cap = cv2.VideoCapture(0)
recording = False
while True:
event, values = window.read(timeout=20)
if event == 'Exit' or event is None:
return
elif event == 'Record':
recording = True
elif event == 'Stop':
recording = False
img = np.full((480, 640), 255)
        # this is faster, shorter and needs fewer imports
imgbytes = cv2.imencode('.png', img)[1].tobytes()
window['image'].update(data=imgbytes)
if recording:
ret, frame = cap.read()
imgbytes = cv2.imencode('.png', frame)[1].tobytes() # ditto
window['image'].update(data=imgbytes)
main()
| 29.192308 | 98 | 0.557312 |
bdbda4a618a20b678d6f6e622802e9e786dc5033 | 480 | py | Python | clinic/migrations/0004_auto_20181105_1918.py | Pranavraut033/Patient-Management | 4c48087d8a2aa095e63200af5232f738513333ce | [
"Apache-2.0"
] | null | null | null | clinic/migrations/0004_auto_20181105_1918.py | Pranavraut033/Patient-Management | 4c48087d8a2aa095e63200af5232f738513333ce | [
"Apache-2.0"
] | null | null | null | clinic/migrations/0004_auto_20181105_1918.py | Pranavraut033/Patient-Management | 4c48087d8a2aa095e63200af5232f738513333ce | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.4 on 2018-11-05 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clinic', '0003_auto_20181105_1916'),
]
operations = [
migrations.AlterField(
model_name='person',
name='profile',
field=models.ImageField(default='clinic/static/clinic/user_profile/no-profile.png', upload_to='clinic/static/clinic/user_profile/'),
),
]
| 25.263158 | 144 | 0.641667 |
66a3479b2cc92353a1d7c43a0cc83b65f09659c9 | 3,626 | py | Python | src/sentry/utils/data_filters.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/utils/data_filters.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/data_filters.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import fnmatch
import ipaddress
import six
from django.utils.encoding import force_text
from sentry import tsdb
from sentry.utils.safe import get_path
class FilterStatKeys(object):
"""
NOTE: This enum also exists in semaphore, check if alignment is needed when
editing this.
"""
IP_ADDRESS = "ip-address"
RELEASE_VERSION = "release-version"
ERROR_MESSAGE = "error-message"
BROWSER_EXTENSION = "browser-extensions"
LEGACY_BROWSER = "legacy-browsers"
LOCALHOST = "localhost"
WEB_CRAWLER = "web-crawlers"
INVALID_CSP = "invalid-csp"
CORS = "cors"
DISCARDED_HASH = "discarded-hash"
FILTER_STAT_KEYS_TO_VALUES = {
FilterStatKeys.IP_ADDRESS: tsdb.models.project_total_received_ip_address,
FilterStatKeys.RELEASE_VERSION: tsdb.models.project_total_received_release_version,
FilterStatKeys.ERROR_MESSAGE: tsdb.models.project_total_received_error_message,
FilterStatKeys.BROWSER_EXTENSION: tsdb.models.project_total_received_browser_extensions,
FilterStatKeys.LEGACY_BROWSER: tsdb.models.project_total_received_legacy_browsers,
FilterStatKeys.LOCALHOST: tsdb.models.project_total_received_localhost,
FilterStatKeys.WEB_CRAWLER: tsdb.models.project_total_received_web_crawlers,
FilterStatKeys.INVALID_CSP: tsdb.models.project_total_received_invalid_csp,
FilterStatKeys.CORS: tsdb.models.project_total_received_cors,
FilterStatKeys.DISCARDED_HASH: tsdb.models.project_total_received_discarded,
}
class FilterTypes(object):
ERROR_MESSAGES = "error_messages"
RELEASES = "releases"
def is_valid_ip(project_config, ip_address):
"""
Verify that an IP address is not being blacklisted
for the given project.
"""
blacklist = get_path(project_config.config, "filter_settings", "client_ips", "blacklisted_ips")
if not blacklist:
return True
for addr in blacklist:
# We want to error fast if it's an exact match
if ip_address == addr:
return False
        # Check to make sure it's actually a range before testing membership
try:
if "/" in addr and (
ipaddress.ip_address(six.text_type(ip_address))
in ipaddress.ip_network(six.text_type(addr), strict=False)
):
return False
except ValueError:
# Ignore invalid values here
pass
return True
def is_valid_release(project_config, release):
"""
Verify that a release is not being filtered
for the given project.
"""
invalid_versions = get_path(
project_config.config, "filter_settings", FilterTypes.RELEASES, "releases"
)
if not invalid_versions:
return True
release = force_text(release).lower()
for version in invalid_versions:
if fnmatch.fnmatch(release, version.lower()):
return False
return True
def is_valid_error_message(project_config, message):
"""
Verify that an error message is not being filtered
for the given project.
"""
filtered_errors = get_path(
project_config.config, "filter_settings", FilterTypes.ERROR_MESSAGES, "patterns"
)
if not filtered_errors:
return True
message = force_text(message).lower()
for error in filtered_errors:
try:
if fnmatch.fnmatch(message, error.lower()):
return False
except Exception:
# fnmatch raises a string when the pattern is bad.
# Patterns come from end users and can be full of mistakes.
pass
return True
| 29.479675 | 99 | 0.699669 |
294ae008f23633193b477e660601e11a1c5cbae5 | 862 | py | Python | textrenderer/corpus/corpus_utils.py | Sand0001/OCR_textrender_jap_chn_eng | 87a01946bb8cd5229d2babcdf42a18df5b3e561f | [
"MIT"
] | null | null | null | textrenderer/corpus/corpus_utils.py | Sand0001/OCR_textrender_jap_chn_eng | 87a01946bb8cd5229d2babcdf42a18df5b3e561f | [
"MIT"
] | null | null | null | textrenderer/corpus/corpus_utils.py | Sand0001/OCR_textrender_jap_chn_eng | 87a01946bb8cd5229d2babcdf42a18df5b3e561f | [
"MIT"
] | null | null | null | from textrenderer.corpus.chn_corpus import ChnCorpus
from textrenderer.corpus.jap_corpus import JAPCorpus
from textrenderer.corpus.eng_corpus import EngCorpus
from textrenderer.corpus.list_corpus import ListCorpus
from textrenderer.corpus.random_corpus import RandomCorpus
def corpus_factory(corpus_mode: str, chars_file: str, corpus_dir: str, length: int):
corpus_classes = {
"random": RandomCorpus,
"chn": ChnCorpus,
"eng": EngCorpus,
"jap" : JAPCorpus,
"list": ListCorpus
}
if corpus_mode not in corpus_classes.keys():
print("Corpus mode [%s] not implemented yet" % corpus_mode)
exit(-1)
corpus_class = corpus_classes[corpus_mode]
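    # Note: 10 appears to be the caller's default length; for the English corpus it
    # is lowered to 3, presumably because length counts words rather than characters.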
if length == 10 and corpus_mode == 'eng':
length = 3
return corpus_class(chars_file=chars_file, corpus_dir=corpus_dir, length=length)
| 31.925926 | 84 | 0.715777 |
92ed5327185be6fc53251062d468af44037b8684 | 4,451 | py | Python | cnc-gcode/cnc_gui.py | shadelx/cnc-gcode | 01be8dfa83829500672a4862dffa8f30ab1f9f71 | [
"MIT"
] | 2 | 2021-03-05T18:15:41.000Z | 2022-02-16T17:46:40.000Z | cnc-gcode/cnc_gui.py | shadelx/cnc-gcode | 01be8dfa83829500672a4862dffa8f30ab1f9f71 | [
"MIT"
] | null | null | null | cnc-gcode/cnc_gui.py | shadelx/cnc-gcode | 01be8dfa83829500672a4862dffa8f30ab1f9f71 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cnc-gui.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(720, 480)
MainWindow.setStyleSheet("background-color: rgb(46, 52, 54);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.fileLabel = QtWidgets.QLabel(self.centralwidget)
self.fileLabel.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"font: 75 16pt \"Ubuntu Condensed\";\n"
"color: rgb(46, 52, 54);")
self.fileLabel.setAlignment(QtCore.Qt.AlignCenter)
self.fileLabel.setObjectName("fileLabel")
self.horizontalLayout.addWidget(self.fileLabel)
spacerItem = QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.selectBtn = QtWidgets.QPushButton(self.centralwidget)
self.selectBtn.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 16pt \"Ubuntu Condensed\";")
self.selectBtn.setObjectName("selectBtn")
self.horizontalLayout.addWidget(self.selectBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"font: 13pt \"Ubuntu\";\n"
"")
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName("textEdit")
self.verticalLayout_3.addWidget(self.textEdit)
self.clearBtn = QtWidgets.QPushButton(self.centralwidget)
self.clearBtn.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 16pt \"Ubuntu Condensed\";")
self.clearBtn.setObjectName("clearBtn")
self.verticalLayout_3.addWidget(self.clearBtn)
self.printBtn = QtWidgets.QPushButton(self.centralwidget)
self.printBtn.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 16pt \"Ubuntu Condensed\";")
self.printBtn.setObjectName("printBtn")
self.verticalLayout_3.addWidget(self.printBtn)
self.verticalLayout_4.addLayout(self.verticalLayout_3)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "cnc pint"))
self.fileLabel.setText(_translate("MainWindow", "this is a file"))
self.selectBtn.setText(_translate("MainWindow", "select"))
self.textEdit.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:9pt;\">[+ ]Welcome to this program</span></p></body></html>"))
self.clearBtn.setText(_translate("MainWindow", "clear"))
self.printBtn.setText(_translate("MainWindow", "print"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 49.455556 | 209 | 0.702539 |
733ac77798cf95eafa95b5967c2c9355be5a16d3 | 360 | py | Python | g-code-testing/g_code_parsing/g_code_functionality_defs/smoothie/reset_from_error_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | g-code-testing/g_code_parsing/g_code_functionality_defs/smoothie/reset_from_error_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | g-code-testing/g_code_parsing/g_code_functionality_defs/smoothie/reset_from_error_g_code_functionality_def.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class ResetFromErrorGCodeFunctionalityDef(GCodeFunctionalityDefBase):
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
return "Resetting OT-2 from error state"
| 32.727273 | 84 | 0.805556 |
7cbc2deae7088efd9f71225319430c3ad0283bb1 | 106 | py | Python | exercicios-turtle/.history/clown_20210623225950.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | 1 | 2021-11-28T18:49:21.000Z | 2021-11-28T18:49:21.000Z | exercicios-turtle/.history/clown_20210623225950.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | exercicios-turtle/.history/clown_20210623225950.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | import turtle
tortuguita = turtle.Turtle()
tortuguita.dot(30, "black")
tortuguita.forward(100)  # forward() needs a distance argument; 100 is an arbitrary example value
turtle.done() | 13.25 | 27 | 0.764151 |
60ef6442c131f07aad73ffbc33ec6ca20286d0c3 | 815 | py | Python | users/migrations/0001_initial.py | tomito26/awwards | e91c611c90be5f4bc2b0ef9a46cf1b2517ed17c0 | [
"MIT"
] | 1 | 2020-10-27T15:04:34.000Z | 2020-10-27T15:04:34.000Z | users/migrations/0001_initial.py | tomito26/awwards | e91c611c90be5f4bc2b0ef9a46cf1b2517ed17c0 | [
"MIT"
] | null | null | null | users/migrations/0001_initial.py | tomito26/awwards | e91c611c90be5f4bc2b0ef9a46cf1b2517ed17c0 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-24 06:22
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', cloudinary.models.CloudinaryField(max_length=255, verbose_name='profile-photo')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 30.185185 | 121 | 0.660123 |
596f4b27d5f7747dc8211477e4a83579e01a6aeb | 381 | py | Python | apps/accounts/migrations/0002_account_attendant.py | SnifferNandez/bCTF | 76db910d8cd8465e1375a3a02014a4f66d7a4087 | [
"MIT"
] | null | null | null | apps/accounts/migrations/0002_account_attendant.py | SnifferNandez/bCTF | 76db910d8cd8465e1375a3a02014a4f66d7a4087 | [
"MIT"
] | null | null | null | apps/accounts/migrations/0002_account_attendant.py | SnifferNandez/bCTF | 76db910d8cd8465e1375a3a02014a4f66d7a4087 | [
"MIT"
] | 1 | 2019-10-25T20:00:41.000Z | 2019-10-25T20:00:41.000Z | # Generated by Django 2.1.2 on 2018-10-15 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='attendant',
field=models.BooleanField(default=False),
),
]
| 20.052632 | 53 | 0.593176 |
f69e41ec4f68b328cc09c0a81f4dbc74d0ceca00 | 3,037 | py | Python | blog/templatetags/tag_cloud.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 8 | 2018-11-12T21:11:18.000Z | 2020-10-20T09:03:54.000Z | blog/templatetags/tag_cloud.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 5 | 2018-11-28T12:56:57.000Z | 2020-02-05T21:56:48.000Z | blog/templatetags/tag_cloud.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 5 | 2018-11-19T16:47:15.000Z | 2020-02-14T22:34:26.000Z | from django import template
from django.utils.safestring import mark_safe
register = template.Library()
from blog.models import Tag
# Classes for different levels
CLASSES = (
"--skip--", # We don't show the least popular tags
"not-popular-at-all",
"not-very-popular",
"somewhat-popular",
"somewhat-more-popular",
"popular",
"more-than-just-popular",
"very-popular",
"ultra-popular",
)
def make_css_rules(
min_size=0.7, max_size=2.0, units="em", selector_prefix=".tag-cloud ."
):
num_classes = len(CLASSES)
diff_each_time = (max_size - min_size) / (num_classes - 1)
for i, klass in enumerate(CLASSES):
print(
"%s%s { font-size: %.2f%s; }"
% (selector_prefix, klass, min_size + (i * diff_each_time), units)
)
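# For illustration, with the defaults above the first and last rules printed are:
#   .tag-cloud .--skip-- { font-size: 0.70em; }
#   .tag-cloud .ultra-popular { font-size: 2.00em; }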
import math
def log(f):
try:
return math.log(f)
except OverflowError:
return 0
@register.inclusion_tag("includes/tag_cloud.html")
def tag_cloud_for_tags(tags):
"""
Renders a tag cloud of tags. Input should be a non-de-duped list of tag
strings.
"""
return _tag_cloud_helper(tags)
def _tag_cloud_helper(tags):
# Count them all up
tag_counts = {}
for tag in tags:
try:
tag_counts[tag] += 1
except KeyError:
tag_counts[tag] = 1
min_count = min(tag_counts.values())
max_count = max(tag_counts.values())
tags = list(tag_counts.keys())
tags.sort()
html_tags = []
intervals = 10.0
log_max = log(max_count)
log_min = log(min_count)
diff = log_max - log_min
if diff < 0.01:
# Avoid divide-by-zero problems
diff = 0.01
for tag in tags:
score = tag_counts[tag]
index = int((len(CLASSES) - 1) * (log(score) - log_min) / diff)
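        # e.g. with min_count=1 and max_count=100, a tag counted 10 times gets
        # index = int(8 * (log(10) - log(1)) / (log(100) - log(1))) = 4, which is
        # the "somewhat-more-popular" class (illustrative numbers only)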
if CLASSES[index] == "--skip--":
continue
html_tags.append(
mark_safe(
'<a href="/tags/%s/" title="%d item%s" class="%s">%s</a>'
% (tag, score, (score != 1 and "s" or ""), CLASSES[index], tag)
)
)
return {"tags": html_tags}
@register.inclusion_tag("includes/tag_cloud.html")
def tag_cloud():
# We do this with raw SQL for efficiency
from django.db import connection
# Get tags for entries, blogmarks, quotations
cursor = connection.cursor()
cursor.execute(
"select tag from blog_entry_tags, blog_tag where blog_entry_tags.tag_id = blog_tag.id"
)
entry_tags = [row[0] for row in cursor.fetchall()]
cursor.execute(
"select tag from blog_blogmark_tags, blog_tag where blog_blogmark_tags.tag_id = blog_tag.id"
)
blogmark_tags = [row[0] for row in cursor.fetchall()]
cursor.execute(
"select tag from blog_quotation_tags, blog_tag where blog_quotation_tags.tag_id = blog_tag.id"
)
quotation_tags = [row[0] for row in cursor.fetchall()]
cursor.close()
# Add them together
tags = entry_tags + blogmark_tags + quotation_tags
return _tag_cloud_helper(tags)
| 27.609091 | 102 | 0.620678 |
17208a6bae9bd6d371e77af7605be3b806223b75 | 2,157 | py | Python | src/run/runcore.py | ufo2011/NXP-MCUBootUtility | 5d1840b1beee997e5453484309f5466ce166bfbe | [
"Apache-2.0"
] | 174 | 2018-12-30T16:20:36.000Z | 2022-03-24T03:02:02.000Z | src/run/runcore.py | ufo2011/NXP-MCUBootUtility | 5d1840b1beee997e5453484309f5466ce166bfbe | [
"Apache-2.0"
] | 138 | 2019-01-02T16:18:44.000Z | 2022-03-30T15:57:24.000Z | src/run/runcore.py | ufo2011/NXP-MCUBootUtility | 5d1840b1beee997e5453484309f5466ce166bfbe | [
"Apache-2.0"
] | 86 | 2018-12-27T13:16:58.000Z | 2022-02-03T11:42:35.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import rundef
import boot
sys.path.append(os.path.abspath(".."))
from gen import gencore
from ui import uidef
from ui import uivar
from ui import uilang
##
# @brief
class secBootRun(gencore.secBootGen):
def __init__(self, parent):
gencore.secBootGen.__init__(self, parent)
self.blhost = None
self.tgt = None
self.cpuDir = None
self.blhostVectorsDir = None
self.bootDeviceMemId = None
self.bootDeviceMemBase = None
self.comMemWriteUnit = 0x1
self.comMemEraseUnit = 0x1
self.comMemReadUnit = 0x1
def showAsOptimalMemoryUnit( self, memSizeBytes ):
strMemSize = ''
if memSizeBytes >= 0x40000000:
strMemSize = str(memSizeBytes * 1.0 / 0x40000000) + ' GB'
elif memSizeBytes >= 0x100000:
strMemSize = str(memSizeBytes * 1.0 / 0x100000) + ' MB'
elif memSizeBytes >= 0x400:
strMemSize = str(memSizeBytes * 1.0 / 0x400) + ' KB'
else:
strMemSize = str(memSizeBytes) + ' Bytes'
return strMemSize
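        # e.g. 0x800000 bytes is shown as '8.0 MB' (illustrative value)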
def _formatBootloaderVersion( self, version):
identifier0 = chr((version & 0xff000000) >> 24)
identifier1 = str((version & 0xff0000) >> 16)
identifier2 = str((version & 0xff00) >> 8)
identifier3 = str(version & 0xff)
return identifier0 + identifier1 + '.' + identifier2 + '.' + identifier3
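        # e.g. a raw version word of 0x4b010400 is formatted as 'K1.4.0' (illustrative value)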
def getMcuDeviceBootloaderVersion( self ):
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_CurrentVersion)
self.printLog(cmdStr)
if status == boot.status.kStatus_Success:
self.printDeviceStatus('Current Version = ' + self._formatBootloaderVersion(results[0]))
else:
pass
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_TargetVersion)
self.printLog(cmdStr)
if status == boot.status.kStatus_Success:
self.printDeviceStatus('Target Version = ' + self._formatBootloaderVersion(results[0]))
else:
pass
| 33.184615 | 102 | 0.636532 |
77e931670ac1087980d246bf4b7457dc25216a4e | 211,717 | py | Python | test/unit/proxy/controllers/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 1 | 2019-05-25T10:55:58.000Z | 2019-05-25T10:55:58.000Z | test/unit/proxy/controllers/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 2 | 2015-01-20T10:38:14.000Z | 2015-01-20T10:39:40.000Z | test/unit/proxy/controllers/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import email.parser
import itertools
import math
import random
import time
import unittest
from collections import defaultdict
from contextlib import contextmanager
import json
from hashlib import md5
import mock
from eventlet import Timeout
from six import BytesIO
from six.moves import range
import swift
from swift.common import utils, swob, exceptions
from swift.common.exceptions import ChunkWriteTimeout
from swift.common.utils import Timestamp, list_from_csv
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import \
get_container_info as _real_get_container_info
from swift.common.storage_policy import POLICIES, ECDriverError, \
StoragePolicy, ECStoragePolicy
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies, SlowBody, FakeStatus, \
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub, \
fake_ec_node_response, StubResponse, mocked_http_conn
from test.unit.proxy.test_server import node_error_count
def unchunk_body(chunked_body):
body = ''
remaining = chunked_body
while remaining:
hex_length, remaining = remaining.split('\r\n', 1)
length = int(hex_length, 16)
body += remaining[:length]
remaining = remaining[length + 2:]
return body
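# e.g. unchunk_body('3\r\nfoo\r\n0\r\n\r\n') == 'foo', reassembling an HTTP chunked body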
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
try:
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
yield new_connect
left_over_status = list(new_connect.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
finally:
swift.proxy.controllers.base.http_connect = old_connect
swift.proxy.controllers.obj.http_connect = old_connect
swift.proxy.controllers.account.http_connect = old_connect
swift.proxy.controllers.container.http_connect = old_connect
class PatchedObjControllerApp(proxy_server.Application):
"""
This patch is just a hook over the proxy server's __call__ to ensure
that calls to get_container_info will return the stubbed value for
container_info if it's a container info call.
"""
container_info = {}
per_container_info = {}
def __call__(self, *args, **kwargs):
def _fake_get_container_info(env, app, swift_source=None):
_vrs, account, container, _junk = utils.split_path(
env['PATH_INFO'], 3, 4)
# Seed the cache with our container info so that the real
# get_container_info finds it.
ic = env.setdefault('swift.infocache', {})
cache_key = "container/%s/%s" % (account, container)
old_value = ic.get(cache_key)
# Copy the container info so we don't hand out a reference to a
# mutable thing that's set up only once at compile time. Nothing
# *should* mutate it, but it's better to be paranoid than wrong.
if container in self.per_container_info:
ic[cache_key] = self.per_container_info[container].copy()
else:
ic[cache_key] = self.container_info.copy()
real_info = _real_get_container_info(env, app, swift_source)
if old_value is None:
del ic[cache_key]
else:
ic[cache_key] = old_value
return real_info
with mock.patch('swift.proxy.server.get_container_info',
new=_fake_get_container_info), \
mock.patch('swift.proxy.controllers.base.get_container_info',
new=_fake_get_container_info):
return super(
PatchedObjControllerApp, self).__call__(*args, **kwargs)
def make_footers_callback(body=None):
# helper method to create a footers callback that will generate some fake
# footer metadata
cont_etag = 'container update etag may differ'
crypto_etag = '20242af0cd21dd7195a10483eb7472c9'
etag_crypto_meta = \
'{"cipher": "AES_CTR_256", "iv": "sD+PSw/DfqYwpsVGSo0GEw=="}'
etag = md5(body).hexdigest() if body is not None else None
footers_to_add = {
'X-Object-Sysmeta-Container-Update-Override-Etag': cont_etag,
'X-Object-Sysmeta-Crypto-Etag': crypto_etag,
'X-Object-Sysmeta-Crypto-Meta-Etag': etag_crypto_meta,
'X-I-Feel-Lucky': 'Not blocked',
'Etag': etag}
def footers_callback(footers):
footers.update(footers_to_add)
return footers_callback
class BaseObjectControllerMixin(object):
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
# this needs to be set on the test case
controller_cls = None
def setUp(self):
# setup fake rings with handoffs
for policy in POLICIES:
policy.object_ring.max_more_nodes = policy.object_ring.replicas
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
# increase connection timeout to avoid intermittent failures
conf = {'conn_timeout': 1.0}
self.app = PatchedObjControllerApp(
conf, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
        # you can override the container_info just by setting it on the app
# (see PatchedObjControllerApp for details)
self.app.container_info = dict(self.container_info)
# default policy and ring references
self.policy = POLICIES.default
self.obj_ring = self.policy.object_ring
self._ts_iter = (utils.Timestamp(t) for t in
itertools.count(int(time.time())))
def ts(self):
return next(self._ts_iter)
def replicas(self, policy=None):
policy = policy or POLICIES.default
return policy.object_ring.replicas
def quorum(self, policy=None):
policy = policy or POLICIES.default
return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
        # we'll write to one more local node than the replica count
policy_conf.write_affinity_node_count_fn = lambda r: r + 1
object_ring = self.policy.object_ring
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100000
# nothing magic about * 2 + 3, just a way to make it bigger
self.app.request_node_count = lambda r: r * 2 + 3
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# limit to the number we're going to look at in this request
nodes_requested = self.app.request_node_count(object_ring.replicas)
all_nodes = all_nodes[:nodes_requested]
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertGreaterEqual(len(all_local_nodes), self.replicas() + 1)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# the local nodes move up in the ordering
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_first_best_effort(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# we won't have quite enough local nodes...
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertEqual(len(all_local_nodes), self.replicas())
# but the local nodes we do have are at the front of the local iter
first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
self.assertEqual(sorted(all_local_nodes),
sorted(first_n_local_first_nodes))
# but we *still* don't *skip* any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_handoff_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_handoff_local_first_default(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
        preferred_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
        first_primary_nodes = preferred_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes), sorted(first_primary_nodes))
handoff_count = self.replicas() - len(primary_nodes)
        first_handoffs = preferred_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_iter_nodes_handoff_local_first_non_default(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
policy_conf.write_affinity_handoff_delete_count = 1
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
        preferred_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
        first_primary_nodes = preferred_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes), sorted(first_primary_nodes))
handoff_count = policy_conf.write_affinity_handoff_delete_count
        first_handoffs = preferred_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_connect_put_node_timeout(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', req, {}, ('', ''))
self.assertIsNone(res)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_missing_one(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] + [204] * (self.replicas() - 1)
random.shuffle(codes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_not_found(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] * (self.replicas() - 1) + [204]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_mostly_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_204s = [204] * self.quorum()
codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_mostly_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_404s = [404] * self.quorum()
codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = ('not found', 'not found', '', '')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('Pick-Me'), 'yes')
self.assertEqual(resp.body, '')
def test_DELETE_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_limits_expirer_queue_updates(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
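    # Worked example of the assertion above (assuming the default 3-replica
    # ring): min(2, 3) = 2 of the backend requests are asked to clean the
    # expirer queue, and the remaining max(3 - 2, 0) = 1 request is told not
    # to.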
def test_expirer_DELETE_suppresses_expirer_queue_updates(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Backend-Clean-Expiring-Object-Queue': 'no'})
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
o_replicas = self.replicas()
self.assertEqual(counts, {
True: 0,
False: o_replicas,
None: 0,
})
# Make sure we're not sending any expirer-queue update headers here.
# Since we're not updating the expirer queue, these headers would be
# superfluous.
for headers in captured_headers:
self.assertNotIn('X-Delete-At-Container', headers)
self.assertNotIn('X-Delete-At-Partition', headers)
self.assertNotIn('X-Delete-At-Host', headers)
self.assertNotIn('X-Delete-At-Device', headers)
def test_DELETE_write_affinity_before_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() / 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas() + [404] * handoff_count
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_write_affinity_after_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() / 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = ([204] * (self.replicas() - handoff_count) +
[404] * handoff_count +
[204] * handoff_count)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_PUT_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, give_connect=capture_headers,
expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='POST', body='',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(*([200] * self.replicas())):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp.now(offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_container_sync_delete(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts).internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(next(ts).internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_PUT_requires_length(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 411)
def test_container_update_backend_requests(self):
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy)})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# This is the number of container updates we're doing, simulating
# 1 to 15 container replicas.
for num_containers in range(1, 16):
containers = [{'ip': '1.0.0.%s' % i,
'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers)
# how many of the backend headers have a container update
n_container_updates = len(
[headers for headers in backend_headers
if 'X-Container-Partition' in headers])
# how many object-server PUTs can fail and still let the
# client PUT succeed
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_containers))
# you get at least one update per container no matter what
n_expected_updates = max(
n_expected_updates, num_containers)
# you can't have more object requests with updates than you
# have object requests (the container stuff gets doubled up,
# but that's not important for purposes of durability)
n_expected_updates = min(
n_expected_updates, self.replicas(policy))
self.assertEqual(n_expected_updates, n_container_updates)
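    # Worked example of the arithmetic above (assuming a 3-replica ring with
    # quorum 2): for num_containers = 5, n_can_fail = 3 - 2 = 1 and
    # quorum_size(5) = 3, giving 1 + 3 = 4; "at least one update per
    # container" raises that to 5, which is then capped at the 3 object
    # requests actually issued, so 3 backend PUTs carry container updates.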
def test_delete_at_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_del_at_nodes in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_del_at_nodes)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_del_at_nodes)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
devices = []
hosts = []
part = ctr = 0
for given_headers in backend_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
if 'X-Delete-At-Partition' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Partition'), '2')
part += 1
if 'X-Delete-At-Container' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Container'), 'dac')
ctr += 1
devices += (
list_from_csv(given_headers.get('X-Delete-At-Device')))
hosts += (
list_from_csv(given_headers.get('X-Delete-At-Host')))
# same as in test_container_update_backend_requests
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_del_at_nodes))
n_expected_hosts = max(
n_expected_updates, num_del_at_nodes)
self.assertEqual(len(hosts), n_expected_hosts)
self.assertEqual(len(devices), n_expected_hosts)
                # parts don't get doubled up; the maximum is the count of
                # object requests
n_expected_parts = min(
n_expected_hosts, self.replicas(policy))
self.assertEqual(part, n_expected_parts)
self.assertEqual(ctr, n_expected_parts)
# check that hosts are correct
self.assertEqual(
set(hosts),
set('%s:%s' % (h['ip'], h['port']) for h in del_at_nodes))
self.assertEqual(set(devices), set(('sdb',)))
def test_smooth_distributed_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_containers in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_containers)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
                # calculate the number of expected updates; see
                # test_container_update_backend_requests for an explanation
n_expected_updates = min(max(
self.replicas(policy) - self.quorum(policy) +
utils.quorum_size(num_containers), num_containers),
self.replicas(policy))
# the first n_expected_updates servers should have received
# a container update
self.assertTrue(
all([h.get('X-Container-Partition')
for h in backend_headers[:n_expected_updates]]))
# the last n_expected_updates servers should have received
# the x-delete-at* headers
self.assertTrue(
all([h.get('X-Delete-At-Container')
for h in backend_headers[-n_expected_updates:]]))
def _check_write_affinity(
self, conf, policy_conf, policy, affinity_regions, affinity_count):
conf['policy_config'] = policy_conf
app = PatchedObjControllerApp(
conf, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
controller = self.controller_cls(app, 'a', 'c', 'o')
object_ring = app.get_object_ring(int(policy))
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
n['region'] in affinity_regions]
self.assertGreaterEqual(len(all_local_nodes), affinity_count)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, policy))
        # check that the required number of local nodes were moved to the
        # front of the ordering
node_regions = [node['region'] for node in local_first_nodes]
self.assertTrue(
all(r in affinity_regions for r in node_regions[:affinity_count]),
'Unexpected region found in local nodes, expected %s but got %s' %
(affinity_regions, node_regions))
return app
def test_write_affinity_not_configured(self):
# default is no write affinity so expect both regions 0 and 1
self._check_write_affinity({}, {}, POLICIES[0], [0, 1],
2 * self.replicas(POLICIES[0]))
self._check_write_affinity({}, {}, POLICIES[1], [0, 1],
2 * self.replicas(POLICIES[1]))
def test_write_affinity_proxy_server_config(self):
        # without overrides, policies use proxy-server config section options
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
self._check_write_affinity(conf, {}, POLICIES[0], [0],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, {}, POLICIES[1], [0],
self.replicas(POLICIES[1]))
def test_write_affinity_per_policy_config(self):
        # check that per-policy configuration alone is sufficient
conf = {}
policy_conf = {'0': {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '5',
'write_affinity': 'r0'}}
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0], 5)
def test_write_affinity_per_policy_config_overrides_and_inherits(self):
# check per-policy config is preferred over proxy-server section config
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
policy_conf = {'0': {'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '3 * replicas'}}
        # policy 0 inherits default node count, overrides affinity to r1
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
# policy 1 inherits default affinity to r0, overrides node count
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0],
3 * self.replicas(POLICIES[1]))
# end of BaseObjectControllerMixin
@patch_policies()
class TestReplicatedObjController(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_with_footers(self):
footers_callback = make_footers_callback('')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.headers['content-length'] = '0'
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def _test_PUT_with_no_footers(self, test_body='', chunked=False):
        # verify that when no footers are required, the PUT uses a regular
        # single-part body
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=test_body)
if chunked:
req.headers['Transfer-Encoding'] = 'chunked'
etag = md5(test_body).hexdigest()
req.headers['Etag'] = etag
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
codes = [201] * self.replicas()
expect_headers = {'X-Obj-Metadata-Footer': 'yes'}
resp_headers = {
'Some-Header': 'Four',
'Etag': '"%s"' % etag,
}
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': etag,
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = ''.join(info['chunks'])
headers = info['headers']
if chunked or not test_body:
body = unchunk_body(body)
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('chunked', headers['Transfer-Encoding'])
else:
self.assertNotIn('Transfer-Encoding', headers)
if body or not test_body:
self.assertEqual('100-continue', headers['Expect'])
else:
self.assertNotIn('Expect', headers)
self.assertNotIn('X-Backend-Obj-Multipart-Mime-Boundary', headers)
self.assertNotIn('X-Backend-Obj-Metadata-Footer', headers)
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual(etag, headers['Etag'])
self.assertEqual(test_body, body)
self.assertTrue(info['connection'].closed)
def test_PUT_with_chunked_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='asdf', chunked=True)
def test_PUT_with_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='asdf', chunked=False)
def test_PUT_with_no_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='', chunked=False)
def _test_PUT_with_footers(self, test_body=''):
# verify that when footers are required the PUT body is multipart
# and the footers are appended
footers_callback = make_footers_callback(test_body)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.body = test_body
# send bogus Etag header to differentiate from footer value
req.headers['Etag'] = 'header_etag'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
resp_headers = {
'Etag': '"resp_etag"',
# NB: ignored!
'Some-Header': 'Four',
}
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': 'resp_etag',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
headers = info['headers']
boundary = headers['X-Backend-Obj-Multipart-Mime-Boundary']
self.assertTrue(boundary is not None,
"didn't get boundary for conn %r" % (
connection_id,))
self.assertEqual('chunked', headers['Transfer-Encoding'])
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('yes', headers['X-Backend-Obj-Metadata-Footer'])
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual('header_etag', headers['Etag'])
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
boundary)
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
# notice, no commit confirmation
self.assertEqual(len(mime_parts), 2)
obj_part, footer_part = mime_parts
self.assertEqual(obj_part['X-Document'], 'object body')
self.assertEqual(test_body, obj_part.get_payload())
# validate footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
footers_callback(expected)
self.assertDictEqual(expected, footer_metadata)
self.assertTrue(info['connection'].closed)
def test_PUT_with_body_and_footers(self):
self._test_PUT_with_footers(test_body='asdf')
def test_PUT_with_no_body_and_footers(self):
self._test_PUT_with_footers()
def test_txn_id_logging_on_PUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
req.headers['content-length'] = '0'
# we capture stdout since the debug log formatter prints the formatted
# message to stdout
stdout = BytesIO()
with set_http_connect((100, Timeout()), 503, 503), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get final status of PUT to',
stdout.getvalue())
def test_PUT_empty_bad_etag(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['Content-Length'] = '0'
req.headers['Etag'] = '"catbus"'
        # The 2-tuple here makes getexpect() return 422, not 100. For objects
        # that are >0 bytes, you get a 100 Continue and then a 422
        # Unprocessable Entity after sending the body. For zero-byte objects,
        # though, you get the 422 right away because no Expect header is sent
        # with a zero-byte PUT. The second status in the tuple should not be
        # consumed; it's just there to make the FakeStatus treat the first as
        # an expect status, but we make it something other than a 422 so that
        # if it is consumed, the test will fail.
codes = [FakeStatus((422, 200))
for _junk in range(self.replicas())]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 422)
def test_PUT_if_none_match(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 412, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
def test_PUT_connect_exceptions(self):
object_ring = self.app.get_object_ring(None)
self.app.sort_nodes = lambda n, *args, **kwargs: n # disable shuffle
def test_status_map(statuses, expected):
self.app._error_limiting = {}
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
base_status = [201] * 3
# test happy path
test_status_map(list(base_status), 201)
for i in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[i]), 0)
# single node errors and test isolation
for i in range(3):
status_list = list(base_status)
status_list[i] = 503
test_status_map(status_list, 201)
for j in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[j]), 1 if j == i else 0)
# connect errors
test_status_map((201, Timeout(), 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[1]), 1)
test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[0]), 1)
# expect errors
test_status_map((201, 201, (503, None), 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[2]), 1)
test_status_map(((507, None), 201, 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# response errors
test_status_map(((100, Timeout()), 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]), 1)
test_status_map((201, 201, (100, Exception())), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[2]), 1)
test_status_map((201, (100, 507), 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[1]),
self.app.error_suppression_limit + 1)
def test_PUT_connect_exception_with_unicode_path(self):
expected = 201
statuses = (
Exception('Connection refused: Please insert ten dollars'),
201, 201, 201)
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('re: Expect: 100-continue', log_lines[0])
def test_PUT_get_expect_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return log_lines
log_lines = do_test((201, (507, None), 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
log_lines = do_test((201, (503, None), 201, 201))
self.assertIn('ERROR 503 Expect: 100-continue From Object Server',
log_lines[0])
def test_PUT_send_exception_with_unicode_path(self):
def do_test(exc):
conns = set()
def capture_send(conn, data):
conns.add(conn)
if len(conns) == 2:
raise exc
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(201, 201, 201, give_send=capture_send):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to write to', log_lines[0])
do_test(Exception('Exception while sending data on connection'))
do_test(ChunkWriteTimeout())
def test_PUT_final_response_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test((201, (100, Exception('boom')), 201))
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, Timeout()), 201))
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, 507), 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test((201, (100, 500), 201))
self.assertIn('ERROR 500 From Object Server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
def test_DELETE_errors(self):
# verify logged errors with and without non-ascii characters in path
def do_test(path, statuses):
req = swob.Request.blank('/v1' + path,
method='DELETE',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
def test_PUT_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Timeout()
conns = []
def capture_expect(conn):
# stash connections so that we can verify they all get closed
conns.append(conn)
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201, give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
def test_PUT_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'Connection': 'close'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
def test_GET_transfer_encoding_chunked(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'transfer-encoding': 'chunked'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Transfer-Encoding'], 'chunked')
def _test_removes_swift_bytes(self, method):
req = swift.common.swob.Request.blank('/v1/a/c/o', method=method)
with set_http_connect(
200, headers={'content-type': 'image/jpeg; swift_bytes=99'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Content-Type'], 'image/jpeg')
def test_GET_removes_swift_bytes(self):
self._test_removes_swift_bytes('GET')
def test_HEAD_removes_swift_bytes(self):
self._test_removes_swift_bytes('HEAD')
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id'
stdout = BytesIO()
with set_http_connect(503, 200), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for line in stdout.getvalue().splitlines():
self.assertIn('my-txn-id', line)
self.assertIn('From Object Server', stdout.getvalue())
def test_GET_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [503] * self.obj_ring.replicas + [200]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (self.obj_ring.replicas +
self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_not_found_when_404_newer(self):
        # if the proxy receives a 404 it keeps trying other nodes, up to the
        # max request node count, in hopes of finding the object; but if the
        # 404 is more recent than a 200 it should ignore the 200 and return 404
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * self.obj_ring.replicas + \
[200] * self.obj_ring.max_more_nodes
ts_iter = iter([2] * self.obj_ring.replicas +
[1] * self.obj_ring.max_more_nodes)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_x_newest_not_found_when_404_newer(self):
        # if the proxy receives a 404 it keeps trying other nodes, up to the
        # max request node count, in hopes of finding the object; but if the
        # 404 is more recent than a 200 it should ignore the 200 and return 404
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = ([200] +
[404] * self.obj_ring.replicas +
[200] * (self.obj_ring.max_more_nodes - 1))
ts_iter = iter([1] +
[2] * self.obj_ring.replicas +
[1] * (self.obj_ring.max_more_nodes - 1))
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_PUT_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
with mock.patch('time.time', lambda: t):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'),
expected_delete_at)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
def test_container_sync_put_x_timestamp_not_found(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
ts_iter = itertools.repeat(put_timestamp)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(next(ts).internal)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
orig_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(orig_timestamp)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_put_x_timestamp_conflict(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [201] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([None, None, None])
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_other_weird_success_response(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_if_none_match(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'If-None-Match': '*',
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_container_sync_put_x_timestamp_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
            # the object nodes respond 409 because another in-flight request
            # finished and the on-disk timestamp now equals the request's
            # timestamp
put_ts = [put_timestamp] * self.obj_ring.replicas
codes = [409] * self.obj_ring.replicas
ts_iter = iter(put_ts)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_unsynced_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# only one in-flight request finished
put_ts = [None] * (self.obj_ring.replicas - 1)
put_resp = [201] * (self.obj_ring.replicas - 1)
put_ts += [put_timestamp]
put_resp += [409]
ts_iter = iter(put_ts)
codes = put_resp
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_x_timestamp_not_overridden(self):
def do_test(method, base_headers, resp_code):
# no given x-timestamp
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=base_headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertIn('X-Timestamp', req['headers'])
# check value can be parsed as valid timestamp
Timestamp(req['headers']['X-Timestamp'])
# given x-timestamp is retained
def do_check(ts):
headers = dict(base_headers)
headers['X-Timestamp'] = ts.internal
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertEqual(ts.internal,
req['headers']['X-Timestamp'])
do_check(Timestamp.now())
do_check(Timestamp.now(offset=123))
# given x-timestamp gets sanity checked
headers = dict(base_headers)
headers['X-Timestamp'] = 'bad timestamp'
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
with mocked_http_conn() as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertIn('X-Timestamp should be a UNIX timestamp ', resp.body)
do_test('PUT', {'Content-Length': 0}, 200)
do_test('DELETE', {}, 204)
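    # Note on the values exercised by do_check above (illustrative, not
    # asserted by the test): Timestamp.now().internal is a fixed-width float
    # string such as '1510000000.00000', while Timestamp.now(offset=123)
    # appends the offset as a 16-digit hex suffix, e.g.
    # '1510000000.00000_000000000000007b'; both forms are expected to pass
    # through to the backends unchanged.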
@patch_policies(
[StoragePolicy(0, '1-replica', True),
StoragePolicy(1, '5-replica', False),
StoragePolicy(2, '8-replica', False),
StoragePolicy(3, '15-replica', False)],
fake_ring_args=[
{'replicas': 1}, {'replicas': 5}, {'replicas': 8}, {'replicas': 15}])
class TestReplicatedObjControllerVariousReplicas(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
@contextmanager
def capture_http_requests(get_response):
class FakeConn(object):
def __init__(self, req):
self.req = req
self.resp = None
def getresponse(self):
self.resp = get_response(self.req)
return self.resp
class ConnectionLog(object):
def __init__(self):
self.connections = []
def __len__(self):
return len(self.connections)
def __getitem__(self, i):
return self.connections[i]
def __iter__(self):
return iter(self.connections)
def __call__(self, ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
conn = FakeConn(req)
self.connections.append(conn)
return conn
fake_conn = ConnectionLog()
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
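# Illustrative usage of capture_http_requests (a sketch, not part of the
# original tests): it patches swift.common.bufferedhttp.http_connect_raw so
# every backend connection the proxy opens is recorded and answered by the
# supplied get_response callable.
#
#     def fake_response(req):          # req is the captured request dict
#         return StubResponse(404)
#
#     req = swob.Request.blank('/v1/a/c/o')
#     with capture_http_requests(fake_response) as log:
#         resp = req.get_response(self.app)
#     # each entry in log exposes .req (what the proxy sent) and .resp
#     # (what the fake returned)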
class ECObjectControllerMixin(BaseObjectControllerMixin):
# Add a few helper methods for EC tests.
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
return encode_frag_archive_bodies(policy, test_body)
def _make_ec_object_stub(self, pattern='test', policy=None,
timestamp=None):
policy = policy or self.policy
test_body = pattern * policy.ec_segment_size
test_body = test_body[:-random.randint(1, 1000)]
return make_ec_object_stub(test_body, policy, timestamp)
def _fake_ec_node_response(self, node_frags):
return fake_ec_node_response(node_frags, self.policy)
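    # The node_frags lists used throughout the EC GET tests follow a simple
    # convention: entries are in ring order, a dict like
    # {'obj': stub, 'frag': i} means that node serves fragment index i of the
    # given object stub, a list of such dicts means the node holds several
    # frags, and an empty list ([]) means the node responds 404.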
def test_GET_with_duplicate_but_sufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, finding last one on a handoff
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
] * self.policy.ec_duplication_factor
node_frags.append({'obj': obj1, 'frag': 5}) # first handoff
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# expect a request to all primaries plus one handoff
self.assertEqual(self.replicas() + 1, len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_duplicate_but_insufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, but fails to find one
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
        # ... and the rest are 404s, which are limited by request_count
        # (2 * replicas by default) rather than by the max_extra_requests
        # limit, because the retries happen in ResumingGetter when the
        # responses are 404s
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# expect a request to all nodes
self.assertEqual(2 * self.replicas(), len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata - 1)
@patch_policies(with_ec_default=True)
class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _add_frag_index(self, index, headers):
# helper method to add a frag index header to an existing header dict
hdr_name = 'X-Object-Sysmeta-Ec-Frag-Index'
return dict(headers.items() + [(hdr_name, index)])
def test_determine_chunk_destinations(self):
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = i
self.assertEqual(got, expected)
        # now let's make a handoff at the end
        orig_index = putters[-1].node_index
        putters[-1].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[-1].node_index = orig_index
        # now let's make a handoff at the start
putters[0].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[0].node_index = 0
        # now let's make a handoff in the middle
putters[2].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[2].node_index = 2
        # now let's make all of them handoffs
for index in range(self.policy.object_ring.replica_count):
putters[index].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(sorted(got), sorted(expected))
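    # The assertions above rely on _determine_chunk_destinations assigning
    # each handoff putter (node_index of None) the fragment index of the
    # primary it stands in for, so the putter -> frag index mapping is the
    # same whether or not handoffs are present.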
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_statuses = [200] * self.policy.ec_ndata
get_hdrs = [{'Connection': 'close'}] * self.policy.ec_ndata
with set_http_connect(*get_statuses, headers=get_hdrs):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
def _test_if_match(self, method):
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 200)
self.assertEqual(resp.status_int, 200)
# match
resp = _do_test('"data_etag"', 200)
self.assertEqual(resp.status_int, 200)
# no match
resp = _do_test('"frag_etag"', 412)
self.assertEqual(resp.status_int, 412)
# match wildcard against an alternate etag
resp = _do_test('*', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# match against an alternate etag
resp = _do_test('"alt_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# no match against an alternate etag
resp = _do_test('"data_etag"', 412,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 412)
def test_GET_if_match(self):
self._test_if_match('GET')
def test_HEAD_if_match(self):
self._test_if_match('HEAD')
def _test_if_none_match(self, method):
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-None-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 304)
self.assertEqual(resp.status_int, 304)
# match
resp = _do_test('"data_etag"', 304)
self.assertEqual(resp.status_int, 304)
# no match
resp = _do_test('"frag_etag"', 200)
self.assertEqual(resp.status_int, 200)
# match wildcard against an alternate etag
resp = _do_test('*', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# match against an alternate etag
resp = _do_test('"alt_etag"', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# no match against an alternate etag
resp = _do_test('"data_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
def test_GET_if_none_match(self):
self._test_if_none_match('GET')
def test_HEAD_if_none_match(self):
self._test_if_none_match('HEAD')
def test_GET_simple_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = [200] * self.policy.ec_ndata
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [503] + [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_with_body(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = ('asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
fragment_payloads = []
for chunk in chunks:
fragments = self.policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(
fragments * self.policy.ec_duplication_factor)
# sanity
sanity_body = ''
for fragment_payload in fragment_payloads:
sanity_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(real_body), len(sanity_body))
self.assertEqual(real_body, sanity_body)
# list(zip(...)) for py3 compatibility (zip is lazy there)
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(real_body), len(resp.body))
self.assertEqual(real_body, resp.body)
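    # A minimal sketch of the transpose used above (illustrative only): each
    # chunk encodes into one fragment per node, and zip(*fragment_payloads)
    # regroups them into per-node frag archives.
    #
    #     chunk_frags = [['a0', 'b0'], ['a1', 'b1']]  # 2 chunks x 2 frags
    #     node_archives = list(zip(*chunk_frags))
    #     # -> [('a0', 'a1'), ('b0', 'b1')]; node 0 serves 'a0' + 'a1'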
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_body_and_bad_etag(self):
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
conns = []
def capture_expect(conn):
# stash the backend connection so we can verify that it is closed
# (no data will be sent)
conns.append(conn)
# send a bad etag in the request headers
headers = {'Etag': 'bad etag'}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', headers=headers, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers,
give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
# make the footers callback send a bad Etag footer
footers_callback = make_footers_callback('not the test body')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
def test_txn_id_logging_ECPUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
codes = [(100, Timeout(), 503, 503)] * self.replicas()
stdout = BytesIO()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get ',
stdout.getvalue())
def test_PUT_with_explicit_commit_status(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 100, 201)] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
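    # Note (an assumption about the set_http_connect fake defined elsewhere
    # in this module): when an entry in codes is a tuple such as
    # (100, 100, 201), its elements are consumed in order as that
    # connection's successive responses during the multiphase PUT
    # conversation, ending with the commit status; Timeout or Exception
    # instances in the tuple simulate the backend hanging or failing at that
    # step.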
def test_PUT_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [503] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [(100, 503, Exception('not used'))] * (
self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.quorum()
if isinstance(self.policy, ECStoragePolicy):
codes *= self.policy.ec_duplication_factor
codes += [201] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_commit_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Timeout(), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_commit_exception(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Exception('kaboom!'), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_ec_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_ec_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
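    # Summary of the four transfer-data failure tests above: an IOError or a
    # Timeout raised while reading the request body maps to 499, a
    # ChunkReadTimeout maps to 408, and any other unexpected exception maps
    # to 500.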
def test_PUT_with_body(self):
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
# make the footers callback not include Etag footer so that we can
# verify that the correct EC-calculated Etag is included in footers
# sent to backend
footers_callback = make_footers_callback()
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env)
etag = md5(test_body).hexdigest()
size = len(test_body)
req.body = test_body
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers[
'X-Backend-Obj-Content-Length']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
frag_archives = []
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
self.assertIsNotNone(info['boundary'],
"didn't get boundary for conn %r" % (
connection_id,))
self.assertTrue(size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# attach the body to frag_archives list
self.assertEqual(obj_part['X-Document'], 'object body')
frag_archives.append(obj_part.get_payload())
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
# update expected with footers from the callback...
footers_callback(expected)
expected.update({
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Backend-Container-Update-Override-Size': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Container-Update-Override-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_part.get_payload()).hexdigest()})
for header, value in expected.items():
self.assertEqual(footer_metadata[header], value)
# sanity on commit message
self.assertEqual(commit_part['X-Document'], 'put commit')
self.assertEqual(len(frag_archives), self.replicas())
fragment_size = self.policy.fragment_size
node_payloads = []
for fa in frag_archives:
payload = [fa[x:x + fragment_size]
for x in range(0, len(fa), fragment_size)]
node_payloads.append(payload)
fragment_payloads = zip(*node_payloads)
expected_body = ''
for fragment_payload in fragment_payloads:
self.assertEqual(len(fragment_payload), self.replicas())
            fragment_payload = list(fragment_payload)
expected_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(test_body), len(expected_body))
self.assertEqual(test_body, expected_body)
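    # The backend PUT body parsed above is a three-part MIME document: an
    # 'object body' part carrying the frag archive, an 'object metadata'
    # part carrying the JSON footer metadata (EC etag, sizes, container
    # update overrides), and a 'put commit' part completing the multiphase
    # commit.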
def test_PUT_with_footers(self):
# verify footers supplied by a footers callback being added to
# trailing metadata
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
etag = md5(test_body).hexdigest()
size = len(test_body)
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
def do_test(footers_to_add, expect_added):
put_requests = defaultdict(
lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
def footers_callback(footers):
footers.update(footers_to_add)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n"
% info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# validate EC footer metadata - should always be present
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertIsNotNone(
footer_metadata.pop('X-Object-Sysmeta-Ec-Frag-Index'))
expected = {
'X-Object-Sysmeta-Ec-Scheme':
self.policy.ec_scheme_description,
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_part.get_payload()).hexdigest()}
expected.update(expect_added)
for header, value in expected.items():
self.assertIn(header, footer_metadata)
self.assertEqual(value, footer_metadata[header])
footer_metadata.pop(header)
self.assertFalse(footer_metadata)
# sanity check - middleware sets no footer, expect EC overrides
footers_to_add = {}
expect_added = {
'X-Backend-Container-Update-Override-Size': str(size),
'X-Backend-Container-Update-Override-Etag': etag}
do_test(footers_to_add, expect_added)
# middleware cannot overwrite any EC sysmeta
footers_to_add = {
'X-Object-Sysmeta-Ec-Content-Length': str(size + 1),
'X-Object-Sysmeta-Ec-Etag': 'other etag',
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size + 1),
'X-Object-Sysmeta-Ec-Unused-But-Reserved': 'ignored'}
do_test(footers_to_add, expect_added)
# middleware can add x-object-sysmeta- headers including
# x-object-sysmeta-container-update-override headers
footers_to_add = {
'X-Object-Sysmeta-Foo': 'bar',
'X-Object-Sysmeta-Container-Update-Override-Size':
str(size + 1),
'X-Object-Sysmeta-Container-Update-Override-Etag': 'other etag',
'X-Object-Sysmeta-Container-Update-Override-Ping': 'pong'
}
expect_added.update(footers_to_add)
do_test(footers_to_add, expect_added)
# middleware can also overwrite x-backend-container-update-override
# headers
override_footers = {
'X-Backend-Container-Update-Override-Wham': 'bam',
'X-Backend-Container-Update-Override-Size': str(size + 2),
'X-Backend-Container-Update-Override-Etag': 'another etag'}
footers_to_add.update(override_footers)
expect_added.update(override_footers)
do_test(footers_to_add, expect_added)
def test_PUT_old_obj_server(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
responses = [
            # one server will respond with 100-continue but not include the
            # required expect headers, so the connection will be dropped
((100, Exception('not used')), {}),
] + [
            # and plenty of successful responses too
(201, {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes',
}),
] * self.replicas()
random.shuffle(responses)
if responses[-1][0] != 201:
# whoops, stupid random
responses = responses[1:] + [responses[0]]
codes, expect_headers = zip(*responses)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_GET_with_frags_swapped_around(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-657]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
_part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o')
node_key = lambda n: (n['ip'], n['port'])
backend_index = self.policy.get_backend_index
ts = self._ts_iter.next()
response_map = {
node_key(n): StubResponse(
200, ec_archive_bodies[backend_index(i)], {
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Frag-Index': backend_index(i),
'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal
}) for i, n in enumerate(primary_nodes)
}
# swap a parity response into a data node
data_node = random.choice(primary_nodes[:self.policy.ec_ndata])
parity_node = random.choice(
primary_nodes[
self.policy.ec_ndata:self.policy.ec_n_unique_fragments])
(response_map[node_key(data_node)],
response_map[node_key(parity_node)]) = \
(response_map[node_key(parity_node)],
response_map[node_key(data_node)])
def get_response(req):
req_key = (req['ip'], req['port'])
return response_map.pop(req_key)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log), self.policy.ec_ndata)
self.assertEqual(len(response_map),
len(primary_nodes) - self.policy.ec_ndata)
def test_GET_with_no_success(self):
node_frags = [[]] * 28 # no frags on any node
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_only_handoffs(self):
obj1 = self._make_ec_object_stub()
node_frags = [[]] * self.replicas() # all primaries missing
node_frags = node_frags + [ # handoffs
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj1, 'frag': 9},
{'obj': obj1, 'frag': 10}, # parity
{'obj': obj1, 'frag': 11}, # parity
{'obj': obj1, 'frag': 12}, # parity
{'obj': obj1, 'frag': 13}, # parity
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
collected_responses = defaultdict(list)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].append(index)
# GETS would be required to all primaries and then ndata handoffs
self.assertEqual(len(log), self.replicas() + self.policy.ec_ndata)
self.assertEqual(2, len(collected_responses))
# 404s
self.assertEqual(self.replicas(), len(collected_responses[None]))
self.assertEqual(self.policy.ec_ndata,
len(collected_responses[obj1['etag']]))
def test_GET_with_single_missed_overwrite_does_not_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed over write
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj2, 'frag': 6},
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj2, 'frag': 9},
{'obj': obj2, 'frag': 10}, # parity
{'obj': obj2, 'frag': 11}, # parity
{'obj': obj2, 'frag': 12}, # parity
{'obj': obj2, 'frag': 13}, # parity
# {'obj': obj2, 'frag': 2}, # handoff (not used in this test)
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# because the primary nodes are shuffled, it's possible the proxy
# didn't even notice the missed overwrite frag - but it might have
self.assertLessEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, we would
# have collected responses for both etags, and would have made
# one more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
{'obj': obj2, 'frag': 9},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
# handoffs are iter'd in order so proxy will see 404 from this
# final handoff
[],
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_duplicate_and_hidden_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, finding last one on a handoff
node_frags = [
[{'obj': obj1, 'frag': 0}, {'obj': obj1, 'frag': 5}],
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# Expect a maximum of one request to each primary plus one extra
# request to node 1. Actual value could be less if the extra request
# occurs and quorum is reached before requests to nodes with a
# duplicate frag.
self.assertLessEqual(len(log), self.replicas() + 1)
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
        # all nodes have a frag but no one set of frags reaches quorum, so
        # there is no backend 404 response to pass through and the proxy
        # returns 503
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj3, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
{'obj': obj3, 'frag': 8},
{'obj': obj4, 'frag': 8},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(
{obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
collected_etags)
self.assertEqual({200}, collected_status)
def test_GET_with_quorum_durable_files(self):
# verify that only (ec_nparity + 1) nodes need to be durable for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': True}, # durable
{'obj': obj1, 'frag': 2, 'durable': True}, # durable
{'obj': obj1, 'frag': 3, 'durable': True}, # durable
{'obj': obj1, 'frag': 4, 'durable': True}, # durable
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] # handoffs not used in this scenario
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
self.assertEqual(self.policy.ec_ndata, len(log))
collected_durables = []
for conn in log:
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many durables are
# returned but it must be at least 1 and cannot exceed 5
self.assertLessEqual(len(collected_durables), 5)
self.assertGreaterEqual(len(collected_durables), 1)
def test_GET_with_single_durable_file(self):
# verify that a single durable is sufficient for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] # handoffs not used in this scenario
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
collected_durables = []
for conn in log:
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many non-durables
# are returned before the durable, but we do expect a single durable
self.assertEqual(1, len(collected_durables))
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(28, len(log))
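    # In these durability tests a frag counts as durable when the fake
    # response's X-Backend-Durable-Timestamp equals its
    # X-Backend-Data-Timestamp; the proxy needs at least one such frag to
    # serve the object, otherwise it returns 404 even though ec_ndata frag
    # archives are available.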
def test_GET_with_missing_durable_files_and_mixed_etags(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# non-quorate durables for another object won't stop us finding the
# quorate object
node_frags = [
# ec_ndata - 1 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
{'obj': obj2, 'frag': 7, 'durable': True},
{'obj': obj2, 'frag': 8, 'durable': True},
# ec_ndata frags of obj1 are available and one is durable
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': True},
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# Quorum of non-durables for a different object won't
# prevent us hunting down the durable object
node_frags = [
# primaries
{'obj': obj2, 'frag': 0, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 2, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 9, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 12, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': False},
# handoffs
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': True}, # parity
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
def test_GET_with_missing_durables_and_older_durables(self):
# scenario: non-durable frags of newer obj1 obscure all durable frags
# of older obj2, so first 14 requests result in a non-durable set.
# At that point (or before) the proxy knows that a durable set of
# frags for obj2 exists so will fetch them, requiring another 10
# directed requests.
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': True}],
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        # max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
        # min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
# scenario: obj3 has 14 frags but only 2 are durable and these are
# obscured by two non-durable frags of obj1. There is also a single
# non-durable frag of obj2. The proxy will need to do at least 10
# GETs to see all the obj3 frags plus 1 more to GET a durable frag.
# The proxy may also do one more GET if the obj2 frag is found.
# i.e. 10 + 1 durable for obj3, 2 for obj1 and 1 more if obj2 found
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj3 = self._make_ec_object_stub(pattern='obj3',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 0, 'durable': True}],
[{'obj': obj1, 'frag': 1, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 1, 'durable': True}],
[{'obj': obj2, 'frag': 2, 'durable': False}, # obj2 frag
{'obj': obj3, 'frag': 2, 'durable': False}],
[{'obj': obj3, 'frag': 3, 'durable': False}],
[{'obj': obj3, 'frag': 4, 'durable': False}],
[{'obj': obj3, 'frag': 5, 'durable': False}],
[{'obj': obj3, 'frag': 6, 'durable': False}],
[{'obj': obj3, 'frag': 7, 'durable': False}],
[{'obj': obj3, 'frag': 8, 'durable': False}],
[{'obj': obj3, 'frag': 9, 'durable': False}],
[{'obj': obj3, 'frag': 10, 'durable': False}],
[{'obj': obj3, 'frag': 11, 'durable': False}],
[{'obj': obj3, 'frag': 12, 'durable': False}],
[{'obj': obj3, 'frag': 13, 'durable': False}],
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj3['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj3['etag'])
self.assertGreaterEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(log), self.policy.ec_ndata + 4)
def test_GET_with_missing_durables_and_older_non_durables(self):
# scenario: non-durable frags of newer obj1 obscure all frags
# of older obj2, so first 28 requests result in a non-durable set.
# There are only 10 frags for obj2 and one is not durable.
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False}], # obj2 non-durable
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
[], # 1 empty primary
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
# max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
# min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# this scenario should never occur but if there are somehow
# fragments for different content at the same timestamp then the
# object controller should handle it gracefully
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertFalse(resp.body)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7)
def test_GET_mixed_success_with_range(self):
fragment_size = self.policy.fragment_size
ec_stub = self._make_ec_object_stub()
frag_archives = ec_stub['frags']
frag_archive_size = len(ec_stub['frags'][0])
headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self._ts_iter.next()).normal,
}
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
# data nodes with old frag
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
# hopefully we ask for two more
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
# verify that even when last responses to be collected are 416's
# the shortfall of 2xx responses still triggers extra spawned requests
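# (with ec_ndata == 10 the proxy still needs 10 matching 206s, so the two
# 416s below should trigger two extra spawned requests - matching the
# ec_ndata + 2 connections asserted at the end)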
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
# data nodes with old frag
StubResponse(416, frag_index=5),
# hopefully we ask for one more
StubResponse(416, frag_index=6),
# and hopefully we ask for another
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_range_unsatisfiable_mixed_success(self):
responses = [
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
# sneak in bogus extra responses
StubResponse(404),
StubResponse(206, frag_index=8),
# and then just "enough" more 416's
StubResponse(416, frag_index=9),
StubResponse(416, frag_index=10),
StubResponse(416, frag_index=11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
# ec_ndata responses that must agree, plus the bogus extras
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_missing_and_range_unsatisfiable(self):
responses = [ # not quite ec_ndata frags on primaries
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(416, frag_index=7),
StubResponse(416, frag_index=8),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
# TODO: does 416 make sense without a quorum, or should this be a 404?
# a non-range GET of same object would return 404
self.assertEqual(resp.status_int, 416)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_507_will_503(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# bad disk on all other nodes
return responses.pop(0) if responses else StubResponse(507)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_404_will_404(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# no frags on other nodes
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_mixed_ranged_responses_success(self):
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
new_data = ('test' * segment_size)[:-492]
new_etag = md5(new_data).hexdigest()
new_archives = self._make_ec_archive_bodies(new_data)
old_data = ('junk' * segment_size)[:-492]
old_etag = md5(old_data).hexdigest()
old_archives = self._make_ec_archive_bodies(old_data)
frag_archive_size = len(new_archives[0])
# here we deliberately omit X-Backend-Data-Timestamp to check that
# proxy will tolerate responses from object server that have not been
# upgraded to send that header
old_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(old_data),
'X-Object-Sysmeta-Ec-Etag': old_etag,
'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
}
new_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(new_data),
'X-Object-Sysmeta-Ec-Etag': new_etag,
'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
}
# 7 primaries with stale frags, 3 handoffs failed to get new frags
responses = [
StubResponse(206, old_archives[0][:frag_size], old_headers, 0),
StubResponse(206, new_archives[1][:frag_size], new_headers, 1),
StubResponse(206, old_archives[2][:frag_size], old_headers, 2),
StubResponse(206, new_archives[3][:frag_size], new_headers, 3),
StubResponse(206, old_archives[4][:frag_size], old_headers, 4),
StubResponse(206, new_archives[5][:frag_size], new_headers, 5),
StubResponse(206, old_archives[6][:frag_size], old_headers, 6),
StubResponse(206, new_archives[7][:frag_size], new_headers, 7),
StubResponse(206, old_archives[8][:frag_size], old_headers, 8),
StubResponse(206, new_archives[9][:frag_size], new_headers, 9),
StubResponse(206, old_archives[10][:frag_size], old_headers, 10),
StubResponse(206, new_archives[11][:frag_size], new_headers, 11),
StubResponse(206, old_archives[12][:frag_size], old_headers, 12),
StubResponse(206, new_archives[13][:frag_size], new_headers, 13),
StubResponse(206, new_archives[0][:frag_size], new_headers, 0),
StubResponse(404),
StubResponse(404),
StubResponse(206, new_archives[6][:frag_size], new_headers, 6),
StubResponse(404),
StubResponse(206, new_archives[10][:frag_size], new_headers, 10),
StubResponse(206, new_archives[12][:frag_size], new_headers, 12),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, new_data[:segment_size])
self.assertEqual(len(log), self.policy.ec_ndata + 10)
def test_GET_mismatched_fragment_archives(self):
segment_size = self.policy.ec_segment_size
test_data1 = ('test' * segment_size)[:-333]
# N.B. the object data *length* here is different
test_data2 = ('blah1' * segment_size)[:-333]
etag1 = md5(test_data1).hexdigest()
etag2 = md5(test_data2).hexdigest()
ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
# here we're going to *lie* and say the etag here matches
headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
responses1 = [(200, body, self._add_frag_index(fi, headers1))
for fi, body in enumerate(ec_archive_bodies1)]
responses2 = [(200, body, self._add_frag_index(fi, headers2))
for fi, body in enumerate(ec_archive_bodies2)]
req = swob.Request.blank('/v1/a/c/o')
# sanity check responses1
responses = responses1[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag1)
# sanity check responses2
responses = responses2[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag2)
# now mix the responses a bit
mix_index = random.randint(0, self.policy.ec_ndata - 1)
mixed_responses = responses1[:self.policy.ec_ndata]
mixed_responses[mix_index] = responses2[mix_index]
status_codes, body_iter, headers = zip(*mixed_responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
try:
resp.body
except ECDriverError:
resp._app_iter.close()
else:
self.fail('invalid ec fragment response body did not blow up!')
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
msg = error_lines[0]
self.assertIn('Error decoding fragments', msg)
self.assertIn('/a/c/o', msg)
log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
def test_GET_read_timeout(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.01
responses = [
(200, SlowBody(body, 0.1), self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies)
] * self.policy.ec_duplication_factor
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses + [
(404, '', {}) for i in range(
self.policy.object_ring.max_more_nodes)])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# do this inside the fake http context manager, it'll try to
# resume but won't be able to give us all the right bytes
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(self.replicas(), len(error_lines))
nparity = self.policy.ec_nparity
for line in error_lines[:nparity]:
self.assertIn('retrying', line)
for line in error_lines[nparity:]:
self.assertIn('ChunkReadTimeout (0.01s)', line)
def test_GET_read_timeout_resume(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1),
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata + 1])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertIn('retrying', error_lines[0])
def test_fix_response_HEAD(self):
headers = {'X-Object-Sysmeta-Ec-Content-Length': '10',
'X-Object-Sysmeta-Ec-Etag': 'foo'}
# successful HEAD
responses = [(200, '', headers)]
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, '')
# 200 OK shows the original object content length
self.assertEqual(resp.headers['Content-Length'], '10')
self.assertEqual(resp.headers['Etag'], 'foo')
# not found HEAD
responses = [(404, '', {})] * self.replicas() * 2
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# 404 shows actual response body size (i.e. 0 for HEAD)
self.assertEqual(resp.headers['Content-Length'], '0')
def test_PUT_with_slow_commits(self):
# It's important that this timeout be much less than the delay in
# the slow commit responses so that the slow commits are not waited
# for.
self.app.post_quorum_timeout = 0.01
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
# plenty of slow commits
response_sleep = 5.0
codes = [FakeStatus(201, response_sleep=response_sleep)
for i in range(self.replicas())]
# swap out some with regular fast responses
number_of_fast_responses_needed_to_be_quick_enough = \
self.policy.quorum
fast_indexes = random.sample(
range(self.replicas()),
number_of_fast_responses_needed_to_be_quick_enough)
for i in fast_indexes:
codes[i] = 201
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
start = time.time()
resp = req.get_response(self.app)
response_time = time.time() - start
self.assertEqual(resp.status_int, 201)
self.assertLess(response_time, response_sleep)
def test_PUT_with_just_enough_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata + 1)
codes += [503] * (self.policy.ec_nparity - 1)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_less_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata)
codes += [503] * (self.policy.ec_nparity)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_GET_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
# range is outside the real body but within the segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size - 10))
# range is outside both the real body and the segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
# make a request whose range starts beyond the real body size.
body_etag = md5(real_body).hexdigest()
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments * self.policy.ec_duplication_factor]
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertGreaterEqual(start, 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
if start >= segment_size:
responses = [(416, range_not_satisfiable_body,
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, ''.join(node_fragments[i]),
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
class TestECFunctions(unittest.TestCase):
def test_chunk_transformer(self):
def do_test(dup_factor, segments):
segment_size = 1024
orig_chunks = []
for i in range(segments):
orig_chunks.append(chr(i + 97) * segment_size)
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(
replicas=10 * dup_factor),
ec_segment_size=segment_size,
ec_duplication_factor=dup_factor)
encoded_chunks = [[] for _ in range(policy.ec_n_unique_fragments)]
for orig_chunk in orig_chunks:
# each segment produces a set of frags
frag_set = policy.pyeclib_driver.encode(orig_chunk)
for frag_index, frag_data in enumerate(frag_set):
encoded_chunks[frag_index].append(frag_data)
# chunk_transformer buffers and concatenates multiple frags
expected = [''.join(frags) for frags in encoded_chunks]
transform = obj.chunk_transformer(policy)
transform.send(None)
backend_chunks = transform.send(''.join(orig_chunks))
self.assertIsNotNone(backend_chunks) # sanity
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
# flush out last chunk buffer
backend_chunks = transform.send('')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual([''] * policy.ec_n_unique_fragments,
backend_chunks)
do_test(dup_factor=1, segments=1)
do_test(dup_factor=2, segments=1)
do_test(dup_factor=3, segments=1)
do_test(dup_factor=1, segments=2)
do_test(dup_factor=2, segments=2)
do_test(dup_factor=3, segments=2)
def test_chunk_transformer_non_aligned_last_chunk(self):
last_chunk = 'a' * 128
def do_test(dup):
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10 * dup),
ec_segment_size=1024,
ec_duplication_factor=dup)
expected = policy.pyeclib_driver.encode(last_chunk)
transform = obj.chunk_transformer(policy)
transform.send(None)
transform.send(last_chunk)
# flush out last chunk buffer
backend_chunks = transform.send('')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
do_test(1)
do_test(2)
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='unu')],
fake_ring_args=[{'replicas': 28}, {}])
class TestECDuplicationObjController(
ECObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _test_GET_with_duplication_factor(self, node_frags, obj):
# These are basic tests with all backends healthy
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# the backend requests should be >= num_data_fragments
self.assertGreaterEqual(len(log), self.policy.ec_ndata)
# but <= # of replicas
self.assertLessEqual(len(log), self.replicas())
self.assertEqual(len(collected_responses), 1)
etag, frags = collected_responses.items()[0]
# the backend requests will stop at enough ec_ndata responses
self.assertEqual(
len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (len(frags), etag))
# TODO: 'frag' in node_frags really means 'node_index' in the following
# tests. Reconsider the name and semantics, or simply map the values so
# that they are true frag indexes.
def test_GET_with_duplication_factor(self):
obj = self._make_ec_object_stub()
node_frags = [
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
] * 2 # duplicated!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_duplication_factor_almost_duplicate_dispersion(self):
obj = self._make_ec_object_stub()
node_frags = [
# first half of # of replicas are 0, 1, 2, 3, 4, 5, 6
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 6},
# second half of # of replicas are 7, 8, 9, 10, 11, 12, 13
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
{'obj': obj, 'frag': 13},
]
# ...but it still works!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# both obj1 and obj2 have only 9 frags, which is not enough to decode
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
]
# ... and the rest are 404s, which is limited by request_count
# (2 * replicas by default) rather than by the max_extra_requests limit,
# because the retries happen in ResumingGetter when the responses
# are 404s
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust to the last of handoffs
self.assertEqual(len(log), self.replicas() * 2)
# we have obj1, obj2, and 404 NotFound in collected_responses
self.assertEqual(sorted([obj1['etag'], obj2['etag'], None]),
sorted(collected_responses.keys()))
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# primaries
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
]
node_frags = node_frags * 2 # 2 duplication
# so the primaries have indexes 0, 1, 3, 4, 5, 7, 8, 12, 13
# (9 indexes) for obj2 and then a handoff has index 6
node_frags += [
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, we would
# have collected responses for both etags, and would have made
# one more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self.ts())
# 28 nodes are here
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
[],
]
node_frags += [[]] * 13 # Plus 13 nodes in handoff
# finally 10th fragment for obj2 found
node_frags += [[{'obj': obj2, 'frag': 9}]]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
# all nodes have a frag but there is no one set that reaches quorum,
# which means there is no backend 404 response; arguably a 404 would be
# more helpful, but with no quorum the proxy currently returns 503
stub_objects = [
self._make_ec_object_stub(pattern='obj1'),
self._make_ec_object_stub(pattern='obj2'),
self._make_ec_object_stub(pattern='obj3'),
self._make_ec_object_stub(pattern='obj4'),
self._make_ec_object_stub(pattern='obj5'),
self._make_ec_object_stub(pattern='obj6'),
self._make_ec_object_stub(pattern='obj7'),
]
etags = collections.Counter(stub['etag'] for stub in stub_objects)
self.assertEqual(len(etags), 7, etags) # sanity
# primaries and handoffs for required nodes
# this is the (10+4) x 2 case, so 56 requests (2 * replicas) are required
# before giving up. we prepare 7 different objects above, so the responses
# will have 8 fragments for each object
required_nodes = self.replicas() * 2
# fill them out to the primary and handoff nodes
node_frags = []
for frag in range(8):
for stub_obj in stub_objects:
if len(node_frags) >= required_nodes:
# we already have enough responses
break
node_frags.append({'obj': stub_obj, 'frag': frag})
# sanity
self.assertEqual(required_nodes, len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
self.assertEqual(required_nodes, len(log))
self.assertEqual(len(collected_etags), 7)
self.assertEqual({200}, collected_status)
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
]
node_frags = node_frags * 2 # 2 duplications
node_frags += [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(self.replicas() * 2, len(log))
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
# plus, 4 different objects and 7 indexes will be 28 node responses
# here for handoffs
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj4, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj4, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj4, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj4, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj4, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj4, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj4, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# the only difference from the parent class is the handoff stub length
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
# handoffs
]
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertFalse(resp.body)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7)
def _test_determine_chunk_destinations_prioritize(
self, missing_two, missing_one):
# This scenario is only likely for ec_duplication_factor >= 2. If we
# have multiple failures such that the putters collection is missing
# two primary nodes for frag index 'missing_two' and missing one
# primary node for frag index 'missing_one', then we should prioritize
# finding a handoff for frag index 'missing_two'.
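# (with ec_duplication_factor == 2 and 14 unique fragment indexes, frag
# index i is normally served by primary nodes i and i + 14, which is why
# the helper below pops missing_two + ec_n_unique_fragments)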
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# sanity: caller must set missing_two less than ec_n_unique_fragments
self.assertLess(missing_two, self.policy.ec_n_unique_fragments)
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
# sanity - all putters have primary nodes
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = self.policy.get_backend_index(i)
self.assertEqual(got, expected)
# now, for the fragment index that is missing two copies, let's make one
# putter be a handoff
handoff_putter = putters[missing_two]
handoff_putter.node_index = None
# and then pop another putter for a copy of same fragment index
putters.pop(missing_two + self.policy.ec_n_unique_fragments)
# also pop one copy of a different fragment to make one missing hole
putters.pop(missing_one)
# then determine chunk destinations: we have 26 putters here;
# missing_two frag index is missing two copies; missing_one frag index
# is missing one copy, therefore the handoff node should be assigned to
# missing_two frag index
got = controller._determine_chunk_destinations(putters, self.policy)
# N.B. len(putters) is now len(expected) - 2 due to the two pops
self.assertEqual(len(putters), len(got))
# sanity, no node index - for handoff putter
self.assertIsNone(handoff_putter.node_index)
self.assertEqual(got[handoff_putter], missing_two)
# sanity, other nodes except handoff_putter have node_index
self.assertTrue(all(
[putter.node_index is not None for putter in got if
putter != handoff_putter]))
def test_determine_chunk_destinations_prioritize_more_missing(self):
# dropping node indexes 0 and 14 (frag 0) plus 1 (frag 1) should work
self._test_determine_chunk_destinations_prioritize(0, 1)
# dropping node indexes 1 and 15 (frag 1) plus 0 (frag 0) should work, too
self._test_determine_chunk_destinations_prioritize(1, 0)
class TestNumContainerUpdates(unittest.TestCase):
def test_it(self):
test_cases = [
# (container replicas, object replicas, object quorum, expected)
(3, 17, 13, 6), # EC 12+5
(3, 9, 4, 7), # EC 3+6
(3, 14, 11, 5), # EC 10+4
(5, 14, 11, 6), # EC 10+4, 5 container replicas
(7, 14, 11, 7), # EC 10+4, 7 container replicas
(3, 19, 16, 5), # EC 15+4
(5, 19, 16, 6), # EC 15+4, 5 container replicas
(3, 28, 22, 8), # EC (10+4)x2
(5, 28, 22, 9), # EC (10+4)x2, 5 container replicas
(3, 1, 1, 3), # 1 object replica
(3, 2, 1, 3), # 2 object replicas
(3, 3, 2, 3), # 3 object replicas
(3, 4, 2, 4), # 4 object replicas
(3, 5, 3, 4), # 5 object replicas
(3, 6, 3, 5), # 6 object replicas
(3, 7, 4, 5), # 7 object replicas
]
for c_replica, o_replica, o_quorum, exp in test_cases:
c_quorum = utils.quorum_size(c_replica)
got = obj.num_container_updates(c_replica, c_quorum,
o_replica, o_quorum)
self.assertEqual(
exp, got,
"Failed for c_replica=%d, o_replica=%d, o_quorum=%d" % (
c_replica, o_replica, o_quorum))
if __name__ == '__main__':
unittest.main()
| 43.870079 | 79 | 0.573506 |
6bb5ed9bb102e09076a540e684b0750ab417a233 | 57 | py | Python | takeyourmeds/settings/role.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 11 | 2015-06-01T16:31:42.000Z | 2022-03-01T01:20:58.000Z | takeyourmeds/settings/role.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 111 | 2015-07-20T13:23:16.000Z | 2017-09-08T08:17:10.000Z | takeyourmeds/settings/role.py | takeyourmeds/takeyourmeds-web | edf24188f26948902cfb69793b4d5aa3cf8b6dea | [
"MIT"
] | 6 | 2015-07-15T08:08:12.000Z | 2018-06-23T00:13:13.000Z | # overwritten by build system
from roles.local import *
| 14.25 | 29 | 0.77193 |
41895e1c8bbc008bb9fa453d488d8913ef287bca | 8,006 | py | Python | Steering_.py | didghwns0514/AutonomousDriving | 6622a4b28c3c7e7cbc59bd7d24835861dea4eae6 | [
"MIT"
] | 3 | 2021-03-10T02:46:41.000Z | 2021-05-04T00:00:29.000Z | Steering_.py | didghwns0514/AutonomousDriving | 6622a4b28c3c7e7cbc59bd7d24835861dea4eae6 | [
"MIT"
] | null | null | null | Steering_.py | didghwns0514/AutonomousDriving | 6622a4b28c3c7e7cbc59bd7d24835861dea4eae6 | [
"MIT"
] | null | null | null | import cv2
import time
import math
import numpy as np
import matplotlib.pyplot as plt
global destination, lanewidth_Half, lane_Error, destination_I,list_Lane
global t1, start_time, gear, find, point, speed_Obs
global start_time, U_straight_time, J_straight_time, J_straight_time_2
global just_once, just_once_2, t1, t2,t3,a, steer_2,steer_x_length, point,b,past
gear = 0
steer = 0
past = []
steer_angle = []
just_once = 0
just_once_2 =0
U_alpha = 3.4
U_beta = 1.7
J_alpha = 0
J_beta = 0
start_time = 0
U_straight_time = 1
J_straight_time = 1
J_straight_time_2 = 1
speed_Obs = 0
speed_Default = 30
look_ahead = 4
steer_x_length = []
steer_y_length = []
think_x = 0
think_y = 0
find = 0
point = 0
t1 = 0
t2 = 0
t3 = 0
a = 1
b = 0
steer_2 = 0
################Function#######################
def steering(Mission, Obstacle, dotted_Line,points_Path, steer_measured_Comb):#, speed_Default, speed_Obs):
global steer, gear, speed_Obs, speed_Default,start_time, U_straight_time, J_straight_time, J_straight_time_2
global just_once, just_once_2, t1, t2,t3,a, steer_2, steer_x_length, point, b, past
check = Mission * Obstacle
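# 'check' encodes the driving scenario handled below; from the section
# headers it appears that 5 = U-turn, 7 = parallel parking ("JU-CHA"),
# 0 = S-curve path following, and anything else falls through to the
# default lane-following controller.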
################### U-TURN ##################################
if check == 5:
wSPEED = 35
curve_time = 18.22/ (wSPEED * 100 /(360*5.8)) +U_alpha ######## wSTEER = -1970
curve_time_2 = (9.11/( wSPEED * 100 /(360*5.8)))+U_beta ######## wSTEER = 1970
if dotted_Line < 180: # left dotted line
t2 = t1
t1 = time.time()
t3 = t1 - t2
if just_once == 0:
start_time = t1
just_once = 1
if time.time() - start_time -t3 > U_straight_time: ###########straight
steer = -1970
if time.time() - start_time - U_straight_time - t3> curve_time:#############first curve
steer = 1970
if time.time() - curve_time - start_time -U_straight_time -t3 > curve_time_2:###########second curve
steer = 0
check = 10
return steer, speed_Obs#, gear
################## S-CURVE ################################
elif check == 0:
## print "Path",points_Path
## S curve = 4, Narrow = 2
if check == 4 :
look_ahead = 1
look_ahead = 8 ########### 'look_ahead' is distance of what you see.
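# NOTE: the unconditional assignment above overrides the check == 4 case,
# so look_ahead is effectively always 8 in this branch.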
if look_ahead > len(points_Path):
look_ahead = len(points_Path)/2
for i in range(0,len(points_Path) -look_ahead ):
x_length = (points_Path[i+look_ahead][0] - points_Path[i][0])*(0.1/1.41)
y_length = (points_Path[i+look_ahead][1] - points_Path[i][1])*(0.1/1.41)
y_length = abs(y_length)
if x_length == 0. :
## print "1111111111111111111111111"
tan_value = 0
else:
tan_value = 1.04/(abs(pow(x_length,2) + pow(y_length,2) - 1.04)/(2*x_length))
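# The expression above looks like a circle-fit / pure-pursuit style steering
# calculation; 1.04 is presumably a vehicle geometry constant such as the
# wheelbase in metres (an assumption - it is not documented in this file).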
## print pow(x_length, 2), pow(y_length,2), x_length
theta = math.degrees(math.atan(tan_value))
steer_angle_now = steer_measured_Comb/71
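# 71 appears to be the conversion factor between degrees and the platform's
# raw steering command units (assumed from how theta is scaled back by 71
# below).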
steer_angle.append( theta)
#steer_angle[i] = steer_angle_now + theta
steer = (steer_angle[0])*71
## print 'sA', steer_angle
## print "iner steer" ,steer
return steer, speed_Obs#, gear
################ JU - CHA ##################################
elif check == 7:
wSPEED = 35
jucha_time = (9.11/ (wSPEED * 100 /(360*5.8))) +J_alpha ######## in
jucha_time_2 = (9.11/( wSPEED * 100 /(360*5.8))) +J_beta ######## back(out)
if dotted_Line < 180: # left dotted line (VLD.dotted_Detection() is undefined in this module; assuming the dotted_Line argument is meant, as in the U-turn branch)
t1 = time.time()
if just_once_2 == 0:
start_time = t1
just_once_2 = 1
if time.time() - start_time > J_straight_time: ###########straight
steer = 1970
if time.time() - start_time - J_straight_time > jucha_time:#############in
steer = 0
if time.time() - start_time - J_straight_time - jucha_time > (J_straight_time+10):###########in straight
steer = 0
gear = 2
if time.time() - start_time - J_straight_time - jucha_time - J_straight_time - 10 > J_straight_time:###########back straight
steer = 1970
gear = 2
if time.time() - start_time - J_straight_time - jucha_time - J_straight_time - 10 - J_straight_time > jucha_time_2:###########back
steer = 0
gear = 0
check = 10
return steer, speed_Obs#, gear
################ default #######################################
else:
if a ==1 :
if past == points_Path:
speed_Default = 23
else:
speed_Default = 30
past = points_Path
"""ti1 = time.time()
ti3 = ti2 - ti1
ti2 = time.time()"""
steer = []
steer_x_length = []
steer_y_length = []
point = 0
think_x = 0
think_y = 0
find = []
next_steer = 0
look_ahead = 1 ########### 'look_ahead' is distance of what you see.
if len(points_Path) <= 10:
speed_Default = 23
if len(points_Path) <= 3:
return steer, speed_Default
for i in range(0,len(points_Path) -look_ahead):
x_length = (points_Path[i+look_ahead][0] - points_Path[i][0])*(0.1/1.41)
y_length = (points_Path[i+look_ahead][1] - points_Path[i][1])*(0.1/1.41)
y_length = abs(y_length)
steer_x_length.append(x_length)
steer_y_length.append(y_length)
for j in range(0,len(points_Path)-3):
find.append(steer_x_length[j+look_ahead] * steer_x_length[j])
if steer_x_length[j+1] == 0:
if steer_x_length[j] == 0:
find[j] = 1
if find[j] <= 0 :
## if j <= 10:
point = j + 1
## if j > 10:
## point = j +1
if point >= (len(points_Path) -6):
point = len(points_Path) -10
if point > 0:
for k in range(0, point):
think_x = think_x + steer_x_length[k]
think_y = think_y + steer_y_length[k]
if think_x == 0:
tan_value = 0
b = 1
else:
tan_value = 1.04/(abs(pow(think_x,2) + pow(think_y,2) - 1.04)/(2*think_x))
else:
print steer_x_length
if steer_x_length[0] == 0:
tan_value = 0
b = 1
else:
for k in range(0, len(points_Path) - 2):
think_x = think_x + steer_x_length[k]
think_y = think_y + steer_y_length[k]
tan_value = 1.04/(abs(pow(think_x,2) + pow(think_y,2) - 1.04)/(2*think_x))
theta = math.degrees(math.atan(tan_value))
if j <=10:
theta = theta*(2.61)
speed_Default = 23
else:
theta = theta
speed_Default = 30
steer_angle_now = steer_measured_Comb/71
steer = ( theta)*71
## if a == 0:
## steer = steer_2
## a = 1
## if steer_x_length[point+1] == 0:
## if b == 0:
## steer_2 = -steer
## a = 0
## b = 0
print "iner steer", steer
return steer, speed_Default#, gear
##########################################################
'''while True:
try:
steer(points_Path, steer_measured_Comb )
except Exception as e:
print "d",e'''
| 36.226244 | 140 | 0.485136 |
0f6e4de80d71212354fe0e0cecd5bc48b1ac9ef6 | 8,201 | py | Python | comprehensive_scenario/plot_results.py | adeliegorce/tools4reionisation | da97c677a40b93258f78fb5882de05eedae7c304 | [
"MIT"
] | null | null | null | comprehensive_scenario/plot_results.py | adeliegorce/tools4reionisation | da97c677a40b93258f78fb5882de05eedae7c304 | [
"MIT"
] | null | null | null | comprehensive_scenario/plot_results.py | adeliegorce/tools4reionisation | da97c677a40b93258f78fb5882de05eedae7c304 | [
"MIT"
] | 1 | 2020-07-29T14:14:51.000Z | 2020-07-29T14:14:51.000Z | import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from scipy import integrate
import sys
from highz import *
import highz_config as hz
import triangleme2 as triangleme
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
# plt.ion()
colors = ['#1f77b4','#d62728', '#ff7f0e', '#2ca02c','#9467bd', '#8c564b', '#e377c2', '#7f7f7f','#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
cmaps = ['Blues','Reds','Oranges','Greens','Purples','copper']
#############################Robertson
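# The (a, b, c, d) parameters fitted below parametrize the cosmic star
# formation rate density; the "Robertson" tag suggests a Robertson et al.
# (2015)-style fit, with sfrd() and QHII() supplied by the highz module
# (an inference from the imports and labels, not stated explicitly here).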
def loading_verbose(msg):
sys.stdout.write('\r'+msg)
sys.stdout.flush()
def readmcmc(data,subn=100000,extent=[]):
ns = len(data)
print('\nTriangle plot...')
labels = [r"$a$", r"$b$", r"$c$", r"$d$",r"$\tau$"]
if (0<len(extent)<5):
raise ValueError('Wrong format for extents')  # 'Error' was undefined; ValueError assumed
fig00 = plt.figure(figsize=(10,10))
# plot_name = './Figures/triangle_plot'
################### READING MCMC
for u,data1 in enumerate(data):
print('\nImporting data %s' %(data1))
outfile = './Outfiles/chains_'+str(data1)+'.dat'
samples = np.loadtxt(outfile,unpack=True,usecols=(0,1,2,3,4)) #includes tau
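# assumed chain-file layout: columns 0-3 are the model parameters
# (a, b, c, d) and column 4 is the derived Thomson optical depth tau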
# print(str(data1),np.median(samples,axis=1))
print('a = %.4f +/- %.4f' %(np.median(samples[0,:]),np.std(samples[0,:])))
print('b = %.2f +/- %.2f' %(np.median(samples[1,:]),np.std(samples[1,:])))
print('c = %.2f +/- %.2f' %(np.median(samples[2,:]),np.std(samples[2,:])))
print('d = %.2f +/- %.2f' %(np.median(samples[3,:]),np.std(samples[3,:])))
print('tau = %.4f +/- %.4f' %(np.median(samples[4,:]),np.std(samples[4,:])))
subn = min(subn,samples.shape[1])
subsamples = samples[:,np.random.randint(0,samples.shape[1], size=subn)]
if (len(extent)==0):
triangleme.corner(subsamples.T, labels=labels, plot_contours=True, plot_datapoints=False, plot_ellipse=False, ls='.',cmap=cmaps[u],color=colors[u],lw=.8,fig=fig00)
else:
triangleme.corner(subsamples.T, labels=labels, plot_contours=True, plot_datapoints=False, plot_ellipse=False, ls='.', extents=extent,cmap=cmaps[u],color=colors[u],lw=.8,fig=fig00)
# plot_name = plot_name+'_'+str(data1)  # disabled: plot_name is only defined in the commented-out save path above
# plt.savefig(plot_name+'.png')
return
def model_plots(data,nrand=10000,CL=95):
ns = len(data)
percentile1=(100-CL)/2
percentile2=CL+(100-CL)/2
from xetanh import xe_tanh,xe_asym
# matplotlib.rcParams.update({'font.size': 18})
print('\nPlotting x_e, rho and tau...')
########################################## DATA
zdi,rhodim,rhodi, rhodip=np.loadtxt("./Data/data.ir.txt",skiprows=1,unpack=True)
zdu,rhodum,rhodu, rhodup=np.loadtxt("./Data/data.uv.txt",skiprows=1,unpack=True)
rhodd = np.hstack((rhodi,rhodu))
errdd = np.hstack((rhodip,rhodup))-np.hstack((rhodi,rhodu))
z_data, xe_data, dxep, dxem = np.loadtxt("./Data/constraints_readble.txt",usecols=(0,1,2,3),dtype=float,unpack=True)
ref = np.loadtxt("./Data/constraints_readble.txt",usecols=(4),dtype=str,unpack=True)
z = np.linspace(0,30, 300)
rec = [0.1, 0.1, 0.85, 0.85]
# xe figure
figx = plt.figure(figsize=(12,8))
axx = figx.add_axes(rec)
axx.set_xlim(4,16)
axx.set_xlabel(r'Redshift $z$',fontsize=20)
axx.set_ylim(0.,hz.fH)
axx.set_ylabel(r'IGM ionised fraction $x_e$',fontsize=20)
#data from LAEs
w=np.where((ref=='LAE') & (dxep==0))
uplims = np.zeros(z_data.shape)
uplims[w] = True
axx.errorbar(z_data[w], xe_data[w], yerr=[dxem[w],dxep[w]], fmt='*',uplims=uplims[w],ecolor='k',color='k',elinewidth=1.,ms=8,capsize=3,alpha=.7)
w=np.where((ref=='LAE') & (dxep!=0))
axx.errorbar(z_data[w], xe_data[w], yerr=[dxem[w],dxep[w]], fmt='*',ecolor='k',color='k',elinewidth=1.,ms=8, label=r'Ly-$\alpha$ emitters',capsize=3,alpha=.7)
#Data from damping wings
w=np.where((ref=='QSOs') & (dxep==0))
uplims = np.zeros(z_data.shape)
uplims[w] = True
axx.errorbar(z_data[w], xe_data[w], yerr=[dxem[w],dxep[w]], fmt='s',uplims=uplims[w],ecolor='k',color='k',elinewidth=1.,ms=5,capsize=3,alpha=.7)
w=np.where(ref=='QSOs')
axx.errorbar(z_data[w], xe_data[w], yerr=[dxem[w],dxep[w]], fmt='s',ecolor='k',color='k',elinewidth=1.,ms=5, label='QSO spectra',capsize=3,alpha=.7)
w=np.where(ref=='GRB')
axx.errorbar(z_data[w], xe_data[w], yerr=[dxem[w],dxep[w]], fmt='D',ecolor='k',color='k',elinewidth=1.,ms=5,label='GRB afterglows',capsize=3,alpha=.7)
#tanh model for xe
axx.plot(z,xe_tanh(z,8.5,0.5,helium=hz.HeliumI,helium2=hz.HeliumII),'purple',linestyle='--',linewidth=1.5,label=r'Symmetric model from P16')
#asymmetric model for xe
axx.plot(z,xe_asym(z,6.1,6.6,helium=hz.HeliumI,helium2=hz.HeliumII),color='purple',linestyle='-.',linewidth=1.5,label=r'Asymmetric model from P16')
# rho figure
figr = plt.figure(figsize=(12,8))
axr = figr.add_axes(rec)
axr.set_xlim(0.1,15)
axr.set_xlabel(r'Redshift $z$',fontsize=20)
axr.set_ylabel(r'$\mathrm{log}(\rho_{\mathrm{SFR}})\ [\mathrm{M}_{\odot} \mathrm{yr}^{-1} \mathrm{Mpc}^{-3}]$',fontsize=20)
axr.set_ylim(-4,0)
#plot of the UV and IR data
axr.errorbar(zdi, rhodi, yerr=[rhodi-rhodim,rhodip-rhodi], fmt='o',elinewidth=1,capsize=3, color='purple', ecolor='purple', label='IR luminosity density',alpha=.7)
axr.errorbar(zdu, rhodu, yerr=[rhodu-rhodum,rhodup-rhodu], fmt='o',elinewidth=1,capsize=3, color='plum', ecolor='plum', label='UV luminosity density',alpha=.7)
# tau figure
figt = plt.figure(figsize=(8,8))
axt = figt.add_axes([0.12, 0.1, 0.84, 0.85])
axt.set_xlim(0.0,0.10)
axt.set_ylim(0.,1.1)
axt.set_xlabel(r'Thomson optical depth $\tau$',fontsize=24)
axt.set_ylabel(r'Probability distribution',fontsize=24)
# axt.set_ylim(0.,1.5)
#gaussian with Planck value
x0=np.arange(1000)/10000.
gg= np.exp(-(x0-hz.tau)**2/2./hz.sigtau**2)/np.sqrt(2.*np.pi)
gg=gg/np.max(gg)
axt.plot(x0, gg,color='purple',lw=1.5,label='Planck+2018')
################### READING MCMC
# endstr = '.png'
for ii,data1 in enumerate(data):
print('\nImporting data %s' %(data1))
outfile = './Outfiles/chains_'+str(data1)+'.dat'
out = np.loadtxt(outfile,unpack=True)
samples = out[:-2,:]
tau = out[-2,:]
# endstr = '_'+str(data1)+endstr
rho, xe = np.zeros((z.size,nrand)), np.zeros((z.size,nrand))
u=0
for theta in samples.T[np.random.randint(samples.shape[1], size=nrand)]:  # draw nrand random posterior samples (columns of the chain)
rho[:,u] = sfrd(z[::-1],tuple(theta))
xe[:,u] = QHII(z[::-1],hz.y0,tuple(theta)).flatten()
msg = str('Computing confidence intervals... %i%% done' %((u+1)/nrand*100))
loading_verbose(msg)
u=u+1
msg = str('Computing confidence intervals... 100% done.')
loading_verbose(msg)
axr.fill_between(z[::-1], np.log10(np.percentile(rho,percentile2,axis=1)), np.log10(np.percentile(rho,percentile1,axis=1)), color=colors[ii],alpha=.3)
axr.plot(z[::-1],np.log10(np.median(rho,axis=1)),color=colors[ii],lw=2)#,label=str(data1))
axx.fill_between(z[::-1], np.percentile(xe,percentile2,axis=1), np.percentile(xe,percentile1,axis=1), color=colors[ii],alpha=.3)
axx.plot(z[::-1],np.median(xe,axis=1),color=colors[ii],lw=2)#,label=str(data1))
values,bins= np.histogram(tau,bins=100,density=True)#,label=str(data1))
bins=(bins[1:]+bins[:-1])/2
axt.plot(bins,values/np.max(values),drawstyle='steps-mid',color=colors[ii],lw=1.5)
# axt.axvline(np.median(tau),color=colors[ii],lw=1.5)
axr.legend(loc=1, frameon=False,fontsize=18)
# figr.savefig('./Figures/rho_SFR'+endstr)
axx.legend(loc='best', frameon=False,fontsize=18)
# figx.savefig('./Figures/xe'+endstr)
axt.legend(loc='best', frameon=False,fontsize=18)
# figt.savefig('./Figures/tau_distri'+endstr)
print(' ')
return
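if __name__ == "__main__":
    # Minimal usage sketch, not part of the original pipeline: the chain name
    # below is hypothetical and must correspond to an existing
    # ./Outfiles/chains_<name>.dat file produced beforehand by the MCMC run.
    model_plots(["fiducial"], nrand=2000, CL=68)
    plt.show()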
| 46.596591 | 192 | 0.606511 |
a4c7ea8c73e5c4b3b39ca6bfd73813fa92b4e12c | 15,312 | py | Python | world/dominion/unit_types.py | emonical/arxcode | 10a24a2d86f709469698f3d1815fe1ae0d6174a3 | [
"MIT"
] | 1 | 2020-09-30T04:59:24.000Z | 2020-09-30T04:59:24.000Z | world/dominion/unit_types.py | emonical/arxcode | 10a24a2d86f709469698f3d1815fe1ae0d6174a3 | [
"MIT"
] | null | null | null | world/dominion/unit_types.py | emonical/arxcode | 10a24a2d86f709469698f3d1815fe1ae0d6174a3 | [
"MIT"
] | null | null | null | """
Unit types:
All the stats for different kinds of military units are defined here and
will be used at runtime.
"""
import traceback
from .combat_grid import PositionActor
from random import randint
from . import unit_constants
_UNIT_TYPES = {}
def register_unit(unit_cls):
"""
Registers decorated class in _UNIT_TYPES
Args:
unit_cls: UnitStats class/child class
Returns:
unit_cls
"""
if unit_cls.id not in _UNIT_TYPES:
_UNIT_TYPES[unit_cls.id] = unit_cls
return unit_cls
def get_unit_class_by_id(unit_id, unit_model=None):
"""
Looks up registered units by their ID
Args:
unit_id: ID that matches a UnitStats class id attribute
unit_model: optional MilitaryUnit model passed along for debug info
Returns:
UnitStats class or subclass matching ID
"""
try:
cls = _UNIT_TYPES[unit_id]
except KeyError:
if unit_model:
print(
"ERROR: Unit type not found for MilitaryUnit obj #%s!" % unit_model.id
)
print(
"Attempted Unit class ID was %s. Not found, using Infantry as fallback."
% unit_id
)
traceback.print_exc()
cls = _UNIT_TYPES[unit_constants.INFANTRY]
return cls
def get_unit_stats(unit_model, grid=None):
"""
Returns the type of unit class for combat that corresponds
to a unit's database model instance. Because we don't want to have
the entire weekly maintenance process that handles all dominion
commands stop for an exception, we do a lot of handling with default
values.
"""
cls = get_unit_class_by_id(unit_model.unit_type, unit_model)
unit = cls(unit_model, grid)
return unit
def type_from_str(name_str):
"""
Gets integer of unit type from a string
Helper function for end-users entering the name of a unit type
and retrieving the integer that is used in the database to represent
it, which is then used for django filters.
Args:
name_str:
Returns:
int
"""
cls = cls_from_str(name_str)
if cls:
return cls.id
def cls_from_str(name_str):
"""
Gets class of unit type from a string
Helper function for end-users entering the name of a unit type
and retrieving the class that contains stats for that unit type.
Args:
name_str: str
Returns:
UnitStats
"""
name_str = name_str.lower()
for cls in _UNIT_TYPES.values():
if cls.name.lower() == name_str:
return cls
def print_unit_names():
return ", ".join(cls.name for cls in _UNIT_TYPES.values())
class UnitStats(PositionActor):
"""
Contains all the stats for a military unit.
"""
id = -1
name = "Default"
# silver upkeep costs for 1 of a given unit
silver_upkeep = 10
food_upkeep = 1
hiring_cost = 5
# how powerful we are in melee combat
melee_damage = 1
# how powerful we are at range
range_damage = 0
# our defense against attacks
defense = 0
# defense against ANY number of attackers. Super powerful
multi_defense = 0
storm_damage = 0
# how much damage each individual in unit can take
hp = 1
# if we are a ranged unit, this value is not 0. Otherwise it is 0.
range = 0
# the minimum range an enemy must be for us to use our ranged attack
min_for_range = 1
# our value in siege
siege = 0
movement = 0
strategic_speed = 0
# where the unit can be deployed: ground, naval, or flying
environment = "ground"
# how much more damage we take from things like dragon fire, spells, catapults, etc
structure_damage_multiplier = 1
xp_cost_multiplier = 1
def __init__(self, dbobj, grid):
super(UnitStats, self).__init__(grid)
self.dbobj = dbobj
self.formation = None
self.log = None
# how much damage we've taken
self.damage = 0
# how many troops from unit have died
self.losses = 0
self.routed = False
self.destroyed = False
# the target we are currently trying to engage
self.target = None
# whether we are currently storming a castle
self.storming = False
# if we know a castle position to storm
self.storm_targ_pos = None
# A castle object if we're in it
self.castle = None
self.flanking = None
self.flanked_by = None
try:
self.commander = dbobj.commander
if dbobj.army:
self.morale = dbobj.army.morale
self.commander = self.commander or dbobj.army.general
else:
self.morale = 80
self.level = dbobj.level
self.equipment = dbobj.equipment
self.type = dbobj.unit_type
self.quantity = dbobj.quantity
self.starting_quantity = dbobj.quantity
except AttributeError:
print("ERROR: No dbobj for UnitStats found! Using default values.")
traceback.print_exc()
self.morale = 0
self.level = 0
self.equipment = 0
self.type = unit_constants.INFANTRY
self.quantity = 1
self.starting_quantity = 1
self.dbobj = None
self.commander = None
if dbobj.origin:
from django.core.exceptions import ObjectDoesNotExist
try:
self.name = dbobj.origin.unit_mods.get(unit_type=self.id).name
except (ObjectDoesNotExist, AttributeError):
pass
@classmethod
def display_class_stats(cls):
"""
Returns a string of stats about this class.
Returns:
msg (str): Formatted display of this class's stats
"""
msg = "{wName:{n %s\n" % cls.name
msg += "{wHiring Cost (military resources){n: %s\n" % cls.hiring_cost
msg += "{wUpkeep Cost (silver){n: %s\n" % cls.silver_upkeep
msg += "{wFood Upkeep{n: %s\n" % cls.food_upkeep
return msg
def _targ_in_range(self):
if not self.target:
return False
return self.check_distance_to_actor(self.target) <= self.range
targ_in_range = property(_targ_in_range)
def _unit_active(self):
return not self.routed and not self.destroyed
active = property(_unit_active)
def _unit_value(self):
return self.quantity * self.silver_upkeep
value = property(_unit_value)
def __str__(self):
return "%s's %s(%s)" % (str(self.formation), self.name, self.quantity)
def swing(self, target, atk):
"""
One unit trying to do damage to another. Defense is a representation
of how much resistance to damage each individual unit has against
attacks. For that reason, it's limited by the number of attacks the
unit is actually receiving. multi_defense, however, is an additional
defense that scales with the number of attackers, representing some
incredible durability that can ignore small units. Essentially this
is for dragons, archmages, etc, who are effectively war machines.
"""
defense = target.defense
defense += target.defense * target.level
defense += target.defense * target.equipment
def_mult = target.quantity
if self.quantity < def_mult:
def_mult = self.quantity
defense *= def_mult
# usually this will be 0. multi_defense is for dragons, mages, etc
defense += target.multi_defense * self.quantity
def_roll = randint(0, defense)
if target.commander:
def_roll += def_roll * target.commander.warfare
if target.castle:
def_roll += def_roll * target.castle.level
attack = atk * self.quantity
attack += atk * self.level
attack += atk * self.equipment
# have a floor of half our attack
atk_roll = randint(attack // 2, attack)
if self.commander:
atk_roll += atk_roll * self.commander.warfare
damage = atk_roll - def_roll
if damage < 0:
damage = 0
target.damage += damage
self.log.info(
"%s attacked %s. Atk roll: %s Def roll: %s\nDamage:%s"
% (str(self), str(target), atk_roll, def_roll, damage)
)
def ranged_attack(self):
if not self.range:
return
if not self.target:
return
if not self.targ_in_range:
return
self.swing(self.target, self.range_damage)
def melee_attack(self):
if not self.target:
return
if not self.targ_in_range:
return
if self.storming:
self.swing(self.target, self.storm_damage)
else:
self.swing(self.target, self.melee_damage)
self.target.swing(self, self.target.melee_damage)
def advance(self):
if self.target and not self.targ_in_range:
self.move_toward_actor(self.target, self.movement)
elif self.storm_targ_pos:
try:
x, y, z = self.storm_targ_pos
self.move_toward_position(x, y, z, self.movement)
except (TypeError, ValueError):
print(
"ERROR when attempting to move toward castle. storm_targ_pos: %s"
% str(self.storm_targ_pos)
)
self.log.info("%s has moved. Now at pos: %s" % (self, str(self.position)))
def cleanup(self):
"""
Apply damage, destroy units/remove them, make units check for rout, check
for rally.
"""
if not self.damage:
return
hp = self.hp
hp += self.hp * self.level
hp += self.hp * self.equipment
if self.damage >= hp:
losses = self.damage // hp
# save remainder
self.losses += losses
self.quantity -= losses
if self.quantity <= 0:
self.quantity = 0
self.destroyed = True
self.log.info("%s has been destroyed." % (str(self)))
return
self.damage %= hp
self.rout_check()
if self.routed:
self.rally_check()
def rout_check(self):
"""
Chance for the unit to rout. Roll 1-100 to beat a difficulty number
to avoid routing. Difficulty is based on our percentage of losses +
any morale rating we have below 100. Reduced by 5 per troop level
and commander level.
"""
percent_losses = float(self.losses) / float(self.starting_quantity)
percent_losses = int(percent_losses * 100)
morale_penalty = 100 - self.morale
difficulty = percent_losses + morale_penalty
difficulty -= 5 * self.level
if self.commander:
difficulty -= 5 * self.commander.warfare
if randint(1, 100) < difficulty:
self.routed = True
def rally_check(self):
"""
Rallying is based almost entirely on the skill of the commander. It's
a 1-100 roll trying to reach 100, with the roll being multiplied by
our commander's level(+1). We add +10 for each level of troop training
of the unit, as elite units will automatically rally. Yes, this means
that it is impossible for level 10 or higher units to rout.
"""
level = 0
if self.commander:
level = self.commander.warfare
# a level 0 or no commander just means roll is unmodified
level += 1
roll = randint(1, 100)
roll *= level
roll += 10 * self.level
self.log.info("%s has routed and rolled %s to rally." % (str(self), roll))
if roll >= 100:
self.routed = False
def check_target(self):
if not self.target:
return
if self.target.active:
return self.target
def acquire_target(self, enemy_formation):
"""
Retrieve a target from the enemy formation based on various
targeting criteria.
"""
self.target = enemy_formation.get_target_from_formation_for_attacker(self)
@property
def levelup_cost(self):
current = self.dbobj.level + 1
return current * current * 50 * self.xp_cost_multiplier
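# Illustrative values only: a level-2 unit with the default xp_cost_multiplier
# of 1 needs (2 + 1) ** 2 * 50 = 450 XP for its next level.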
@register_unit
class Infantry(UnitStats):
id = unit_constants.INFANTRY
name = "Infantry"
silver_upkeep = 5
melee_damage = 3
storm_damage = 3
defense = 1
hp = 30
movement = 2
strategic_speed = 2
hiring_cost = 10
@register_unit
class Pike(UnitStats):
id = unit_constants.PIKE
name = "Pike"
silver_upkeep = 8
melee_damage = 5
storm_damage = 3
defense = 1
hp = 30
movement = 2
strategic_speed = 2
hiring_cost = 15
@register_unit
class Cavalry(UnitStats):
id = unit_constants.CAVALRY
name = "Cavalry"
silver_upkeep = 15
melee_damage = 10
storm_damage = 3
defense = 3
hp = 60
movement = 6
strategic_speed = 2
hiring_cost = 30
xp_cost_multiplier = 2
@register_unit
class Archers(UnitStats):
id = unit_constants.ARCHERS
name = "Archers"
silver_upkeep = 10
melee_damage = 1
range_damage = 5
storm_damage = 3
defense = 1
hp = 20
range = 6
siege = 5
movement = 2
strategic_speed = 2
hiring_cost = 20
xp_cost_multiplier = 2
@register_unit
class Longship(UnitStats):
id = unit_constants.LONGSHIP
name = "Longships"
silver_upkeep = 75
food_upkeep = 20
movement = 6
melee_damage = 60
range_damage = 100
hp = 500
environment = "naval"
strategic_speed = 12
structure_damage_multiplier = 20
hiring_cost = 150
xp_cost_multiplier = 10
@register_unit
class SiegeWeapon(UnitStats):
id = unit_constants.SIEGE_WEAPON
name = "Siege Weapon"
silver_upkeep = 500
food_upkeep = 20
movement = 1
melee_damage = 20
range_damage = 300
defense = 10
hp = 400
storm_damage = 600
strategic_speed = 1
structure_damage_multiplier = 20
hiring_cost = 1000
xp_cost_multiplier = 30
@register_unit
class Galley(UnitStats):
id = unit_constants.GALLEY
name = "Galleys"
silver_upkeep = 250
food_upkeep = 60
movement = 5
melee_damage = 240
range_damage = 400
hp = 2000
environment = "naval"
strategic_speed = 10
structure_damage_multiplier = 20
hiring_cost = 500
xp_cost_multiplier = 50
@register_unit
class Cog(UnitStats):
id = unit_constants.COG
name = "Cogs"
silver_upkeep = 500
food_upkeep = 120
movement = 6
melee_damage = 700
range_damage = 2000
hp = 5000
environment = "naval"
strategic_speed = 12
hiring_cost = 1000
xp_cost_multiplier = 75
@register_unit
class Dromond(UnitStats):
id = unit_constants.DROMOND
name = "Dromonds"
silver_upkeep = 1000
food_upkeep = 300
movement = 3
melee_damage = 2500
range_damage = 5000
hp = 20000
environment = "naval"
strategic_speed = 8
structure_damage_multiplier = 20
hiring_cost = 2000
xp_cost_multiplier = 100
| 28.890566 | 87 | 0.613179 |
9603ff0eacc671b06d1e042de8ae6258e06dcc2e | 1,658 | py | Python | obolus/server.py | Anomareh/obolus | dcbc9a8b8ca0d66dabe30774a3b3b427b86f3292 | [
"BSD-3-Clause"
] | null | null | null | obolus/server.py | Anomareh/obolus | dcbc9a8b8ca0d66dabe30774a3b3b427b86f3292 | [
"BSD-3-Clause"
] | null | null | null | obolus/server.py | Anomareh/obolus | dcbc9a8b8ca0d66dabe30774a3b3b427b86f3292 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
from os import getpid, kill
import signal
from sys import exit
from utils import Buffer, clean_string, dirty_string
class Server:
def __init__(self, port):
self.port = port
self.users = {}
self.pid = getpid()
self.loop = asyncio.get_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.close)
self.loop.add_signal_handler(signal.SIGTERM, self.close)
asyncio.ensure_future(self.listen(), loop = self.loop)
self.loop.run_forever()
def close(self):
Buffer.esc('[DL][1:HPA]')
Buffer.write(':: Shutting down\n')
for reader, writer in self.users.values():
writer.write(dirty_string(':: Server shutting down'))
writer.write_eof()
self.loop.stop()
exit(0)
def broadcast(self, message):
Buffer.write('Sending message: {}\n'.format(message))
for reader, writer in self.users.values():
writer.write(dirty_string(message))
async def connect(self, reader, writer):
Buffer.write('User connected\n')
user = writer.get_extra_info('peername')
self.users[user] = (reader, writer)
while not reader.at_eof():
message = await reader.readline()
if message:
self.broadcast(clean_string(message))
Buffer.write('User disconnected\n')
del self.users[user]
writer.write_eof()
async def listen(self):
Buffer.write(':: Listening\n')
self.server = await asyncio.start_server(
self.connect, 'localhost', self.port)
| 24.746269 | 66 | 0.607961 |
9510ed1acd155d84268cce985b642e34ea4d6d4d | 4,986 | py | Python | mediacloud/test/__init__.py | mediacloud/api-client | df47422ab417014d78fe41e1659280d5f766a530 | [
"MIT"
] | 7 | 2021-01-28T17:31:54.000Z | 2021-12-20T16:28:23.000Z | mediacloud/test/__init__.py | mediacloud/api-client | df47422ab417014d78fe41e1659280d5f766a530 | [
"MIT"
] | 2 | 2021-04-07T23:26:50.000Z | 2021-09-24T00:43:16.000Z | mediacloud/test/__init__.py | mediacloud/api-client | df47422ab417014d78fe41e1659280d5f766a530 | [
"MIT"
] | 3 | 2021-07-29T11:25:25.000Z | 2021-12-25T22:27:26.000Z | import os.path
import logging
from dotenv import load_dotenv
TEST_USER_EMAIL = "[email protected]"
QUERY_LAST_FEW_DAYS = "publish_date:[NOW-3DAY TO NOW]"
QUERY_LAST_WEEK = "publish_date:[NOW-7DAY TO NOW]"
QUERY_LAST_MONTH = "publish_date:[NOW-31DAY TO NOW]"
QUERY_LAST_YEAR = "publish_date:[NOW-1YEAR TO NOW]"
QUERY_LAST_DECADE = "publish_date:[NOW-10YEAR TO NOW]"
QUERY_ENGLISH_LANGUAGE = "language:en"
# useful for testing very long queries that need to be sent as POST
LONG_ENGLISH_QUERY = '((Scien* OR research* OR study OR studies) OR (Tech* OR Google OR Facebook OR Alphabet OR Amazon OR Netflix OR Twitter OR Instagram OR "consumer tech*" OR Snapchat OR WhatsApp OR SpaceX OR Tesla) OR (weather OR forecast OR flood OR storm OR hurricane OR typhoon OR cyclone OR "heat wave" OR tornado OR blizzard OR avalanche OR drought OR landslide OR mudslide OR wildfire OR lightning OR (climate AND NOT "political climate")) OR (health OR disease OR ill* OR medic* OR deaths) OR (business* OR financ* OR stock OR econom* OR bank OR invest* OR "wall street" OR recession OR "bull market" OR "bear market" OR inflation OR IPO OR "hedge fund" OR "mutual fund" OR broker) OR (sport* OR baseball OR basketball OR football OR soccer OR hockey OR tennis OR golf OR boxing OR mma OR "mixed martial arts" OR NASCAR OR "car racing" OR Olympi* OR ski* OR snowboard* OR swim* OR gymnast*) OR (art OR arts OR celeb* OR movie* OR television OR music* OR "pop culture" OR books OR tv OR theater OR theatre OR gaming) OR (Trump OR Obama OR Democrat* OR Republican* OR Senat* OR Representative OR "First Lady" OR Governor OR campaign OR election) OR ("Afghanistan" OR "Albania" OR "Algeria" OR "Andorra" OR "Angola" OR "Antigua and Barbuda" OR "Argentina" OR "Armenia" OR "Australia" OR "Austria" OR "Azerbaijan" OR "Bahamas" OR "Bahrain" OR "Bangladesh" OR "Barbados" OR "Belarus" OR "Belgium" OR "Belize" OR "Benin" OR "Bhutan" OR "Bolivia" OR "Bosnia and Herzegovina" OR "Botswana" OR "Brazil" OR "Brunei" OR "Bulgaria" OR "Burkina Faso" OR "Burundi" OR "Cabo Verde" OR "Cambodia" OR "Cameroon" OR "Canada" OR "Central African Republic" OR "Chad" OR "Chile" OR "China" OR "Colombia" OR "Comoros" OR "Congo" OR "Costa Rica" OR "Ivory Coast" OR "Croatia" OR "Cuba" OR "Cyprus" OR "Czech Republic" OR "Denmark" OR "Djibouti" OR "Dominica" OR "Dominican Republic" OR "East Timor (Timor-Leste)" OR "Ecuador" OR "Egypt" OR "El Salvador" OR "Equatorial Guinea" OR "Eritrea" OR "Estonia" OR "Eswatini" OR "Swaziland" OR "Ethiopia" OR "Fiji" OR "Finland" OR "France" OR "Gabon" OR "The Gambia" OR "Georgia" OR "Germany" OR "Ghana" OR "Greece" OR "Grenada" OR "Guatemala" OR "Guinea" OR "Guinea-Bissau" OR "Guyana" OR "Haiti" OR "Honduras" OR "Hungary" OR "Iceland" OR "India" OR "Indonesia" OR "Iran" OR "Iraq" OR "Ireland" OR "Israel" OR "Italy" OR "Jamaica" OR "Japan" OR "Kazakhstan" OR "Kenya" OR "Kiribati" OR "North Korea" OR "South Korea" OR "Kosovo" OR "Kuwait" OR "Kyrgyzstan" OR "Laos" OR "Latvia" OR "Lebanon" OR "Lesotho" OR "Liberia" OR "Libya" OR "Liechtenstein" OR "Lithuania" OR "Luxembourg" OR "Madagascar" OR "Malawi" OR "Malaysia" OR "Maldives" OR "Mali" OR "Malta" OR "Marshall Islands" OR "Mauritania" OR "Mauritius" OR "Mexico" OR "Micronesia, Federated States of" OR "Moldova" OR "Monaco" OR "Mongolia" OR "Montenegro" OR "Morocco" OR "Mozambique" OR "Myanmar " OR "Burma" OR "Namibia" OR "Nauru" OR "Nepal" OR "Netherlands" OR "New Zealand" OR "Nicaragua" OR "Niger" OR "Nigeria" OR "North Macedonia" OR "Norway" OR "Oman" OR "Pakistan" OR "Palau" OR "Panama" OR "Papua New Guinea" OR "Paraguay" OR "Peru" OR "Philippines" OR "Poland" OR "Portugal" OR "Qatar" OR "Romania" OR "Russia" OR "Rwanda" OR "Saint Kitts and Nevis" OR "Saint Lucia" OR "Saint Vincent and the Grenadines" OR "Samoa" OR "San Marino" OR "Sao Tome and Principe" OR "Saudi Arabia" OR "Senegal" OR "Serbia" OR "Seychelles" OR "Sierra Leone" OR "Singapore" OR "Slovakia" OR "Slovenia" OR "Solomon Islands" OR "Somalia" OR "South Africa" OR "Spain" OR "Sri 
Lanka" OR "Sudan" OR "South Sudan" OR "Suriname" OR "Sweden" OR "Switzerland" OR "Syria" OR "Taiwan" OR "Tajikistan" OR "Tanzania" OR "Thailand" OR "Togo" OR "Tonga" OR "Trinidad and Tobago" OR "Tunisia" OR "Turkey" OR "Turkmenistan" OR "Tuvalu" OR "Uganda" OR "Ukraine" OR "United Arab Emirates" OR "United Kingdom" OR "Uruguay" OR "Uzbekistan" OR "Vanuatu" OR "Vatican City" OR "Venezuela" OR "Vietnam" OR "Yemen" OR "Zambia" OR "Zimbabwe" OR "Timor Leste"))'
basedir = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
logger = logging.getLogger(__name__)
# load env-vars from .env file if there is one
test_env = os.path.join(basedir, '.env')
if os.path.isfile(test_env):
load_dotenv(dotenv_path=os.path.join(basedir, '.env'), verbose=True)
def load_text_from_fixture(filename):
with open(os.path.join(basedir, "mediacloud", "test", "fixtures", filename), 'r') as f:
text = f.read()
return text
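# Usage sketch (the fixture name is hypothetical):
# load_text_from_fixture("sample_story.html") returns the contents of
# mediacloud/test/fixtures/sample_story.html as a string.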
| 155.8125 | 4,012 | 0.725231 |
8a06917a05944994d25eff26a5b57f4fe3632e5b | 3,358 | py | Python | projects/SixDPose/tools/plot_speed.py | jkznst/detectron2 | 790f1894134bb85b897b0912367ee54a24caf2b2 | [
"Apache-2.0"
] | null | null | null | projects/SixDPose/tools/plot_speed.py | jkznst/detectron2 | 790f1894134bb85b897b0912367ee54a24caf2b2 | [
"Apache-2.0"
] | 1 | 2020-06-23T13:39:45.000Z | 2020-06-23T13:39:45.000Z | projects/SixDPose/tools/plot_speed.py | jkznst/detectron2 | 790f1894134bb85b897b0912367ee54a24caf2b2 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
def plot_speed_crpnet():
plt.axvspan(25, 75, facecolor='#2ca02c', alpha=0.3)
plt.scatter(2, 50.2, s=100, label='[82] w/ ref.')
plt.scatter(3, 62.7, s=100, marker='^', label='BB8[87] w/ ref.')
plt.scatter(10, 79, s=100, marker='s', label='SSD-6D[84] w/ ref.')
plt.scatter(50, 56, s=100, marker='*', label='YOLO6D[88]')
plt.scatter(6, 62.7, s=100, marker='p', c='black', label='PoseCNN[86]')
plt.scatter(4.5, 64.7, s=100, marker='v', c='yellow', label='AAE[94] w/ ref.')
# CRPNet
plt.scatter(30, 74.7, s=100, c='purple', marker='D', label='CRPNet(ours)')
plt.scatter(35, 73.6, s=100, c='purple', marker='D')
plt.scatter(43, 70.3, s=100, c='purple', marker='D')
x = [30, 35, 43]
y = [74.7, 73.6, 70.3]
plt.plot(x, y, c='purple')
plt.xlabel('FPS')
plt.ylabel('[email protected](%)')
plt.xlim(0, 75)
# plt.legend(['[82]', 'BB8[87]', 'SSD-6D[84]', 'YOLO6D[88]', 'CRPNet(ours)'])
plt.legend()
plt.show()
def plot_speed_hcrnet():
plt.axvspan(25, 75, facecolor='#2ca02c', alpha=0.3)
plt.scatter(10, 79, s=100, marker='s', label='SSD-6D[84] w/ ref.')
plt.scatter(6, 62.7, s=100, marker='p', c='black', label='PoseCNN[86]')
# plt.scatter(3, 62.7, s=100, marker='^', label='BB8[87] w/ ref.')
plt.scatter(50, 56, s=100, marker='*', label='YOLO6D[88]')
plt.scatter(25, 86.2, s=100, label='PVNet[93]')
# plt.scatter(4.5, 64.7, s=100, marker='v', label='AAE[94] w/ ref.')
plt.scatter(6.5, 72.4, s=100, marker='X', c='brown', label='Pix2Pose[96]')
plt.scatter(18, 89.8, s=100, marker='>', c='#f5bf03', label='CDPN[97]')
plt.scatter(33, 83.0, s=100, marker='<', c='cyan', label='DPOD[98]')
# CRPNet
plt.scatter(30, 74.7, s=100, c='purple', marker='D', label='CRPNet(ours)')
plt.scatter(35, 73.6, s=100, c='purple', marker='D')
plt.scatter(43, 70.3, s=100, c='purple', marker='D')
x = [30, 35, 43]
y = [74.7, 73.6, 70.3]
plt.plot(x, y, c='purple')
# HCRNet
plt.scatter(21, 86.5, s=100, c='red', marker='P', label='HCRNet(ours)')
plt.scatter(25, 84.9, s=100, c='red', marker='P')
plt.scatter(32, 81.0, s=100, c='red', marker='P')
x = [21, 25, 32]
y = [86.5, 84.9, 81.0]
plt.plot(x, y, c='red')
plt.xlabel('FPS')
plt.ylabel('[email protected](%)')
plt.xlim(0, 75)
# plt.legend(['[82]', 'BB8[87]', 'SSD-6D[84]', 'YOLO6D[88]', 'CRPNet(ours)'])
plt.legend()
plt.show()
def plot_indirect_strategy():
x = ['ape', 'bvise.', 'cam', 'can', 'cat', 'driller', 'duck', 'eggbox', 'glue', 'holep.', 'iron', 'lamp', 'phone']
direct = [93.5, 90.3, 93.3, 93.7, 94.5, 93.5, 94.4, 93.2, 94.0, 92.2, 88.8, 86.2, 92.5]
indirect = [96.5, 93.4, 96.4, 97.6, 97.4, 96.6, 97.1, 96.1, 96.9, 94.9, 91.5, 88.1, 95.3]
plt.bar(x, indirect, label='Cascade Regression Strategy (ours)', color='orange')
plt.bar(x, direct, label="Direct Regression Strategy [88]", color='blue')
plt.xticks(np.arange(len(x)), x, rotation=320, fontsize=10)
plt.legend(loc='upper left', fontsize=10)
# plt.rcParams['font.family'] = ['sans-serif']
# plt.rcParams['font.sans-serif'] = ['SimHei']
plt.ylabel('REP@5px(%)')
# plt.xlabel('Objects')
plt.ylim(85, 100)
plt.show()
if __name__ == "__main__":
plot_speed_hcrnet() | 39.97619 | 118 | 0.566706 |
f5babfa614eb826a54c85eeb6562d6c436542be3 | 6,262 | py | Python | openstack_dashboard/dashboards/admin/metadata_defs/views.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/metadata_defs/views.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/metadata_defs/views.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
from openstack_dashboard.dashboards.admin.metadata_defs \
import forms as admin_forms
from openstack_dashboard.dashboards.admin.metadata_defs \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.metadata_defs \
import tabs as admin_tabs
class AdminIndexView(tables.DataTableView):
table_class = admin_tables.AdminNamespacesTable
template_name = constants.METADATA_INDEX_TEMPLATE
page_title = _("Metadata Definitions")
def has_prev_data(self, table):
return self._prev
def has_more_data(self, table):
return self._more
def get_data(self):
namespaces = []
prev_marker = self.request.GET.get(
admin_tables.AdminNamespacesTable._meta.prev_pagination_param,
None)
if prev_marker is not None:
sort_dir = 'desc'
marker = prev_marker
else:
sort_dir = 'asc'
marker = self.request.GET.get(
admin_tables.AdminNamespacesTable._meta.pagination_param, None)
filters = self.get_filters()
try:
namespaces, self._more, self._prev =\
glance.metadefs_namespace_list(self.request,
marker=marker,
paginate=True,
sort_dir=sort_dir,
filters=filters)
if prev_marker is not None:
namespaces = sorted(namespaces,
key=lambda ns: getattr(ns, 'namespace'))
except Exception:
self._prev = False
self._more = False
msg = _('Error getting metadata definitions.')
exceptions.handle(self.request, msg)
return namespaces
class CreateView(forms.ModalFormView):
form_class = admin_forms.CreateNamespaceForm
template_name = constants.METADATA_CREATE_TEMPLATE
context_object_name = 'namespace'
success_url = reverse_lazy(constants.METADATA_INDEX_URL)
page_title = _("Create a Metadata Namespace")
submit_label = _("Import Namespace")
class DetailView(tabs.TabView):
redirect_url = constants.METADATA_INDEX_URL
tab_group_class = admin_tabs.NamespaceDetailTabs
template_name = constants.METADATA_DETAIL_TEMPLATE
page_title = "{{ namespace.namespace }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["namespace"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
namespace = glance.metadefs_namespace_get(
self.request, self.kwargs['namespace_id'], wrap=True)
except Exception:
url = reverse_lazy(constants.METADATA_INDEX_URL)
exceptions.handle(self.request,
_('Unable to retrieve namespace details.'),
redirect=url)
else:
return namespace
def get_tabs(self, request, *args, **kwargs):
namespace = self.get_data()
return self.tab_group_class(request, namespace=namespace, **kwargs)
class ManageResourceTypes(forms.ModalFormView):
template_name = constants.METADATA_MANAGE_RESOURCES_TEMPLATE
form_class = admin_forms.ManageResourceTypesForm
success_url = reverse_lazy(constants.METADATA_INDEX_URL)
def get_initial(self):
try:
resource_types = glance.metadefs_namespace_resource_types(
self.request, self.kwargs["id"])
except Exception:
resource_types = []
msg = _('Error getting resource type associations.')
exceptions.handle(self.request, msg)
return {'id': self.kwargs["id"],
'resource_types': resource_types}
def get_context_data(self, **kwargs):
context = super(ManageResourceTypes, self).get_context_data(**kwargs)
selected_type_names = [selected_type['name'] for selected_type in
context['form'].initial['resource_types']]
try:
# Set the basic types that aren't already associated
result = [unselected_type for unselected_type in
glance.metadefs_resource_types_list(self.request)
if unselected_type['name'] not in selected_type_names]
except Exception:
result = []
msg = _('Error getting resource type associations.')
exceptions.handle(self.request, msg)
# Add the resource types previously associated, includes prefix, etc
for initial_type in context['form'].initial['resource_types']:
selected_type = initial_type.copy()
selected_type['selected'] = True
result.insert(0, selected_type)
context['id'] = self.kwargs['id']
try:
context["resource_types"] = json.dumps(result)
except Exception:
context["resource_types"] = "[]"
msg = _('Error getting resource type associations.')
exceptions.handle(self.request, msg)
return context
| 37.27381 | 79 | 0.644842 |
a5863fed9592e169eac2fb4669e095476abc9baf | 3,383 | py | Python | examples/basic_operations/update_ad_group.py | bjagadev17/google-ads-python | ee2c059498d5679a0d1d9011f3795324439fad7c | [
"Apache-2.0"
] | null | null | null | examples/basic_operations/update_ad_group.py | bjagadev17/google-ads-python | ee2c059498d5679a0d1d9011f3795324439fad7c | [
"Apache-2.0"
] | null | null | null | examples/basic_operations/update_ad_group.py | bjagadev17/google-ads-python | ee2c059498d5679a0d1d9011f3795324439fad7c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an ad group.
To get ad groups, run get_ad_groups.py.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
from google.api_core import protobuf_helpers
# [START update_ad_group]
def main(client, customer_id, ad_group_id, cpc_bid_micro_amount):
ad_group_service = client.get_service("AdGroupService")
# Create ad group operation.
ad_group_operation = client.get_type("AdGroupOperation")
ad_group = ad_group_operation.update
ad_group.resource_name = ad_group_service.ad_group_path(
customer_id, ad_group_id
)
ad_group.status = client.get_type("AdGroupStatusEnum").AdGroupStatus.PAUSED
ad_group.cpc_bid_micros = cpc_bid_micro_amount
client.copy_from(
ad_group_operation.update_mask,
protobuf_helpers.field_mask(None, ad_group._pb),
)
# Update the ad group.
ad_group_response = ad_group_service.mutate_ad_groups(
customer_id=customer_id, operations=[ad_group_operation]
)
print(f"Updated ad group {ad_group_response.results[0].resource_name}.")
# [END update_ad_group]
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v7")
parser = argparse.ArgumentParser(
description=(
"Updates an ad group for specified customer and campaign "
"id with the given bid micro amount."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="The ad group ID."
)
parser.add_argument(
"-b",
"--cpc_bid_micro_amount",
type=int,
required=True,
help="The cpc bid micro amount.",
)
args = parser.parse_args()
try:
main(
googleads_client,
args.customer_id,
args.ad_group_id,
args.cpc_bid_micro_amount,
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f' Error with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| 32.528846 | 79 | 0.681348 |
4f32f2101b129ed0d8e58a555355f71a8d9d0fc2 | 6,767 | py | Python | source/CE-Integration/Cleanup.py | tylangesmith/aws-cloudendure-migration-factory-solution | 54ca1740e79d749992356df91e19aa10831bdd65 | [
"MIT-0"
] | null | null | null | source/CE-Integration/Cleanup.py | tylangesmith/aws-cloudendure-migration-factory-solution | 54ca1740e79d749992356df91e19aa10831bdd65 | [
"MIT-0"
] | null | null | null | source/CE-Integration/Cleanup.py | tylangesmith/aws-cloudendure-migration-factory-solution | 54ca1740e79d749992356df91e19aa10831bdd65 | [
"MIT-0"
] | null | null | null | #########################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# SPDX-License-Identifier: MIT-0 #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this #
# software and associated documentation files (the "Software"), to deal in the Software #
# without restriction, including without limitation the rights to use, copy, modify, #
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to #
# permit persons to whom the Software is furnished to do so. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A #
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE #
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
#########################################################################################
from __future__ import print_function
import sys
import requests
import json
import os
import boto3
from boto3.dynamodb.conditions import Key, Attr
application = os.environ['application']
environment = os.environ['environment']
servers_table_name = '{}-{}-servers'.format(application, environment)
apps_table_name = '{}-{}-apps'.format(application, environment)
servers_table = boto3.resource('dynamodb').Table(servers_table_name)
apps_table = boto3.resource('dynamodb').Table(apps_table_name)
def remove(session, headers, endpoint, HOST, projectname, waveid):
cleanup = ""
cleanupfail = ""
serverlist = []
applist = []
# Get Projects
r = requests.get(HOST + endpoint.format('projects'), headers=headers, cookies=session)
if r.status_code != 200:
return "ERROR: Failed to fetch the project...."
try:
# Get Project ID
projects = json.loads(r.text)["items"]
project_exist = False
for project in projects:
if project["name"] == projectname:
project_id = project["id"]
project_exist = True
if project_exist == False:
return "ERROR: Project Name does not exist in CloudEndure...."
# Get all Apps and servers from migration factory
getserver = scan_dynamodb_server_table()
servers = sorted(getserver, key = lambda i: i['server_name'])
getapp = scan_dynamodb_app_table()
apps = sorted(getapp, key = lambda i: i['app_name'])
# Get App list
applist = []
for app in apps:
if 'wave_id' in app:
if str(app['wave_id']) == str(waveid) and str(app['cloudendure_projectname']) == str(projectname):
applist.append(app['app_id'])
# Get Server List
for app in applist:
for server in servers:
if app == server['app_id']:
serverlist.append(server)
if len(serverlist) == 0:
return "ERROR: Serverlist for wave " + waveid + " in Migration Factory is empty...."
except:
print(sys.exc_info())
sys.exit(6)
m = requests.get(HOST + endpoint.format('projects/{}/machines').format(project_id), headers=headers, cookies=session)
machine_status = 0
for server in serverlist:
machine_exist = False
for machine in json.loads(m.text)["items"]:
if server["server_name"].lower() == machine['sourceProperties']['name'].lower():
machine_exist = True
if 'lastCutoverDateTime' in machine["lifeCycle"]:
machine_data = {'machineIDs': [machine['id']]}
remove = requests.delete(HOST + endpoint.format('projects/{}/machines').format(project_id), data = json.dumps(machine_data), headers=headers, cookies=session)
if remove.status_code == 204:
print("Machine: " + machine['sourceProperties']['name'] + " has been removed from CloudEndure....")
cleanup = cleanup + server["server_name"] + ","
machine_status += 1
else:
return "ERROR: Machine: " + machine['sourceProperties']['name'] + " cleanup failed...."
else:
cleanupfail = cleanupfail + server["server_name"] + ","
if machine_exist == False:
return "ERROR: Machine: " + server["server_name"] + " does not exist in CloudEndure...."
if len(cleanup) > 0 and len(cleanupfail) == 0:
cleanup = cleanup[:-1]
return "Server: " + cleanup + " have been removed from CloudEndure...."
if len(cleanup) == 0 and len(cleanupfail) > 0:
cleanupfail = cleanupfail[:-1]
return "ERROR: Machine: " + cleanupfail + " has not been migrated to PROD environment...."
if len(cleanup) > 0 and len(cleanupfail) > 0:
cleanup = cleanup[:-1]
cleanupfail = cleanupfail[:-1]
return "Server: " + cleanup + " have been removed from CloudEndure.... | " + "ERROR: Machine: " + cleanupfail + " has not been migrated to PROD environment, please wait for 15 mins...."
# Pagination for server DDB table scan
def scan_dynamodb_server_table():
response = servers_table.scan(ConsistentRead=True)
scan_data = response['Items']
while 'LastEvaluatedKey' in response:
print("Last Evaluate key for server is " + str(response['LastEvaluatedKey']))
response = servers_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'],ConsistentRead=True)
scan_data.extend(response['Items'])
return(scan_data)
# Pagination for app DDB table scan
def scan_dynamodb_app_table():
response = apps_table.scan(ConsistentRead=True)
scan_data = response['Items']
while 'LastEvaluatedKey' in response:
print("Last Evaluate key for app is " + str(response['LastEvaluatedKey']))
response = apps_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'],ConsistentRead=True)
scan_data.extend(response['Items'])
return(scan_data) | 51.656489 | 193 | 0.58076 |
73a2ee6383a85f41bbd5547c174e2a7be93e84a6 | 132,585 | py | Python | tensorflow/python/training/saver_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 1 | 2021-04-16T14:53:22.000Z | 2021-04-16T14:53:22.000Z | tensorflow/python/training/saver_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 10 | 2018-02-04T18:41:52.000Z | 2018-05-02T09:00:46.000Z | tensorflow/python/training/saver_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 4 | 2018-01-17T14:22:49.000Z | 2018-02-27T15:06:41.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import math
import os
import random
import shutil
import tempfile
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl.keras.engine import training
from tensorflow.python.keras._impl.keras.layers import core
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import checkpointable
from tensorflow.python.training import checkpointable_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
@test_util.with_c_api
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if not context.executing_eagerly():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if not context.executing_eagerly():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
# TODO (xpan): Why doesn't _mutable_hash_table_v2 create an empty id:3477
# table as it claims in eager mode?
# https://github.com/imdone/tensorflow/issues/3476
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes()
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
sess.run(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
sess.run(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes()
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.test_session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.executing_eagerly():
sess = None
else:
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
self.assertEquals(self.evaluate(v), [1])
def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
with ops_lib.Graph().as_default() as g:
v = resource_variable_ops.ResourceVariable(1.0, name="v")
with ops_lib.name_scope("saver1"):
saver_module.Saver()
with ops_lib.name_scope("saver2"):
saver_module.Saver({"name": v})
ops_in_saver1_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver1/") and
not op.name.startswith("saver1/save/"))]
self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
ops_in_saver2_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver2/") and
not op.name.startswith("saver2/save/"))]
self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
def testFilenameTensor(self):
v0 = variables.Variable(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.test_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(sess.run(tensor), filename)
def testInvalidPath(self):
v0 = variables.Variable(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.test_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(errors.NotFoundError,
"Failed to find any matching files for"):
save.restore(sess, "invalid path")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = variables.Variable(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
variables.global_variables_initializer().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = variables.Variable(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1", which raises an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
variables.global_variables_initializer().run()
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.Variable(1000.0, name="v0")
v1_2 = variables.Variable(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
self.assertEqual(b"k1000", v2_2.keys().eval())
self.assertEqual(3000.0, v2_2.values().eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
self.assertEqual(b"k1", v2_2.keys().eval())
self.assertEqual(30.0, v2_2.values().eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
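    """Saves var_value under var_name, then restores it into a variable initialized to other_value."""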
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if not context.executing_eagerly():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.test_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.test_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1})
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2})
variables.global_variables_initializer().run()
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
twos = variables.Variable([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(3.0, v2.values().eval())
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.Variable(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.Variable(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
save = saver_module.Saver(defer_build=True)
      # If build is not deferred, the saver cannot save `twos`.
twos = variables.Variable([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
@test_util.run_in_graph_and_eager_modes()
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.executing_eagerly():
sess = None
else:
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
      # When the parent directory doesn't exist, whether the save succeeds or
      # fails is implementation dependent. Therefore we allow both cases.
try:
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the graph.
save.save(sess, save_path)
with self.test_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO (jhseu): Remove this check when it works. id:3959
# https://github.com/imdone/tensorflow/issues/3957
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
save.save(sess, save_path)
@test_util.with_c_api
class SaveRestoreShardedTest(test.TestCase):
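  """Tests sharded save/restore of variables placed on multiple devices."""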
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.Variable(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t1.insert("k22", 44.0).run()
self.assertEqual(222, v1.eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
with self.test_session():
v0 = variables.Variable(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
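    """Saves and restores a variable under different partitionings and slicings."""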
var_full_shape = [10, 3]
    # Allows the save/restore mechanism to work with different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if slices:
assert not partitioner
# TODO (apassos): make create_partitioned_variables take use_resource id:4333
# https://github.com/imdone/tensorflow/issues/4331
# option to make this test passable without creating a named
# variable_scope.
vs = partitioned_variables.create_partitioned_variables(
var_full_shape, slices, rnd, name=var_name)
elif partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.Variable(rnd, name=var_name)]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({var_name: (vs if slices else vs[0])})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(slices=None, partitioner=None):
with self.test_session(graph=ops_lib.Graph()) as sess:
if slices:
assert not partitioner
new_vs = partitioned_variables.create_partitioned_variables(
var_full_shape,
slices,
array_ops.zeros(var_full_shape), # != original contents.
name=var_name)
elif partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.Variable(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: (new_vs if slices else new_vs[0])
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
elif slices and slices[0] != 1:
return array_ops.concat(new_vs, 0).eval()
elif slices and slices[1] != 1:
return array_ops.concat(new_vs, 1).eval()
else: # Non-sliced.
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Saves 10 horizontal parts of a partitioned variable.
# Restores into a full variable, non-sliced.
saved_full = _save(slices=[10, 1])
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number/orientation of slices.
      restored_full = _restore(slices=[2, 1])  # 2 horizontal parts.
self.assertAllEqual(saved_full, restored_full)
restored_full = _restore(slices=[1, 3]) # 3 vertical parts.
self.assertAllEqual(saved_full, restored_full)
# Restores into a PartitionedVariable
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores in slices.
saved_full = _save()
restored_full = _restore(slices=[1, 3])
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
@test_util.with_c_api
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
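  """Runs the sharded save/restore tests with the V2 checkpoint format."""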
_WRITE_VERSION = saver_pb2.SaverDef.V2
@test_util.with_c_api
class MaxToKeepTest(test.TestCase):
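  """Tests the max_to_keep checkpoint-retention behavior of Saver."""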
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
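    """Asserts that the checkpoint state in save_dir matches the given paths."""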
checkpoint_state = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testMaxToKeepEager(self):
with context.eager_mode():
save_dir = self._get_test_dir("max_to_keep_non_sharded")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(None, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver({"v": v}, max_to_keep=2)
save2.set_last_checkpoints(save.last_checkpoints)
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
s2 = save2.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
variables.global_variables_initializer().run()
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v})
variables.global_variables_initializer().run()
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
@test_util.with_c_api
class KeepCheckpointEveryNHoursTest(test.TestCase):
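  """Tests the keep_checkpoint_every_n_hours checkpoint-retention option."""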
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_in_graph_and_eager_modes()
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.test_session() as sess:
v = variable_scope.variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
self.evaluate(variables.global_variables_initializer())
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait until 1 second has elapsed so s1 will be old enough to keep.
      # sleep may return early, don't trust it.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking now to reduce time
      # variance in the test.
      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save() will delete s2, because max_to_keep is 2, and because
# we already kept the old s1. s2 is very close in time to s1 so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s4))
@test_util.with_c_api
class SaveRestoreWithVariableNameMap(test.TestCase):
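  """Tests saving and restoring variables under remapped tensor names."""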
def _testNonReshape(self, variable_op):
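    """Saves variables under prefixed names and restores them via the name map."""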
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if not context.executing_eagerly():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes()
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
@test_util.with_c_api
class LatestCheckpointWithRelativePaths(test.TestCase):
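  """Tests latest_checkpoint when checkpoints are saved under relative paths."""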
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testNameCollision(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
# Collides with the default name of the checkpoint state file.
filepath = os.path.join(traindir, "checkpoint")
with self.test_session() as sess:
unused_a = variables.Variable(0.0) # So that Saver saves something.
variables.global_variables_initializer().run()
# Should fail.
saver = saver_module.Saver(sharded=False)
with self.assertRaisesRegexp(ValueError, "collides with"):
saver.save(sess, filepath)
# Succeeds: the file will be named "checkpoint-<step>".
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = variables.Variable(0.0)
inc = v0.assign_add(1.0)
save = saver_module.Saver({"v0": v0})
# Record a short training history.
variables.global_variables_initializer().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = variables.Variable(-1.0)
# Create a new saver.
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer().run()
# Get the most recent checkpoint name from the training history file.
name = saver_module.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
@test_util.with_c_api
class CheckpointStateTest(test.TestCase):
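  """Tests generation, update, and parsing of CheckpointState protos."""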
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = saver_module.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
saver_module.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = saver_module.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
saver_module._update_checkpoint_state( # pylint: disable=protected-access
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
saver_module.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt_file.close()
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
@test_util.with_c_api
class MetaGraphTest(test.TestCase):
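  """Tests exporting and importing MetaGraphDefs together with Savers."""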
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = variables.Variable(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
      # Verifies that a collection with an unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
      # Verifies that a collection whose item type does not match the expected
      # type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in meta_graph_def0 and meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in meta_graph_def0 and meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0 graph nodes are omitted from the saver1 export
self.assertEqual(29, len(meta_graph_def0.graph_def.node))
self.assertEqual(19, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=ops_lib.Graph()):
# Creates a graph.
variables.Variable(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.test_session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=ops_lib.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "Cannot parse file"):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "does not exist"):
saver_module.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
variables.global_variables_initializer().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
# The use of control_flow_ops.cond here is purely for adding test coverage
# for the save and restore of control flow context (which doesn't make any
# sense here from a machine learning perspective). Typically the biases
# would be a simple Variable without the conditions.
biases = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage for the save and restore of control flow context (which doesn't
# make any sense here from a machine learning perspective). Typically the
# biases would be a simple Variable without the conditions.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
# Runs to logit.
sess.run(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
# Runs train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
sess.run(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
sess.run(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def _testGradientSerDes(self, graph_fn):
"""Tests that gradients can be computed after exporting and importing.
Builds a graph, exports it, and verifies that it can be imported and the
gradient can be built and run correctly.
Args:
graph_fn: takes a single float Tensor argument as input, outputs a single
Tensor
"""
test_dir = self._get_test_dir("nested_control_flow")
filename = os.path.join(test_dir, "metafile")
saver_ckpt = os.path.join(test_dir, "saver.ckpt")
# Build the graph under test using `graph_fn`.
with ops_lib.Graph().as_default():
var = variables.Variable(0.0)
var_name = var.name
output = graph_fn(var)
output_name = output.name
init_op = variables.global_variables_initializer()
# Generate a MetaGraphDef containing the while loop.
with session.Session() as sess:
sess.run(init_op)
sess.run(output)
saver = saver_module.Saver()
saver.save(sess, saver_ckpt)
saver.export_meta_graph(filename)
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with an imported MetaGraphDef.
grad = gradients_impl.gradients([output], [var])
# Turn off constant folding to avoid breaking testNestedControlFlowSerDes.
# It appears that a missing control dependency in the gradient graph
# causes the fetch node to not be triggered.
no_constfold_config = config_pb2.ConfigProto()
no_constfold_config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=no_constfold_config) as sess:
sess.run(init_op)
expected_grad_value = sess.run(grad)
# Restore the MetaGraphDef into a new Graph.
with ops_lib.Graph().as_default():
with session.Session() as sess:
saver = saver_module.import_meta_graph(filename)
saver.restore(sess, saver_ckpt)
# Make sure we can still build gradients and get the same result.
var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session(config=no_constfold_config) as sess:
sess.run(init_op)
actual_grad_value = sess.run(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
# pylint: disable=g-long-lambda
return self._testGradientSerDes(
lambda x: control_flow_ops.while_loop(
lambda i, y: i < 5, outer_body_fn, [0, x])[1])
# pylint: enable=g-long-lambda
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
def testNestedControlFlowSerDes(self):
# Test while loop in a cond in a while loop.
# pylint: disable=g-long-lambda
def body(i, x):
cond_result = control_flow_ops.cond(
i > 0,
lambda: control_flow_ops.while_loop(
lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])[1],
lambda: x)
return i + 1, cond_result
# pylint: enable=g-long-lambda
self._testWhileLoopAndGradientSerDes(body)
def testNestedCondsSerDes(self):
# Test conds in a cond.
# pylint: disable=g-long-lambda
self._testGradientSerDes(lambda x: control_flow_ops.cond(
x > 0,
lambda: control_flow_ops.cond(x > 3,
lambda: array_ops.identity(x),
lambda: math_ops.multiply(x, 2.0)),
lambda: control_flow_ops.cond(x < -3,
lambda: constant_op.constant(1.0),
lambda: math_ops.multiply(x, -1.0))))
# pylint: enable=g-long-lambda
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(0.0)
var = variables.Variable(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2",
"SaveSlices", "Sub", "VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2", "SaveV2",
"Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testStripDefaultValuedAttrs(self):
"""Verifies that default valued attrs are stripped, unless disabled."""
# With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must be removed.
with self.test_session():
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must *not* be removed, even if they map
# to their defaults.
with self.test_session(graph=ops_lib.Graph()):
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
def testImportIntoNamescope(self):
# Test that we can import a meta graph into a namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testImportIntoImplicitNamescope(self):
# Test that we can import a meta graph into an implicit namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
with ops_lib.name_scope("new_model"):
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph)
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we import a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we export a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
@test_util.with_c_api
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def testDebugString(self):
# Builds a graph.
v0 = variables.Variable(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.Variable(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.test_session() as sess:
sess.run(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
@test_util.with_c_api
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
@test_util.with_c_api
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
@test_util.with_c_api
class SaverUtilsTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
gfile.MakeDirs(self._base_dir)
def tearDown(self):
gfile.DeleteRecursively(self._base_dir)
def testCheckpointExists(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
self.assertFalse(
saver_module.checkpoint_exists(path)) # Not saved yet.
ckpt_prefix = saver.save(sess, path)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
def testGetCheckpointMtimes(self):
prefixes = []
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(write_version=version)
prefixes.append(
saver.save(sess, os.path.join(self._base_dir, str(version))))
mtimes = saver_module.get_checkpoint_mtimes(prefixes)
self.assertEqual(2, len(mtimes))
self.assertTrue(mtimes[1] >= mtimes[0])
@test_util.with_c_api
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
# The use of control_flow_ops.cond here is purely for adding test
# coverage for the save and restore of control flow context (which doesn't
# make any sense here from a machine learning perspective). Typically the
# biases would be a simple Variable without the conditions.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage for the save and restore of control flow context (which doesn't
# make any sense here from a machine learning perspective). Typically the
# biases would be a simple Variable without the conditions.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.test_session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage for the save and restore of control flow context (which doesn't
# make any sense here from a machine learning perspective). Typically the
# biases would be a simple Variable without the conditions.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.variables_initializer(rest_variables)
with self.test_session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
sess.run([weights1, biases1])
# Initialize the rest of the variables and run logits.
sess.run(init_rest_op)
sess.run(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
# Verifies that we can copy the subgraph under "hidden1" and copy it
# to different name scope in the same graph or different graph.
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.test_session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
# Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.Variable([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.Variable([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.test_session(graph=graph) as sess:
variables.global_variables_initializer().run()
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.test_session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, var_dict1["variable1:0"].eval())
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.test_session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, var_dict2["variable2:0"].eval())
class _OwnsAVariableSimple(checkpointable.CheckpointableBase):
"""A Checkpointable object which can be saved using a tf.train.Saver."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
def _gather_saveables_for_checkpoint(self):
return {checkpointable.VARIABLE_VALUE_KEY: self.non_dep_variable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class _MirroringSaveable(
saver_module.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class NonLayerCheckpointable(checkpointable.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
@test_util.with_c_api
class CheckpointableCompatibilityTests(test.TestCase):
# TODO (allenl): Track down python3 reference cycles in these tests. id:4024
# https://github.com/imdone/tensorflow/issues/4022
@test_util.run_in_graph_and_eager_modes()
def testNotSaveableButIsCheckpointable(self):
v = _OwnsAVariableSimple()
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes()
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
def testSingleTensorEvaluation(self):
class _CountingSaveable(saver_module.BaseSaverBuilder.SaveableObject):
def __init__(self, name):
self.eval_count = 0
def _tensor():
self.eval_count += 1
return constant_op.constant([1.])
dummy_op = constant_op.constant([2.])
super(_CountingSaveable, self).__init__(
dummy_op,
[saver_module.BaseSaverBuilder.SaveSpec(
_tensor, "", name, dtype=dummy_op.dtype)],
name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
pass
with context.eager_mode():
v = _CountingSaveable("foo")
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.assertEqual(1, v.eval_count)
saver.restore(sess, save_path)
self.assertEqual(1, v.eval_count)
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def testVariableNotFoundErrorRaised(self):
# Restore does some tricky exception handling to figure out if it should
# load an object-based checkpoint. Tests that the exception handling isn't
# too broad.
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
a = resource_variable_ops.ResourceVariable(1., name="a")
b = resource_variable_ops.ResourceVariable(1., name="b")
a_saver = saver_module.Saver([a])
b_saver = saver_module.Saver([b])
with self.test_session() as sess:
sess.run(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with self.assertRaisesRegexp(
errors.NotFoundError, "Key b not found in checkpoint"):
b_saver.restore(sess=sess, save_path=save_path)
def testCheckpointNotFoundErrorRaised(self):
# Restore does some tricky exception handling to figure out if it should
# load an object-based checkpoint. Tests that the exception handling isn't
# too broad.
a = resource_variable_ops.ResourceVariable(1., name="a")
saver = saver_module.Saver([a])
with self.test_session() as sess:
with self.assertRaisesRegexp(
errors.NotFoundError,
"Failed to find any matching files for path_which_does_not_exist"):
saver.restore(sess=sess, save_path="path_which_does_not_exist")
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.test_session(graph=save_graph) as sess:
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
# An incompatible object-based checkpoint to check error messages
var = resource_variable_ops.ResourceVariable(1., name="a")
self.evaluate(var.initializer)
second_saver = checkpointable_utils.CheckpointableSaver(var)
second_path = second_saver.save(file_prefix=os.path.join(
checkpoint_directory, "second"))
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.test_session(
graph=restore_graph) as sess:
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver()
saver.restore(sess=sess, save_path=save_path)
self._check_sentinels(root)
before_second_restore_ops = restore_graph.get_operations()
# Test that multiple restores do not pollute the graph
saver.restore(sess=sess, save_path=save_path)
self.assertEqual(before_second_restore_ops,
restore_graph.get_operations())
with self.assertRaisesRegexp(errors.NotFoundError,
"could not find a_variable"):
saver.restore(sess=sess, save_path=second_path)
def testLoadFromObjectBasedEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.test_session(graph=save_graph):
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver(
root.model.variables + root.optimizer.variables())
saver.restore(sess=None, save_path=save_path)
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
| 41.811731 | 87 | 0.675846 |
14d1d5ad981028071efd7b67d678b74658ad0f3a | 5,911 | py | Python | test/combine1.py | SungbinChoi/w4c_st1 | 5acdedf3c6278cd7239a6beb605c3e16821f7c86 | ["Apache-2.0"] | null | null | null | test/combine1.py | SungbinChoi/w4c_st1 | 5acdedf3c6278cd7239a6beb605c3e16821f7c86 | ["Apache-2.0"] | null | null | null | test/combine1.py | SungbinChoi/w4c_st1 | 5acdedf3c6278cd7239a6beb605c3e16821f7c86 | ["Apache-2.0"] | null | null | null |
import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from netCDF4 import Dataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
from typing import Any, List, Tuple
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
target_city = 'R1'
input_folder_path_list = ['v0_0/' + target_city + '_0',
'v1_0/' + target_city + '_1',
'v2_0/' + target_city + '_2',
'v3_0/' + target_city + '_3',
]
input_folder_path_list_2 = ['',
'',
'v2_1/' + target_city + '_2',
'v3_1/' + target_city + '_3',
]
input_folder_path_list_3 = ['',
'v1_1/' + target_city + '_1',
'',
'v3_2/' + target_city + '_3',
]
out_dir = 'submit' + '/' + target_city + '/' + 'test'
input_data_folder_path = '../0_data/' + target_city
input_n_data_folder_path = '../0_data/' + target_city + 'n'
num_frame_per_day = 96
num_frame_before = 4
num_frame_out = 32
num_frame_sequence = 36
height = 256
width = 256
num_channel_1 = 9
num_channel_2_src = 16
num_channel_2 = 107 + num_channel_2_src
num_channel = (num_channel_1*2 + num_channel_2)
num_channel_out = 4
NUM_INPUT_CHANNEL = num_channel * num_frame_before
NUM_OUTPUT_CHANNEL = num_channel_out * num_frame_out
SEED = 0
EPS = 1e-12
np.set_printoptions(precision=4)
def write_data(data, filename):
f = h5py.File(filename, 'w', libver='latest')
dset = f.create_dataset('array', shape=(data.shape), data=data, dtype=np.uint16, compression='gzip', compression_opts=9)
f.close()
if __name__ == '__main__':
COMMON_STRING ='@%s: \n' % os.path.basename(__file__)
COMMON_STRING += '\tset random seed\n'
COMMON_STRING += '\t\tSEED = %d\n'%SEED
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
COMMON_STRING += '\tset cuda environment\n'
COMMON_STRING += '\t\ttorch.__version__ = %s\n'%torch.__version__
COMMON_STRING += '\t\ttorch.version.cuda = %s\n'%torch.version.cuda
COMMON_STRING += '\t\ttorch.backends.cudnn.version() = %s\n'%torch.backends.cudnn.version()
try:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = %s\n'%os.environ['CUDA_VISIBLE_DEVICES']
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = None\n'
NUM_CUDA_DEVICES = 1
COMMON_STRING += '\t\ttorch.cuda.device_count() = %d\n'%torch.cuda.device_count()
print(COMMON_STRING)
try:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
except Exception:
print('out_dir not made')
exit(-1)
prediction_filename_list = []
for prediction_filename in os.listdir(input_folder_path_list[0]):
prediction_filename_list.append(prediction_filename)
assert len(prediction_filename_list) == 36
num_day_done = 0
for prediction_filename in prediction_filename_list:
day_name = prediction_filename.split('.')[0]
out_file_path = os.path.join(out_dir, day_name + '.h5')
pred_out = []
for c in range(4):
prediction = np.load(os.path.join(input_folder_path_list[c], prediction_filename))
pred_out.append(np.moveaxis(prediction['prediction'], 0, 1))
pred_out = np.concatenate(pred_out, axis=1)
assert pred_out.dtype == np.float32
num_pred_list = np.ones((4),np.int32)
pred_out_2 = []
for c in range(4):
input_folder_path = input_folder_path_list_2[c]
if input_folder_path == '':
pred_out_2.append(np.zeros((32,1,256,256),np.float32))
continue
prediction = np.load(os.path.join(input_folder_path, prediction_filename))
pred_out_2.append(np.moveaxis(prediction['prediction'], 0, 1))
num_pred_list[c] += 1
pred_out_2 = np.concatenate(pred_out_2, axis=1)
assert pred_out_2.dtype == np.float32
pred_out_3 = []
for c in range(4):
input_folder_path = input_folder_path_list_3[c]
if input_folder_path == '':
pred_out_3.append(np.zeros((32,1,256,256),np.float32))
continue
prediction = np.load(os.path.join(input_folder_path, prediction_filename))
pred_out_3.append(np.moveaxis(prediction['prediction'], 0, 1))
num_pred_list[c] += 1
pred_out_3 = np.concatenate(pred_out_3, axis=1)
assert pred_out_3.dtype == np.float32
pred_out += pred_out_2
pred_out += pred_out_3
for c in range(4):
pred_out[:,c,:,:] = pred_out[:,c,:,:] / float(num_pred_list[c])
pred_out[:,0,:,:] *= 22000
pred_out[:,1,:,:] *= 500
pred_out[:,2,:,:] *= 100
pred_out_binary = pred_out[:,3,:,:].copy()
pred_out_binary[pred_out_binary>0.5] = 1
pred_out_binary[pred_out_binary<=0.5] = 0
pred_out[:,3,:,:] = pred_out_binary
pred_out = np.rint(pred_out)
pred_out = pred_out.astype(np.uint16)
write_data(pred_out, out_file_path)
num_day_done += 1
print('num_day_done:', num_day_done, '\t')
exit(1)
| 33.777143 | 124 | 0.628658 |
96b29950ff796458ec270f990730e08d499a7ffe | 1,138 | py | Python | utils/TokenizationMapper.py | eyal-orbach/Details2Story-XLNetPlanCloze | 47fd98ca2d59f3d4113d2de510fe955fff49efb9 | ["MIT"] | 4 | 2020-12-14T03:25:03.000Z | 2022-03-19T20:30:25.000Z | utils/TokenizationMapper.py | eyal-orbach/Details2Story-XLNetPlanCloze | 47fd98ca2d59f3d4113d2de510fe955fff49efb9 | ["MIT"] | 1 | 2021-12-04T00:42:49.000Z | 2021-12-04T00:42:49.000Z | utils/TokenizationMapper.py | eyal-orbach/Details2Story-XLNetPlanCloze | 47fd98ca2d59f3d4113d2de510fe955fff49efb9 | ["MIT"] | 1 | 2021-04-08T21:30:56.000Z | 2021-04-08T21:30:56.000Z |
SRC_TOKENIZER_SPECIAL_CHAR = b'\xe2\x96\x81'
HELPER_TOKENIZER_SPECIAL_CHAR =b'\xc4\xa0'
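# Note (added for clarity): these byte strings appear to be the UTF-8 encodings
# of the SentencePiece word-boundary marker (U+2581) and the byte-level BPE
# marker (U+0120) used by GPT-2-style tokenizers; init_map() below translates
# token prefixes between the two vocabularies.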
class TokenizationMapper(object):
mapping = {}
target_unk_idx = None
def get_target_idx_From_src(self, src_idx):
return self.mapping.get(src_idx, self.target_unk_idx)
def init_map(self, source_tokenizer, target_tokenizer):
self.target_unk_idx = target_tokenizer.encoder.get(target_tokenizer.unk_token)
for i in range(source_tokenizer.vocab_size):
tok_arr = source_tokenizer.convert_ids_to_tokens([i])
if len(tok_arr) != 1:
tok = target_tokenizer.unk_token
else:
tok = tok_arr[0]
if tok.startswith(SRC_TOKENIZER_SPECIAL_CHAR.decode('utf-8')):
newtok = HELPER_TOKENIZER_SPECIAL_CHAR.decode('utf-8') + tok[1:]
else:
newtok = tok
target_idx_arr = target_tokenizer.convert_tokens_to_ids([newtok])
if len(target_idx_arr) != 1:
self.mapping[i] = self.target_unk_idx
else:
self.mapping[i] = target_idx_arr[0]
| 33.470588 | 86 | 0.635325 |
456f744898fc134adc8f95b08f5ded0d85db3977 | 1,014 | py | Python | gxf/extensions/reload.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | ["MIT"] | 41 | 2015-02-16T02:30:19.000Z | 2019-11-03T06:39:52.000Z | gxf/extensions/reload.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | ["MIT"] | 4 | 2015-02-02T10:51:00.000Z | 2019-03-02T15:48:57.000Z | gxf/extensions/reload.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | ["MIT"] | 8 | 2015-01-21T20:48:21.000Z | 2019-01-28T12:53:56.000Z |
# -*- coding: utf-8 -*-
import sys
import importlib
import gxf
@gxf.register()
class Reload(gxf.MaintenanceCommand):
'''
This command can be used to reload packages from source.
'''
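# Illustrative usage (assuming gxf exposes this command under the name
# "reload", which is not shown here): typing `reload gxf gxf.extensions`
# at the debugger prompt would re-import both packages from source.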
def setup(self, parser):
parser.add_argument(
'package', nargs='*', default=['gxf', 'gxf.extensions'],
help='packages to be reloaded, '
'defaults to gxf and gxf.extensions.')
def run(self, args):
toreload, toremove = set(), set()
packages = [(p, p.split('.')) for p in args.package]
for name, module in sys.modules.items():
path = name.split('.')
for p, ps in packages:
if p == name:
toreload.add(name)
elif ps == path[:len(ps)]:
toremove.add(name)
for name in toremove:
if name not in toreload:
del sys.modules[name]
for name in sorted(toreload):
importlib.reload(sys.modules[name])
| 26.684211 | 68 | 0.532544 |
829a16aed8d079085606670a782b3e9290e42416 | 160 | py | Python | words.py | Diplomatica-HTN/Profanity-Filter | 85f3135648b27c454bc3b33178cedb5c37569bc9 | ["MIT"] | 1 | 2021-09-19T14:17:24.000Z | 2021-09-19T14:17:24.000Z | words.py | Diplomatica-HTN/Profanity-Filter | 85f3135648b27c454bc3b33178cedb5c37569bc9 | ["MIT"] | null | null | null | words.py | Diplomatica-HTN/Profanity-Filter | 85f3135648b27c454bc3b33178cedb5c37569bc9 | ["MIT"] | null | null | null |
# Note these words are offensive
string = '''
List of Offensive Words Not Shown on Github'''
string_list = string.split('\n')
string_list = string_list[1:]
| 16 | 46 | 0.7125 |
a759c21e5c76c2d5bb415535416335f4ac7f4186 | 1,432 | py | Python | chainer/functions/loss/mean_absolute_error.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null | chainer/functions/loss/mean_absolute_error.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null | chainer/functions/loss/mean_absolute_error.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | ["MIT"] | null | null | null |
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class MeanAbsoluteError(function_node.FunctionNode):
"""Mean absolute error function."""
def check_type_forward(self, in_types):
type_check.argname(in_types, ('x0', 'x1'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
diff = self.diff.ravel()
return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),
def forward_gpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
diff = self.diff.ravel()
return abs(diff).sum() / diff.dtype.type(diff.size),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
coeff = gy * gy.data.dtype.type(1. / self.diff.size)
coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)
gx0 = coeff * cuda.get_array_module(gy.data).sign(self.diff)
return gx0, -gx0
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
This function computes mean absolute error between two variables. The mean
is taken over the minibatch.
"""
return MeanAbsoluteError().apply((x0, x1))[0]
| 29.22449 | 78 | 0.63757 |
a7fbcab93a6e2896e9d58b488d9be6c17f7e243f | 38,005 | py | Python | bokeh/session.py | hhuuggoo/bokeh | cc274a5003edf91804b49bd933c99644fdb0a9fc | ["BSD-3-Clause"] | 2 | 2020-04-07T18:55:04.000Z | 2021-02-27T13:33:23.000Z | bokeh/session.py | jeycin/bokeh | 8bf4a6f626c505dcb9c525409c0f74debf4499d7 | ["BSD-3-Clause"] | null | null | null | bokeh/session.py | jeycin/bokeh | 8bf4a6f626c505dcb9c525409c0f74debf4499d7 | ["BSD-3-Clause"] | null | null | null |
""" Defines the base PlotSession and some example session types.
"""
from exceptions import DataIntegrityException
from os.path import abspath, split, join
import os.path
import json
import logging
import urlparse
import uuid
import warnings
import requests
from bokeh import protocol, utils
from bokeh.objects import PlotObject, Plot
from bokeh.properties import List
logger = logging.getLogger(__file__)
class Session(object):
""" Sessions provide a sandbox or facility in which to manage the 'live'
object state for a Bokeh plot.
Many use cases for Bokeh have a client-server separation between the
plot and data model objects and the view layer objects and controllers.
For instance, we may have data and plot definitions in an interactive
Python session, while the rendering layer and its objects may be in
Javascript running in a web browser (even a remote browser).
Even a rich client scenario benefits from the session concept, as it
clearly demarcates responsibilities between plot specification and
managing interactive state. For inter-process or inter-language cases,
it provides a central place to manage serialization (and related
persistence issues).
Sessions can be used as ContextManagers, but they can also be created
around a PlotObject manually; the PlotObject and its related objects
will be associated with the given session.
"""
def __init__(self, plot=None):
""" Initializes this session from the given PlotObject. """
# Has the plot model changed since the last save?
self._dirty = True
# This stores a reference to all models in the object graph.
# Eventually consider making this be weakrefs?
self._models = {}
def __enter__(self):
pass
def __exit__(self, e_ty, e_val, e_tb):
pass
def add(self, *objects):
""" Associates the given object to this session. This means
that changes to the object's internal state will be reflected
in the persistence layer and trigger event that propagate
across to the View(s)
"""
for obj in objects:
if obj is None:
warnings.warn("Null object passed to Session.add()")
else:
obj.session = self
self._models[obj._id] = obj
def view(self):
""" Triggers the OS to open a web browser pointing to the file
that is connected to this session.
"""
raise NotImplementedError
class BaseHTMLSession(Session):
""" Common file & HTML-related utility functions which all HTML output
sessions will need. Mostly involves JSON serialization.
"""
bokeh_url = "https://bokeh.pydata.org/"
# The base local directory for all CSS and JS
server_static_dir = join(abspath(split(__file__)[0]), "server", "static")
# The base dir for all HTML templates
template_dir = join(abspath(split(__file__)[0]), "templates")
# The base URL for all CSS and JS
static_url = bokeh_url
#------------------------------------------------------------------------
# Static file handling
#------------------------------------------------------------------------
def js_paths(self, as_url=True, unified=True, min=True):
""" Returns a list of URLs or absolute paths on this machine to the JS
source files needed to render this session. If **unified** is True,
then this list is a single file. If **min** is True, then minifies
all the JS.
"""
raise NotImplementedError
def css_paths(self, as_url=True):
""" Returns the paths to required CSS files. Could be paths
or URIs depending on the type of session.
"""
raise NotImplementedError
@property
def bokehjs_dir(self):
return getattr(self, "_bokehjs_dir",
join(self.server_static_dir, "vendor/bokehjs"))
@bokehjs_dir.setter
def bokehjs_dir(self, val):
self._bokehjs_dir = val
def _inline_scripts(self, paths):
# Copied from dump.py, which itself was from wakariserver
if len(paths) == 0:
return ""
strings = []
for script in paths:
f_name = abspath(join(self.server_static_dir, script))
strings.append("""
// BEGIN %s
""" % f_name + open(f_name).read() + \
"""
// END %s
""" % f_name)
return "".join(strings)
def _inline_css(self, paths):
# Copied from dump.py, which itself was from wakariserver
if len(paths) == 0:
return ""
strings = []
for css_path in paths:
f_name = join(self.server_static_dir, css_path)
strings.append("""
/* BEGIN %s */
""" % f_name + open(f_name).read().decode("utf-8") + \
"""
/* END %s */
""" % f_name)
return "".join(strings)
def _load_template(self, filename):
import jinja2
with open(join(self.template_dir, filename)) as f:
return jinja2.Template(f.read())
#------------------------------------------------------------------------
# Serialization
#------------------------------------------------------------------------
class PlotObjEncoder(protocol.NumpyJSONEncoder):
""" Helper class we'll use to encode PlotObjects
Note that since json.dumps() takes a *class* as an argument and
not an instance, when this encoder class is used, the Session
instance is set as a class-level attribute. Kind of weird, and
should be better handled via a metaclass.
#hugo - I don't think we should use the json encoder anymore to do
this. It introduces an asymmetry in our operations, because
while you can use this mechanism to serialize, you cannot use
this mechanism to deserialize because we need 2 stage deserialization
in order to resolve references
"""
session = None
def default(self, obj):
if isinstance(obj, PlotObject):
if self.session is None:
raise RuntimeError("PlotObjEncoder requires a valid session")
# We do not want the JSON encoder (which walks the entire
# object graph) to do anything more than return references.
# The model serialization happens later.
d = self.session.get_ref(obj)
return d
else:
return protocol.NumpyJSONEncoder.default(self, obj)
def get_ref(self, obj):
self._models[obj._id] = obj
return {
'type': obj.__view_model__,
'id': obj._id
}
def make_id(self, obj):
return str(uuid.uuid4())
def serialize(self, obj, **jsonkwargs):
""" Returns a string representing the JSON encoded object.
        References to other objects/instances are replaced by a "ref"
        dict encoding the type and UUID of the object.
For all HTML sessions, the serialization protocol is JSON.
How we produce references is actually more involved, because
it may differ between server-based models versus embedded.
"""
try:
self.PlotObjEncoder.session = self
jsondata = protocol.serialize_json(
obj,
encoder=self.PlotObjEncoder,
**jsonkwargs)
finally:
self.PlotObjEncoder.session = None
return jsondata
class HTMLFileSession(BaseHTMLSession):
""" Produces a pile of static HTML, suitable for exporting a plot
as a standalone HTML file. This includes a template around the
plot and includes all the associated JS and CSS for the plot.
"""
title = "Bokeh Plot"
# The root directory for the CSS files
css_files = [
"js/vendor/bootstrap/bootstrap-bokeh-2.0.4.css",
"css/bokeh.css",
"css/continuum.css",
]
# TODO: Why is this not in bokehjs_dir, but rather outside of it?
js_files = ["js/bokeh.js"]
# Template files used to generate the HTML
js_template = "plots.js"
div_template = "plots.html" # template for just the plot <div>
html_template = "base.html" # template for the entire HTML file
inline_js = True
inline_css = True
# Used to compute the relative paths to JS and CSS if they are not
# inlined into the output
rootdir = abspath(split(__file__)[0])
def __init__(self, filename="bokehplot.html", plot=None, title=None):
self.filename = filename
if title is not None:
self.title = title
super(HTMLFileSession, self).__init__(plot=plot)
self.plotcontext = PlotContext()
self.raw_js_objs = []
# FIXME: move this to css_paths, js_paths to base class?
def css_paths(self, as_url=False):
return [join(self.server_static_dir, d) for d in self.css_files]
def js_paths(self, as_url=False, unified=True, min=True):
# TODO: Handle unified and minified options
return [join(self.server_static_dir, d) for d in self.js_files]
def raw_js_snippets(self, obj):
self.raw_js_objs.append(obj)
def dumps(self, js=None, css=None, rootdir=None):
""" Returns the HTML contents as a string
**js** and **css** can be "inline" or "relative", and they default
to the values of self.inline_js and self.inline_css.
If these are set to be "relative" (or self.inline_js/css are False),
**rootdir** can be specified to indicate the base directory from which
the path to the various static files should be computed. **rootdir**
defaults to the value of self.rootdir.
"""
# FIXME: Handle this more intelligently
pc_ref = self.get_ref(self.plotcontext)
elementid = str(uuid.uuid4())
# Manually convert our top-level models into dicts, before handing
# them in to the JSON encoder. (We don't want to embed the call to
# vm_serialize into the PlotObjEncoder, because that would cause
# all the attributes to be duplicated multiple times.)
models = []
for m in self._models.itervalues():
ref = self.get_ref(m)
ref["attributes"] = m.vm_serialize()
ref["attributes"].update({"id": ref["id"], "doc": None})
models.append(ref)
jscode = self._load_template(self.js_template).render(
elementid = elementid,
modelid = pc_ref["id"],
modeltype = pc_ref["type"],
all_models = self.serialize(models),
)
div = self._load_template(self.div_template).render(
elementid = elementid
)
if rootdir is None:
rootdir = self.rootdir
if js == "inline" or (js is None and self.inline_js):
# TODO: Are the UTF-8 decodes really necessary?
rawjs = self._inline_scripts(self.js_paths()).decode("utf-8")
jsfiles = []
else:
rawjs = None
jsfiles = [os.path.relpath(p,rootdir) for p in self.js_paths()]
if css == "inline" or (css is None and self.inline_css):
# TODO: Are the UTF-8 decodes really necessary?
rawcss = self._inline_css(self.css_paths()).decode("utf-8")
cssfiles = []
else:
rawcss = None
cssfiles = [os.path.relpath(p,rootdir) for p in self.css_paths()]
plot_div = self._load_template(self.div_template).render(
elementid=elementid
)
# jscode is the one I want
html = self._load_template(self.html_template).render(
js_snippets = [jscode],
html_snippets = [div] + [o.get_raw_js() for o in self.raw_js_objs],
rawjs = rawjs, rawcss = rawcss,
jsfiles = jsfiles, cssfiles = cssfiles,
title = self.title)
return html
def embed_js(self, plot_id, static_root_url):
# FIXME: Handle this more intelligently
pc_ref = self.get_ref(self.plotcontext)
elementid = str(uuid.uuid4())
models = []
for m in self._models.itervalues():
ref = self.get_ref(m)
ref["attributes"] = m.vm_serialize()
ref["attributes"].update({"id": ref["id"], "doc": None})
models.append(ref)
jscode = self._load_template('embed_direct.js').render(
host = "",
static_root_url=static_root_url,
elementid = elementid,
modelid = pc_ref["id"],
modeltype = pc_ref["type"],
plotid = plot_id, all_models = self.serialize(models))
return jscode.encode("utf-8")
def save(self, filename=None, js=None, css=None, rootdir=None):
""" Saves the file contents. Uses self.filename if **filename**
is not provided. Overwrites the contents.
**js** and **css** can be "inline" or "relative", and they default
to the values of self.inline_js and self.inline_css.
If these are set to be "relative" (or self.inline_js/css are False),
**rootdir** can be specified to indicate the base directory from which
the path to the various static files should be computed. **rootdir**
defaults to the value of self.rootdir.
"""
s = self.dumps(js, css, rootdir)
if filename is None:
filename = self.filename
with open(filename, "w") as f:
f.write(s.encode("utf-8"))
return
def view(self, do_save=True, new=False, autoraise=True):
""" Opens a browser to view the file pointed to by this sessions.
Automatically triggers a save by default.
        **new** can be False, "window", or "tab" to view the file in the
        existing page, a new window, or a new tab. **autoraise** causes
the browser to be brought to the foreground; this may happen
automatically on some platforms regardless of the setting of this
variable.
"""
import webbrowser
if do_save:
self.save()
newmap = {False: 0, "window": 1, "tab": 2}
file_url = "file://" + abspath(self.filename)
webbrowser.open(file_url, new = newmap[new], autoraise=autoraise)
def dumpjson(self, pretty=True, file=None):
""" Returns a JSON string representing the contents of all the models
stored in this session, or write it to a file object or file name.
If **pretty** is True, then return a string suitable for human reading,
otherwise returns a compact string.
If a file object is provided, then the output is appended to it. If a
        file name is provided, then it is opened for overwrite, not append.
Mostly intended to be used for debugging.
"""
models = []
for m in self._models.itervalues():
ref = self.get_ref(m)
ref["attributes"] = m.vm_serialize()
ref["attributes"].update({"id": ref["id"], "doc": None})
models.append(ref)
if pretty:
indent = 4
else:
indent = None
s = self.serialize(models, indent=indent)
if file is not None:
if isinstance(file, basestring):
with open(file, "w") as f:
f.write(s)
else:
file.write(s)
else:
return s
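# Illustrative usage sketch (editor's addition; ``my_plot`` is a hypothetical Plot object):
#
#     session = HTMLFileSession("bokehplot.html", title="My Plot")
#     session.add(my_plot)                              # register the plot and related models
#     session.plotcontext.children.append(my_plot)
#     html = session.dumps(js="inline", css="inline")   # standalone HTML string
#     session.save()                                    # writes self.filename
#     session.view(new="tab")                           # save and open in the browser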
class HTMLFragmentSession(BaseHTMLSession):
""" Produces a DOM fragment which is suitable for embedding in a
pre-existing HTML DOM. Differs from HTMLFileSession in that the
requisite script and css lines are generated separately.
"""
def contents(self, body_only=False):
""" Returns the multi-line string needed to embed a plot into
the <body> of an HTML document. Includes the JS and CSS by
default; if **body_only** is True, then returns just the plot
<div> and associated <script> tags, but none of the static
files it depends on.
"""
pass
#should move these to bokeh.objects?
class PlotContext(PlotObject):
children = List(has_ref=True)
class PlotList(PlotContext):
# just like plot context, except plot context has special meaning
# everywhere, so plotlist is the generic one
pass
class PlotServerSession(BaseHTMLSession):
def __init__(self, username=None, serverloc=None, userapikey="nokey"):
# This logic is based on ContinuumModelsClient.__init__ and
# mpl.PlotClient.__init__. There is some merged functionality here
# since a Session is meant to capture the little bit of lower-level
# logic in PlotClient (i.e. avoiding handling of things like
# _newxyplot()), but also build in the functionality of the
# ContinuumModelsClient.
self.username = username
self.root_url = serverloc
self.http_session = requests.session()
self.http_session.headers.update({
'content-type':'application/json',
'BOKEHUSER-API-KEY' : userapikey,
'BOKEHUSER' : username})
if self.root_url:
url = urlparse.urljoin(self.root_url, '/bokeh/userinfo/')
self.userinfo = utils.get_json(self.http_session.get(url, verify=False))
else:
logger.info('Not using a server, plots will only work in embedded mode')
self.userinfo = None
self.docid = None
self.plotcontext = None
self.apikey = None
self.bbclient = None # reference to a ContinuumModelsClient
self.base_url = urlparse.urljoin(self.root_url, "/bokeh/bb/")
self.raw_js_objs = []
super(PlotServerSession, self).__init__()
#------------------------------------------------------------------------
# Document-related operations
#------------------------------------------------------------------------
def raw_js_snippets(self, obj):
self.raw_js_objs.append(obj)
def load_doc(self, docid):
url = urlparse.urljoin(self.root_url,"/bokeh/getdocapikey/%s" % docid)
resp = self.http_session.get(url, verify=False)
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing DocID "%s"' % docid)
apikey = utils.get_json(resp)
if 'apikey' in apikey:
self.docid = docid
self.apikey = apikey['apikey']
logger.info('got read write apikey')
else:
self.docid = docid
self.apikey = apikey['readonlyapikey']
logger.info('got read only apikey')
self.load_all()
plotcontext = self.load_type('PlotContext')
if len(plotcontext):
temp = plotcontext[0]
if len(plotcontext) > 1:
logger.warning(
"Found more than one PlotContext for doc ID %s; " \
"Using PlotContext ID %s" % (self.docid, temp._id))
plotcontext = temp
else:
logger.warning("Unable to load PlotContext for doc ID %s" % self.docid)
plotcontext = PlotContext()
self.store_obj(plotcontext)
self.plotcontext = plotcontext
return
def make_doc(self, title):
url = urlparse.urljoin(self.root_url,"/bokeh/doc/")
data = protocol.serialize_web({'title' : title})
response = self.http_session.post(url, data=data, verify=False)
if response.status_code == 409:
raise DataIntegrityException
self.userinfo = utils.get_json(response)
def remove_doc(self, title):
matching = [x for x in self.userinfo['docs'] \
if x.get('title') == title]
docid = matching[0]['docid']
url = urlparse.urljoin(self.root_url,"/bokeh/doc/%s/" % docid)
response = self.http_session.delete(url, verify=False)
if response.status_code == 409:
raise DataIntegrityException
self.userinfo = utils.get_json(response)
def use_doc(self, name):
self.docname = name
docs = self.userinfo.get('docs')
matching = [x for x in docs if x.get('title') == name]
if len(matching) == 0:
logger.info("No documents found, creating new document '%s'" % name)
self.make_doc(name)
return self.use_doc(name)
elif len(matching) > 1:
logger.warning("Multiple documents with title '%s'" % name)
self.load_doc(matching[0]['docid'])
def make_source(self, *args, **kwargs):
        # We should not implement this here directly, since it should be
        # done by separately creating the DataSource object. Stubbing this
# out for now for symmetry with mpl.PlotClient
raise NotImplementedError("Construct DataSources manually from bokeh.objects")
#------------------------------------------------------------------------
# functions for loading json into models
# we have 2 types of json data, if all the models are of one type, then
# we just have a list of model attributes
# otherwise, we have what we refer to as broadcast_json, which are of the form
# {'type':typename, 'attributes' : attrs}
#------------------------------------------------------------------------
def load_attrs(self, typename, attrs, events='existing'):
broadcast_attrs = [dict(type=typename, attributes=x) for x in attrs]
return self.load_broadcast_attrs(broadcast_attrs, events=events)
def load_broadcast_attrs(self, attrs, events='existing'):
"""events can be 'existing', or None. 'existing' means
trigger events only for existing (not new objects).
None means don't trigger any events.
"""
models = []
created = set()
for attr in attrs:
typename = attr['type']
attr = attr['attributes']
logger.debug('type: %s', typename)
#logger.debug('attrs: %s', attr)
_id = attr['id']
if _id in self._models:
m = self._models[_id]
m._block_callbacks = True
m.load_json(attr, instance=m)
else:
cls = PlotObject.get_class(typename)
m = cls.load_json(attr)
if m is None:
raise RuntimeError('Error loading object from JSON')
self.add(m)
created.add(m)
models.append(m)
for m in models:
m.finalize(self._models)
if events is None:
self.clear_callback_queue(models)
        elif events == 'existing':
non_created = [x for x in models if x not in created]
self.execute_callback_queue(models=non_created)
self.clear_callback_queue(models=created)
self.enable_callbacks(models)
return models
def attrs(self, to_store):
attrs = []
for m in to_store:
attr = m.vm_serialize()
attr['doc'] = self.docid
attr['id'] = m._id
attrs.append(attr)
return attrs
def broadcast_attrs(self, to_store):
models = []
for m in to_store:
ref = self.get_ref(m)
ref["attributes"] = m.vm_serialize()
# FIXME: Is it really necessary to add the id and doc to the
# attributes dict? It shows up in the bbclient-based JSON
# serializations, but I don't understand why it's necessary.
ref["attributes"].update({"doc": self.docid})
models.append(ref)
return models
#------------------------------------------------------------------------
# Storing models
#------------------------------------------------------------------------
def store_obj(self, obj, ref=None):
return self.store_objs([obj])
def store_broadcast_attrs(self, attrs):
data = self.serialize(attrs)
url = utils.urljoin(self.base_url, self.docid + "/", "bulkupsert")
self.http_session.post(url, data=data)
def store_objs(self, to_store):
models = self.broadcast_attrs(to_store)
self.store_broadcast_attrs(models)
for m in to_store:
m._dirty = False
def store_all(self):
to_store = [x for x in self._models.values() \
if hasattr(x, '_dirty') and x._dirty]
self.store_objs(to_store)
return to_store
#------------------------------------------------------------------------
# Loading models
#------------------------------------------------------------------------
def load_all(self, asdict=False):
"""the json coming out of this looks different than that coming
out of load_type, because it contains id, type, attributes, whereas
the other one just contains attributes directly
"""
url = utils.urljoin(self.base_url, self.docid +"/")
attrs = protocol.deserialize_json(self.http_session.get(url).content)
if not asdict:
models = self.load_broadcast_attrs(attrs)
for m in models:
m._dirty = False
return models
else:
models = attrs
return models
def load_type(self, typename, asdict=False):
url = utils.urljoin(self.base_url, self.docid +"/", typename + "/")
attrs = protocol.deserialize_json(self.http_session.get(url).content)
if not asdict:
models = self.load_attrs(typename, attrs)
for m in models:
m._dirty = False
return models
else:
models = attrs
return models
def load_obj(self, ref, asdict=False, modelattrs={}):
"""loads an object from the server.
if asdict:
only the json is returned.
else:
update the existing copy in _models if it is present
instantiate a new one if it is not
and make sure to convert all references into models
in the conversion from json to objects, sometimes references
to models need to be resolved. If there are any json attributes
being processed, you can pass them in as modelattrs
"""
typename = ref["type"]
ref_id = ref["id"]
url = utils.urljoin(self.base_url, self.docid + "/" + typename +\
"/" + ref_id + "/")
attr = protocol.deserialize_json(self.http_session.get(url).content)
if not asdict:
return self.load_attrs(typename, [attr])[0]
else:
return attr
#loading callbacks
def callbacks_json(self, to_store):
all_data = []
for m in to_store:
data = self.get_ref(m)
data['callbacks'] = m._callbacks
all_data.append(data)
return all_data
def load_callbacks_json(self, callback_json):
for data in callback_json:
m = self._models[data['id']]
m._callbacks = {}
for attrname, callbacks in data['callbacks'].iteritems():
for callback in callbacks:
obj = self._models[callback['obj']['id']]
callbackname = callback['callbackname']
m.on_change(attrname, obj, callbackname)
def load_all_callbacks(self, get_json=False):
"""get_json = return json of callbacks, rather than
loading them into models
"""
url = utils.urljoin(self.base_url, self.docid + "/", "callbacks")
data = protocol.deserialize_json(self.http_session.get(url).content)
if get_json:
return data
self.load_callbacks_json(data)
#storing callbacks
def store_callbacks(self, to_store):
all_data = self.callbacks_json(to_store)
url = utils.urljoin(self.base_url, self.docid + "/", "callbacks")
all_data = self.serialize(all_data)
self.http_session.post(url, data=all_data)
for m in to_store:
m._callbacks_dirty = False
def store_all_callbacks(self):
to_store = [x for x in self._models.values() \
if hasattr(x, '_callbacks_dirty') and x._callbacks_dirty]
self.store_callbacks(to_store)
return to_store
#managing callbacks
def disable_callbacks(self, models=None):
if models is None:
models = self._models.itervalues()
for m in models:
m._block_callbacks = True
def enable_callbacks(self, models=None):
if models is None:
models = self._models.itervalues()
for m in models:
m._block_callbacks = False
def clear_callback_queue(self, models=None):
if models is None:
models = self._models.itervalues()
for m in models:
del m._callback_queue[:]
def execute_callback_queue(self, models=None):
if models is None:
models = self._models.itervalues()
for m in models:
for cb in m._callback_queue:
m._trigger(*cb)
del m._callback_queue[:]
#------------------------------------------------------------------------
# Static files
#------------------------------------------------------------------------
def css_paths(self, as_urls=True):
""" Returns a list of URLs or file paths for CSS files """
# This should coordinate with the running plot server and use some
# mechanism to query this information from it.
raise NotImplementedError
def js_paths(self, as_urls=True):
raise NotImplementedError
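# Illustrative server workflow (editor's addition; URL, user name and plot are placeholders):
#
#     session = PlotServerSession(username="demo",
#                                 serverloc="http://localhost:5006",
#                                 userapikey="nokey")
#     session.use_doc("scratch")     # load the named document, creating it if needed
#     session.add(my_plot)
#     session.store_all()            # push all dirty models to the plot server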
class NotebookSessionMixin(object):
# The root directory for the CSS files
css_files = [
"js/vendor/bootstrap/bootstrap-bokeh-2.0.4.css",
"css/bokeh.css",
"css/continuum.css",
]
# TODO: Why is this not in bokehjs_dir, but rather outside of it?
js_files = ["js/bokeh.js"]
js_template = "plots.js"
div_template = "plots.html"
html_template = "basediv.html" # template for the entire HTML file
def css_paths(self, as_url=False):
# TODO: Fix the duplication of this method from HTMLFileSession.
# Perhaps move this into BaseHTMLSession.. but a lot of other
# things would need to move as well.
return [join(self.server_static_dir, d) for d in self.css_files]
def js_paths(self):
        # For notebook sessions, we rely on a unified BokehJS file
        # that is not located in the BokehJS subtree
return [join(self.server_static_dir, d) for d in self.js_files]
def dumps(self, objects):
""" Returns the HTML contents as a string
FIXME : signature different than other dumps
FIXME: should consolidate code between this one and that one.
"""
if len(objects) == 0:
objects = self._models.values()
if len(objects) == 1 and isinstance(objects[0], Plot):
the_plot = objects[0]
objects = self._models.values()
else:
the_plot = [m for m in objects if isinstance(m, Plot)][0]
plot_ref = self.get_ref(the_plot)
elementid = str(uuid.uuid4())
# Manually convert our top-level models into dicts, before handing
# them in to the JSON encoder. (We don't want to embed the call to
# vm_serialize into the PlotObjEncoder, because that would cause
# all the attributes to be duplicated multiple times.)
models = []
for m in objects:
ref = self.get_ref(m)
ref["attributes"] = m.vm_serialize()
ref["attributes"].update({"id": ref["id"], "doc": None})
models.append(ref)
js = self._load_template(self.js_template).render(
elementid = elementid,
modelid = plot_ref["id"],
modeltype = plot_ref["type"],
all_models = self.serialize(models),
)
plot_div = self._load_template(self.div_template).render(
elementid=elementid
)
html = self._load_template(self.html_template).render(
html_snippets=[plot_div],
elementid = elementid,
js_snippets = [js],
)
return html.encode("utf-8")
def show(self, *objects):
""" Displays the given objects, or all objects currently associated
with the session, inline in the IPython Notebook.
        Basically we return a dummy object that implements _repr_html.
The reason to do this instead of just having this session object
implement _repr_html directly is because users will usually want
to just see one or two plots, and not all the plots and models
associated with the session.
"""
import IPython.core.displaypub as displaypub
html = self.dumps(objects)
displaypub.publish_display_data('bokeh', {'text/html': html})
return None
class NotebookSession(NotebookSessionMixin, HTMLFileSession):
""" Produces inline HTML suitable for placing into an IPython Notebook.
"""
def __init__(self, plot=None):
HTMLFileSession.__init__(self, filename=None, plot=plot)
self.plotcontext = PlotContext()
def notebooksources(self):
import IPython.core.displaypub as displaypub
# Normally this would call self.js_paths() to build a list of
# scripts or get a reference to the unified/minified JS file,
# but our static JS build process produces a single unified
# bokehJS file for inclusion in the notebook.
js_paths = self.js_paths()
css_paths = self.css_paths()
html = self._load_template(self.html_template).render(
rawjs=self._inline_scripts(js_paths).decode('utf8'),
rawcss=self._inline_css(css_paths).decode('utf8'),
js_snippets=[],
html_snippets=["<p>Configuring embedded BokehJS mode.</p>"])
displaypub.publish_display_data('bokeh', {'text/html': html})
return None
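# Illustrative notebook workflow (editor's addition; ``my_plot`` is a hypothetical Plot):
#
#     session = NotebookSession()
#     session.notebooksources()   # embed BokehJS/CSS into the notebook once
#     session.add(my_plot)
#     session.show(my_plot)       # render the plot inline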
class NotebookServerSession(NotebookSessionMixin, PlotServerSession):
""" An IPython Notebook session that is connected to a plot server.
"""
def ws_conn_string(self):
split = urlparse.urlsplit(self.root_url)
#how to fix this in bokeh and wakari?
if split.scheme == 'http':
return "ws://%s/bokeh/sub" % split.netloc
else:
return "wss://%s/bokeh/sub" % split.netloc
def dumps(self, objects):
""" Returns the HTML contents as a string
FIXME : signature different than other dumps
FIXME: should consolidate code between this one and that one.
"""
if len(objects) == 0:
objects = self._models.values()
if len(objects) == 1 and isinstance(objects[0], Plot):
the_plot = objects[0]
objects = self._models.values()
else:
the_plot = [m for m in objects if isinstance(m, Plot)][0]
return the_plot.create_html_snippet(server=True)
def show(self, *objects):
""" Displays the given objects, or all objects currently associated
with the session, inline in the IPython Notebook.
        Basically we return a dummy object that implements _repr_html.
The reason to do this instead of just having this session object
implement _repr_html directly is because users will usually want
to just see one or two plots, and not all the plots and models
associated with the session.
"""
import IPython.core.displaypub as displaypub
html = self.dumps(objects)
displaypub.publish_display_data('bokeh', {'text/html': html})
return None
def notebook_connect(self):
if self.docname is None:
raise RuntimeError("usedoc() must be called before notebook_connect()")
import IPython.core.displaypub as displaypub
msg = """<p>Connecting notebook to document "%s" at server %s</p>""" % \
(self.docname, self.root_url)
displaypub.publish_display_data('bokeh', {'text/html': msg})
return None
| 38.505572 | 87 | 0.586291 |
3cf3004039e6eab9f2c8ce4feffe5925e17d4288 | 814 | py | Python | battery_test.py | wanderer-soul92/functional-python-wanderer-soul92 | 35c355e1e2087a1a3576370ba6420a42957584ec | [
"MIT"
] | null | null | null | battery_test.py | wanderer-soul92/functional-python-wanderer-soul92 | 35c355e1e2087a1a3576370ba6420a42957584ec | [
"MIT"
] | null | null | null | battery_test.py | wanderer-soul92/functional-python-wanderer-soul92 | 35c355e1e2087a1a3576370ba6420a42957584ec | [
"MIT"
] | null | null | null | BMS_allowed_range = {'temperature': {'min': 0, 'max': 45},
'soc': {'min': 20, 'max': 80},
'charge_rate': {'min': 0, 'max': 0.8}}
def get_out_of_range_parameter(BMS_input):
    # Collect the names of every parameter whose value falls outside the allowed range.
    out_of_range_parameters = []
    for parameter, value in BMS_input.items():
        check_tolerance_range(parameter, value, out_of_range_parameters)
    return out_of_range_parameters
def check_tolerance_range(key, value, out_of_range_parameters):
    # Record the key if its value is below the allowed minimum or above the allowed maximum.
    if value < BMS_allowed_range[key]['min'] or value > BMS_allowed_range[key]['max']:
        out_of_range_parameters.append(key)
def check_battery_is_ok(BMS_input):
    # The battery is OK only when no parameter is out of range.
    out_of_range_parameters = get_out_of_range_parameter(BMS_input)
    return len(out_of_range_parameters) == 0
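if __name__ == "__main__":
    # Illustrative self-check added by the editor; the readings are made-up example
    # values, not data from a real battery management system.
    print(check_battery_is_ok({'temperature': 25, 'soc': 70, 'charge_rate': 0.7}))   # True
    print(check_battery_is_ok({'temperature': 50, 'soc': 10, 'charge_rate': 0.9}))   # False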
| 33.916667 | 91 | 0.676904 |
7dd1ac6b443ddb00051c0ccd8baa441b6907ce83 | 23,397 | py | Python | google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py | dizcology/python-aiplatform | 1a135775966c8a2303ded529eba514dcf9db7205 | [
"Apache-2.0"
] | 2 | 2021-10-02T02:25:44.000Z | 2021-11-17T10:35:01.000Z | google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | 1 | 2021-03-02T18:25:00.000Z | 2021-03-02T18:25:00.000Z | google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model as gca_model
from google.cloud.aiplatform_v1.types import model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation_slice
from google.cloud.aiplatform_v1.types import model_service
from google.longrunning import operations_pb2 # type: ignore
from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import ModelServiceGrpcTransport
class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport):
"""gRPC AsyncIO backend transport for ModelService.
A service for managing Vertex AI's machine learning Models.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def upload_model(
self,
) -> Callable[
[model_service.UploadModelRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the upload model method over gRPC.
Uploads a Model artifact into Vertex AI.
Returns:
Callable[[~.UploadModelRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "upload_model" not in self._stubs:
self._stubs["upload_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/UploadModel",
request_serializer=model_service.UploadModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["upload_model"]
@property
def get_model(
self,
) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]:
r"""Return a callable for the get model method over gRPC.
Gets a Model.
Returns:
Callable[[~.GetModelRequest],
Awaitable[~.Model]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model" not in self._stubs:
self._stubs["get_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/GetModel",
request_serializer=model_service.GetModelRequest.serialize,
response_deserializer=model.Model.deserialize,
)
return self._stubs["get_model"]
@property
def list_models(
self,
) -> Callable[
[model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse]
]:
r"""Return a callable for the list models method over gRPC.
Lists Models in a Location.
Returns:
Callable[[~.ListModelsRequest],
Awaitable[~.ListModelsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_models" not in self._stubs:
self._stubs["list_models"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/ListModels",
request_serializer=model_service.ListModelsRequest.serialize,
response_deserializer=model_service.ListModelsResponse.deserialize,
)
return self._stubs["list_models"]
@property
def update_model(
self,
) -> Callable[[model_service.UpdateModelRequest], Awaitable[gca_model.Model]]:
r"""Return a callable for the update model method over gRPC.
Updates a Model.
Returns:
Callable[[~.UpdateModelRequest],
Awaitable[~.Model]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_model" not in self._stubs:
self._stubs["update_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/UpdateModel",
request_serializer=model_service.UpdateModelRequest.serialize,
response_deserializer=gca_model.Model.deserialize,
)
return self._stubs["update_model"]
@property
def delete_model(
self,
) -> Callable[
[model_service.DeleteModelRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete model method over gRPC.
Deletes a Model.
Note: Model can only be deleted if there are no
DeployedModels created from it.
Returns:
Callable[[~.DeleteModelRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_model" not in self._stubs:
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/DeleteModel",
request_serializer=model_service.DeleteModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_model"]
@property
def export_model(
self,
) -> Callable[
[model_service.ExportModelRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, exportable, Model to a location specified by
the user. A Model is considered to be exportable if it has at
least one [supported export
format][google.cloud.aiplatform.v1.Model.supported_export_formats].
Returns:
Callable[[~.ExportModelRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_model" not in self._stubs:
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/ExportModel",
request_serializer=model_service.ExportModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_model"]
@property
def get_model_evaluation(
self,
) -> Callable[
[model_service.GetModelEvaluationRequest],
Awaitable[model_evaluation.ModelEvaluation],
]:
r"""Return a callable for the get model evaluation method over gRPC.
Gets a ModelEvaluation.
Returns:
Callable[[~.GetModelEvaluationRequest],
Awaitable[~.ModelEvaluation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model_evaluation" not in self._stubs:
self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation",
request_serializer=model_service.GetModelEvaluationRequest.serialize,
response_deserializer=model_evaluation.ModelEvaluation.deserialize,
)
return self._stubs["get_model_evaluation"]
@property
def list_model_evaluations(
self,
) -> Callable[
[model_service.ListModelEvaluationsRequest],
Awaitable[model_service.ListModelEvaluationsResponse],
]:
r"""Return a callable for the list model evaluations method over gRPC.
Lists ModelEvaluations in a Model.
Returns:
Callable[[~.ListModelEvaluationsRequest],
Awaitable[~.ListModelEvaluationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_model_evaluations" not in self._stubs:
self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations",
request_serializer=model_service.ListModelEvaluationsRequest.serialize,
response_deserializer=model_service.ListModelEvaluationsResponse.deserialize,
)
return self._stubs["list_model_evaluations"]
@property
def get_model_evaluation_slice(
self,
) -> Callable[
[model_service.GetModelEvaluationSliceRequest],
Awaitable[model_evaluation_slice.ModelEvaluationSlice],
]:
r"""Return a callable for the get model evaluation slice method over gRPC.
Gets a ModelEvaluationSlice.
Returns:
Callable[[~.GetModelEvaluationSliceRequest],
Awaitable[~.ModelEvaluationSlice]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model_evaluation_slice" not in self._stubs:
self._stubs["get_model_evaluation_slice"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice",
request_serializer=model_service.GetModelEvaluationSliceRequest.serialize,
response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize,
)
return self._stubs["get_model_evaluation_slice"]
@property
def list_model_evaluation_slices(
self,
) -> Callable[
[model_service.ListModelEvaluationSlicesRequest],
Awaitable[model_service.ListModelEvaluationSlicesResponse],
]:
r"""Return a callable for the list model evaluation slices method over gRPC.
Lists ModelEvaluationSlices in a ModelEvaluation.
Returns:
Callable[[~.ListModelEvaluationSlicesRequest],
Awaitable[~.ListModelEvaluationSlicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_model_evaluation_slices" not in self._stubs:
self._stubs["list_model_evaluation_slices"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices",
request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize,
response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize,
)
return self._stubs["list_model_evaluation_slices"]
__all__ = ("ModelServiceGrpcAsyncIOTransport",)
| 43.247689 | 98 | 0.64145 |
ddc37746b62df11d6c2e6dcd0b4958b805b2349f | 97 | py | Python | main/urls.py | Shubhashis-coder/translator | e1db53700a7bef0f6239d847b9f97b8592d0c140 | [
"MIT"
] | null | null | null | main/urls.py | Shubhashis-coder/translator | e1db53700a7bef0f6239d847b9f97b8592d0c140 | [
"MIT"
] | null | null | null | main/urls.py | Shubhashis-coder/translator | e1db53700a7bef0f6239d847b9f97b8592d0c140 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
urlpatterns = [
path('',home,name="home"),
]
| 13.857143 | 28 | 0.680412 |
dd1d8005191a71d1c3388de6051df7b08530394d | 889 | py | Python | setup.py | xc938/pypdftk | e2f1fda9033131fd8220931eeb4feed791fd7aee | [
"BSD-3-Clause"
] | null | null | null | setup.py | xc938/pypdftk | e2f1fda9033131fd8220931eeb4feed791fd7aee | [
"BSD-3-Clause"
] | null | null | null | setup.py | xc938/pypdftk | e2f1fda9033131fd8220931eeb4feed791fd7aee | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='pypdftk-snap-wrkarnd',
description='''Python wrapper for PDFTK with snap installation workaround''',
long_description=long_description,
long_description_content_type="text/markdown",
version='0.4',
author='Julien Bouquillon',
author_email='[email protected]',
url='http://github.com/revolunet/pypdftk',
py_modules=['pypdftk'],
scripts=['pypdftk.py'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
| 34.192308 | 81 | 0.619798 |
fbefb09b4a66d443a7c6274657c1da1f1b552b6a | 736 | py | Python | python/logSensors-example.py | webbhm/MHZ-16_CO2 | df099a4980b7e4a05510997de7c034037ebb0434 | [
"MIT"
] | null | null | null | python/logSensors-example.py | webbhm/MHZ-16_CO2 | df099a4980b7e4a05510997de7c034037ebb0434 | [
"MIT"
] | null | null | null | python/logSensors-example.py | webbhm/MHZ-16_CO2 | df099a4980b7e4a05510997de7c034037ebb0434 | [
"MIT"
] | null | null | null | #Check sensors and log to file
from logData import logData
from si7021 import getHumidity, getTempC
import NDIR
try:
humidity = getHumidity()
logData("si7021", "Success", "humidity", "{:10.1f}".format(humidity), '')
except (IOError):
logData("si7021", "Failure", "humidity", '', IOError)
try:
temp = getTempC()
logData("si7021", "Success", "temperature", "{:10.1f}".format(temp), '')
except IOError as e:
logData("si7021", "Failure", "temperature", '', str(e))
try:
sensor = NDIR.Sensor(0x4D)
sensor.begin()
logData("CO2 Concentration", "Success", "co2", str(sensor.getCO2()), '')
except IOError as e:
    logData("CO2 Concentration", "Failure", "co2", '', str(e))
| 28.307692 | 80 | 0.619565 |
7676c4985d061440dbc9c4913e380e3a013f4beb | 1,556 | py | Python | recipes/cymunk/__init__.py | lbovet/kivy-ios | 679fc62256ffb87b6101840563fc2952d7166e1b | [
"MIT"
] | null | null | null | recipes/cymunk/__init__.py | lbovet/kivy-ios | 679fc62256ffb87b6101840563fc2952d7166e1b | [
"MIT"
] | null | null | null | recipes/cymunk/__init__.py | lbovet/kivy-ios | 679fc62256ffb87b6101840563fc2952d7166e1b | [
"MIT"
] | null | null | null | """
Author: Lawrence Du
E-mail: [email protected]
"""
from toolchain import CythonRecipe,shprint
import os
from os.path import join
import sh
class CymunkRecipe(CythonRecipe):
version = 'master'
url = 'https://github.com/tito/cymunk/archive/{version}.zip'
name = 'cymunk'
depends = ['hostpython']
cythonize = True
def build_arch(self, arch):
"""
        Override build_arch to avoid calling setup.py here (call it in
install() instead).
"""
self.cythonize_build()
self.biglink()
def install(self):
"""
Do the equivalent of
python setup.py build_ext install
while setting the proper environment variables
"""
arch = list(self.filtered_archs)[0]
build_env = self.get_recipe_env(arch)
hostpython = sh.Command(self.ctx.hostpython)
subdir_path = self.get_build_dir(arch.arch)
setup_path = join(subdir_path,"setup.py")
dest_dir = join (self.ctx.dist_dir, "root", "python")
build_env['PYTHONPATH'] = join(dest_dir, 'lib', 'python2.7', 'site-packages')
#Note: Throws error if PATH is not set. I am not sure if this will cause problems
# in other architectures.
build_env['PATH']= os.environ.get('PATH')
shprint(hostpython,
setup_path,
"build_ext",
#"--compiler=mingw32", #note: throws clang error
"install",
_env=build_env)
recipe = CymunkRecipe()
| 28.814815 | 89 | 0.595758 |
871f3738977c4424e0f083b6edbfe016025f6d8e | 14,346 | py | Python | RL/rl/rllib_script/agent/utils.py | robot-perception-group/AutonomousBlimpDRL | a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee | [
"MIT"
] | 8 | 2021-11-21T20:47:37.000Z | 2022-03-15T09:50:06.000Z | RL/rl/rllib_script/agent/utils.py | robot-perception-group/AutonomousBlimpDRL | a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee | [
"MIT"
] | null | null | null | RL/rl/rllib_script/agent/utils.py | robot-perception-group/AutonomousBlimpDRL | a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee | [
"MIT"
] | null | null | null | import copy
import importlib
import json
import os
import re
from copy import deepcopy
from functools import partial
from typing import Callable, Dict, List, Optional, TypeVar, Union
import yaml
def with_lock(func: Callable):
"""Use as decorator (@withlock) around object methods that need locking.
Note: The object must have a self._lock = threading.Lock() property.
Locking thus works on the object level (no two locked methods of the same
object can be called asynchronously).
Args:
func (Callable): The function to decorate/wrap.
Returns:
Callable: The wrapped (object-level locked) function.
"""
def wrapper(self, *a, **k):
try:
with self._lock:
return func(self, *a, **k)
except AttributeError as e:
if "has no attribute '_lock'" in e.args[0]:
raise AttributeError(
"Object {} must have a `self._lock` property (assigned "
"to a threading.RLock() object in its "
"constructor)!".format(self))
raise e
return wrapper
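# Minimal illustrative sketch (editor's addition) of the contract described in the
# docstring; the Counter class is hypothetical:
#
#     import threading
#
#     class Counter:
#         def __init__(self):
#             self._lock = threading.RLock()
#             self.value = 0
#
#         @with_lock
#         def increment(self):
#             self.value += 1   # executed while holding self._lock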
def deep_update(
original: dict,
new_dict: dict,
new_keys_allowed: str = False,
allow_new_subkey_list: Optional[List[str]] = None,
override_all_if_type_changes: Optional[List[str]] = None) -> dict:
"""Updates original dict with values from new_dict recursively.
If new key is introduced in new_dict, then if new_keys_allowed is not
True, an error will be thrown. Further, for sub-dicts, if the key is
in the allow_new_subkey_list, then new subkeys can be introduced.
Args:
original (dict): Dictionary with default values.
new_dict (dict): Dictionary with values to be updated
new_keys_allowed (bool): Whether new keys are allowed.
allow_new_subkey_list (Optional[List[str]]): List of keys that
correspond to dict values where new subkeys can be introduced.
This is only at the top level.
override_all_if_type_changes(Optional[List[str]]): List of top level
keys with value=dict, for which we always simply override the
entire value (dict), iff the "type" key in that value dict changes.
"""
allow_new_subkey_list = allow_new_subkey_list or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
raise Exception("Unknown config parameter `{}` ".format(k))
        # Both original value and new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
            # Check old type vs new one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Allowed key -> ok to add new subkeys.
elif k in allow_new_subkey_list:
deep_update(original[k], value, True)
# Non-allowed key.
else:
deep_update(original[k], value, new_keys_allowed)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original
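# Illustrative example (editor's addition) of the merge semantics, using made-up keys:
#
#     base = {"env_config": {"seed": 1}, "lr": 0.001}
#     new = {"env_config": {"horizon": 200}, "lr": 0.0005}
#     deep_update(base, new, new_keys_allowed=False, allow_new_subkey_list=["env_config"])
#     # base is now {"env_config": {"seed": 1, "horizon": 200}, "lr": 0.0005}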
def merge_dicts(d1: dict, d2: dict) -> dict:
"""
Args:
d1 (dict): Dict 1.
d2 (dict): Dict 2.
Returns:
dict: A new dict that is d1 and d2 deep merged.
"""
merged = copy.deepcopy(d1)
deep_update(merged, d2, True, [])
return merged
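# e.g. (editor's addition): merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
# returns {"a": {"x": 1, "y": 2}, "b": 3} without modifying either input.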
def force_list(elements=None, to_tuple=False):
"""
Makes sure `elements` is returned as a list, whether `elements` is a single
item, already a list, or a tuple.
Args:
elements (Optional[any]): The inputs as single item, list, or tuple to
be converted into a list/tuple. If None, returns empty list/tuple.
to_tuple (bool): Whether to use tuple (instead of list).
Returns:
Union[list,tuple]: All given elements in a list/tuple depending on
`to_tuple`'s value. If elements is None,
returns an empty list/tuple.
"""
ctor = list
if to_tuple is True:
ctor = tuple
return ctor() if elements is None else ctor(elements) \
if type(elements) in [list, tuple] else ctor([elements])
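# Illustrative behaviour (editor's addition):
#     force_list(None)                    # -> []
#     force_list(3)                       # -> [3]
#     force_list((1, 2))                  # -> [1, 2]
#     force_list([1, 2], to_tuple=True)   # -> (1, 2)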
def from_config(cls, config=None, **kwargs):
"""Uses the given config to create an object.
If `config` is a dict, an optional "type" key can be used as a
"constructor hint" to specify a certain class of the object.
If `config` is not a dict, `config`'s value is used directly as this
"constructor hint".
The rest of `config` (if it's a dict) will be used as kwargs for the
constructor. Additional keys in **kwargs will always have precedence
(overwrite keys in `config` (if a dict)).
Also, if the config-dict or **kwargs contains the special key "_args",
it will be popped from the dict and used as *args list to be passed
separately to the constructor.
The following constructor hints are valid:
- None: Use `cls` as constructor.
- An already instantiated object: Will be returned as is; no
constructor call.
- A string or an object that is a key in `cls`'s `__type_registry__`
dict: The value in `__type_registry__` for that key will be used
as the constructor.
- A python callable: Use that very callable as constructor.
- A string: Either a json/yaml filename or the name of a python
module+class (e.g. "ray.rllib. [...] .[some class name]")
Args:
cls (class): The class to build an instance for (from `config`).
config (Optional[dict, str]): The config dict or type-string or
filename.
Keyword Args:
kwargs (any): Optional possibility to pass the constructor arguments in
here and use `config` as the type-only info. Then we can call
this like: from_config([type]?, [**kwargs for constructor])
If `config` is already a dict, then `kwargs` will be merged
with `config` (overwriting keys in `config`) after "type" has
been popped out of `config`.
If a constructor of a Configurable needs *args, the special
key `_args` can be passed inside `kwargs` with a list value
(e.g. kwargs={"_args": [arg1, arg2, arg3]}).
Returns:
any: The object generated from the config.
"""
# `cls` is the config (config is None).
if config is None and isinstance(cls, (dict, str)):
config = cls
cls = None
# `config` is already a created object of this class ->
# Take it as is.
elif isinstance(cls, type) and isinstance(config, cls):
return config
# `type_`: Indicator for the Configurable's constructor.
# `ctor_args`: *args arguments for the constructor.
# `ctor_kwargs`: **kwargs arguments for the constructor.
# Try to copy, so caller can reuse safely.
try:
config = deepcopy(config)
except Exception:
pass
if isinstance(config, dict):
type_ = config.pop("type", None)
if type_ is None and isinstance(cls, str):
type_ = cls
ctor_kwargs = config
# Give kwargs priority over things defined in config dict.
# This way, one can pass a generic `spec` and then override single
# constructor parameters via the kwargs in the call to `from_config`.
ctor_kwargs.update(kwargs)
else:
type_ = config
if type_ is None and "type" in kwargs:
type_ = kwargs.pop("type")
ctor_kwargs = kwargs
# Special `_args` field in kwargs for *args-utilizing constructors.
ctor_args = force_list(ctor_kwargs.pop("_args", []))
# Figure out the actual constructor (class) from `type_`.
    # None: Try `__default_constructor__` (if no args/kwargs), only then
    # constructor of cls (using args/kwargs).
if type_ is None:
# We have a default constructor that was defined directly by cls
# (not by its children).
if cls is not None and hasattr(cls, "__default_constructor__") and \
cls.__default_constructor__ is not None and \
ctor_args == [] and \
(
not hasattr(cls.__bases__[0],
"__default_constructor__")
or
cls.__bases__[0].__default_constructor__ is None or
cls.__bases__[0].__default_constructor__ is not
cls.__default_constructor__
):
constructor = cls.__default_constructor__
# Default constructor's keywords into ctor_kwargs.
if isinstance(constructor, partial):
kwargs = merge_dicts(ctor_kwargs, constructor.keywords)
constructor = partial(constructor.func, **kwargs)
ctor_kwargs = {} # erase to avoid duplicate kwarg error
# No default constructor -> Try cls itself as constructor.
else:
constructor = cls
# Try the __type_registry__ of this class.
else:
constructor = lookup_type(cls, type_)
# Found in cls.__type_registry__.
if constructor is not None:
pass
# type_ is False or None (and this value is not registered) ->
# return value of type_.
elif type_ is False or type_ is None:
return type_
# Python callable.
elif callable(type_):
constructor = type_
# A string: Filename or a python module+class or a json/yaml str.
elif isinstance(type_, str):
if re.search("\\.(yaml|yml|json)$", type_):
return from_file(cls, type_, *ctor_args, **ctor_kwargs)
# Try un-json/un-yaml'ing the string into a dict.
obj = yaml.safe_load(type_)
if isinstance(obj, dict):
return from_config(cls, obj)
try:
obj = from_config(cls, json.loads(type_))
except json.JSONDecodeError:
pass
else:
return obj
# Test for absolute module.class path specifier.
if type_.find(".") != -1:
module_name, function_name = type_.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
constructor = getattr(module, function_name)
# Module not found.
except (ModuleNotFoundError, ImportError, AttributeError):
pass
# If constructor still not found, try attaching cls' module,
# then look for type_ in there.
if constructor is None:
if isinstance(cls, str):
# Module found, but doesn't have the specified
# c'tor/function.
raise ValueError(
f"Full classpath specifier ({type_}) must be a valid "
"full [module].[class] string! E.g.: "
"`my.cool.module.MyCoolClass`.")
try:
module = importlib.import_module(cls.__module__)
constructor = getattr(module, type_)
except (ModuleNotFoundError, ImportError, AttributeError):
# Try the package as well.
try:
package_name = importlib.import_module(
cls.__module__).__package__
module = __import__(package_name, fromlist=[type_])
constructor = getattr(module, type_)
except (ModuleNotFoundError, ImportError, AttributeError):
pass
if constructor is None:
raise ValueError(
f"String specifier ({type_}) must be a valid filename, "
f"a [module].[class], a class within '{cls.__module__}', "
f"or a key into {cls.__name__}.__type_registry__!")
if not constructor:
raise TypeError(
"Invalid type '{}'. Cannot create `from_config`.".format(type_))
# Create object with inferred constructor.
try:
object_ = constructor(*ctor_args, **ctor_kwargs)
# Catch attempts to construct from an abstract class and return None.
except TypeError as e:
if re.match("Can't instantiate abstract class", e.args[0]):
return None
raise e # Re-raise
# No sanity check for fake (lambda)-"constructors".
if type(constructor).__name__ != "function":
assert isinstance(
object_, constructor.func
if isinstance(constructor, partial) else constructor)
return object_
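# Illustrative sketch (not part of the original module): building an object via
# `from_config` from a plain dict with a "type" constructor hint. The class and
# field names below are made up; any callable or registered type works as well.
class _ExampleSchedule:
    def __init__(self, start, end):
        self.start, self.end = start, end
_example_obj = from_config(None, {"type": _ExampleSchedule, "start": 1.0}, end=0.1)
assert isinstance(_example_obj, _ExampleSchedule)
assert _example_obj.end == 0.1  # kwargs take precedence over/extend the config dict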
def from_file(cls, filename, *args, **kwargs):
"""
Create object from config saved in filename. Expects json or yaml file.
Args:
filename (str): File containing the config (json or yaml).
Returns:
any: The object generated from the file.
"""
path = os.path.join(os.getcwd(), filename)
if not os.path.isfile(path):
raise FileNotFoundError("File '{}' not found!".format(filename))
with open(path, "rt") as fp:
if path.endswith(".yaml") or path.endswith(".yml"):
config = yaml.safe_load(fp)
else:
config = json.load(fp)
# Add possible *args.
config["_args"] = args
return from_config(cls, config=config, **kwargs)
def lookup_type(cls, type_):
if cls is not None and hasattr(cls, "__type_registry__") and \
isinstance(cls.__type_registry__, dict) and (
type_ in cls.__type_registry__ or (
isinstance(type_, str) and
re.sub("[\\W_]", "", type_.lower()) in cls.__type_registry__)):
available_class_for_type = cls.__type_registry__.get(type_)
if available_class_for_type is None:
available_class_for_type = \
cls.__type_registry__[re.sub("[\\W_]", "", type_.lower())]
return available_class_for_type
return None
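# Illustrative sketch (not part of the original module): how `lookup_type`
# resolves a registered constructor hint. The registry contents are made up;
# real classes would normally populate `__type_registry__` with their own types.
class _ExampleBase:
    __type_registry__ = {"linear": dict, "constant": list}
assert lookup_type(_ExampleBase, "linear") is dict
assert lookup_type(_ExampleBase, "unknown-key") is None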
| 39.961003 | 79 | 0.601073 |
14f83a853f2ab7a6763173cfd1d20ea078d3dcfe | 268 | py | Python | minitest.py | Genzo4/proxy_manager | 6547b6ad1c5a16261dffef04f2be9168551fa8d8 | [
"MIT"
] | 2 | 2021-06-04T15:36:39.000Z | 2022-02-03T07:17:26.000Z | minitest.py | Genzo4/proxy_manager | 6547b6ad1c5a16261dffef04f2be9168551fa8d8 | [
"MIT"
] | null | null | null | minitest.py | Genzo4/proxy_manager | 6547b6ad1c5a16261dffef04f2be9168551fa8d8 | [
"MIT"
] | null | null | null | from proxy_manager_g4 import ProxyManager
from proxy_manager_g4.consts import PROTOCOL_HTTPS
proxy_manager = ProxyManager(protocol=PROTOCOL_HTTPS, anonymity=True)
for i in range(1, 1000):
pr = proxy_manager.get_random()
print(i, ": ", pr, "(", pr.used, ")")
| 29.777778 | 69 | 0.742537 |
01980a7040d7af4545e51778223b1a1c4b423514 | 711 | py | Python | onboarding/exporting/tests.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 8 | 2016-09-06T17:47:24.000Z | 2021-03-09T15:07:23.000Z | onboarding/exporting/tests.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 1 | 2020-04-16T01:11:25.000Z | 2020-04-16T01:11:25.000Z | onboarding/exporting/tests.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 3 | 2017-07-20T11:26:56.000Z | 2019-05-05T11:25:41.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon.test import helpers as test
class OvaTests(test.TestCase):
# Unit tests for exporting.
def test_me(self):
self.assertTrue(1 + 1 == 2)
| 35.55 | 75 | 0.74121 |
0c64342d90d76cb022c856cb1492c0ad0a508b6b | 878 | py | Python | modulo/modulo/urls.py | programavalentina/AssistanceControl_CreativeSpace | c7bdd21380528b10c696f94917fae0cdcb8c4d94 | [
"MIT"
] | 1 | 2018-10-29T18:50:12.000Z | 2018-10-29T18:50:12.000Z | modulo/modulo/urls.py | programavalentina/login_-GeekSpace | c52465adb1306c7a9d32f963e3cb6260cf1058fe | [
"MIT"
] | null | null | null | modulo/modulo/urls.py | programavalentina/login_-GeekSpace | c52465adb1306c7a9d32f963e3cb6260cf1058fe | [
"MIT"
] | null | null | null | """modulo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
urlpatterns = [
path('users/', include(('apps.users.urls', 'users'), namespace='users')),
path('', include(('apps.modulo.urls', 'modulo'), namespace='modulo')),
]
| 31.357143 | 77 | 0.693622 |
7ba32ec3f4b5b608ef2a8490361f7253e8615d86 | 20,762 | py | Python | src/kestrel/session.py | jmcasava/kestrel-lang | 1a5480e8bbf1e2ceb1723f9c4feab66cf5664d02 | [
"Apache-2.0"
] | null | null | null | src/kestrel/session.py | jmcasava/kestrel-lang | 1a5480e8bbf1e2ceb1723f9c4feab66cf5664d02 | [
"Apache-2.0"
] | null | null | null | src/kestrel/session.py | jmcasava/kestrel-lang | 1a5480e8bbf1e2ceb1723f9c4feab66cf5664d02 | [
"Apache-2.0"
] | null | null | null | """A Kestrel session provides an isolated stateful runtime space for a huntflow.
A huntflow is the source code or script of a cyber threat hunt, which can be
developed offline in a text editor or interactively as the hunt goes. A Kestrel
session provides the runtime space for a huntflow that allows execution and
inspection of hunt statements in the huntflow. The :class:`Session` class in this
module supports both non-interactive and interactive execution of huntflows as
well as comprehensive APIs besides execution.
.. highlight:: python
Examples:
A non-interactive execution of a huntflow::
from kestrel.session import Session
with Session() as session:
open(huntflow_file) as hff:
huntflow = hff.read()
session.execute(huntflow)
An interactive composition and execution of a huntflow::
from kestrel.session import Session
with Session() as session:
try:
hunt_statement = input(">>> ")
except EOFError:
print()
break
else:
output = session.execute(hunt_statement)
print(output)
Export Kestrel variable to Python::
from kestrel.session import Session
huntflow = ""\"newvar = GET process
FROM stixshifter://workstationX
WHERE [process:name = 'cmd.exe']""\"
with Session() as session:
session.execute(huntflow)
cmds = session.get_variable("newvar")
for process in cmds:
print(process["name"])
"""
import tempfile
import os
import pathlib
import shutil
import uuid
import logging
import re
import toml
import time
import math
from datetime import datetime
from lark import UnexpectedCharacters, UnexpectedToken
from kestrel.exceptions import (
KestrelSyntaxError,
NoValidConfiguration,
InvalidStixPattern,
)
from kestrel.syntax.parser import get_all_input_var_names
from kestrel.syntax.parser import parse
from kestrel.syntax.utils import get_keywords
from kestrel.semantics import *
from kestrel.codegen import commands
from kestrel.codegen.display import DisplayBlockSummary
from kestrel.codegen.summary import gen_variable_summary
from firepit import get_storage
from firepit.exceptions import StixPatternError
from kestrel.utils import set_current_working_directory, config_paths
from kestrel.datasource import DataSourceManager
from kestrel.analytics import AnalyticsManager
_logger = logging.getLogger(__name__)
class Session(object):
"""Kestrel Session class
A session object needs to be instantiated to create a Kestrel runtime space.
This is the foundation of multi-user dynamic composition and execution of
huntflows. A Kestrel session has two important properties:
- Stateful: a session keeps track of states/effects of statements that have
been previously executed in this session, e.g., the values of previous
established Kestrel variables. A session can invoke more than one
:meth:`execute`, and each :meth:`execute` can process a block of Kestrel code,
i.e., multiple Kestrel statements.
- Isolated: each session is established in an isolated space (memory and
file system):
- Memory isolation is accomplished by OS process and memory space
management automatically -- different Kestrel session instances will not
overlap in memory.
- File system isolation is accomplished with the setup and management of
a temporary runtime directory for each session.
Args:
runtime_dir (str): to be used for :attr:`runtime_directory`.
store_path (str): the file path or URL to initialize :attr:`store`.
debug_mode (bool): to be assign to :attr:`debug_mode`.
Attributes:
session_id (str): The Kestrel session ID, which will be created as a random
UUID if not given in the constructor.
runtime_directory (str): The runtime directory stores session related
data in the file system such as local cache of queried results,
session log, and may be the internal store. The session will use
a temporary directory derived from :attr:`session_id` if the path is
not specified in constructor parameters.
store (firepit.SqlStorage): The internal store used
by the session to normalize queried results, implement cache, and
realize the low level code generation. The store from the
``firepit`` package provides an operation abstraction
over the raw internal database: either a local store, e.g., SQLite,
or a remote one, e.g., PostgreSQL. If not specified from the
constructor parameter, the session will use the default SQLite
store in the :attr:`runtime_directory`.
debug_mode (bool): The debug flag set by the session constructor. If
True, a fixed debug link ``/tmp/kestrel`` of :attr:`runtime_directory`
will be created, and :attr:`runtime_directory` will not be removed by
the session when terminating.
runtime_directory_is_owned_by_upper_layer (bool): The flag to specify
who owns and manages :attr:`runtime_directory`. False by default,
where the Kestrel session will manage session file system isolation --
create and destory :attr:`runtime_directory`. If True, the runtime
directory is created, passed in to the session constructor, and will
be destroyed by the calling site.
symtable (dict): The continuously updated *symbol table* of the running
session, which is a dictionary mapping from Kestrel variable names
``str`` to their associated Kestrel internal data structure
``VarStruct``.
data_source_manager (kestrel.datasource.DataSourceManager): The
data source manager handles queries to all data source interfaces such as
local file stix bundle and stix-shifter. It also stores previous
queried data sources for the session, which is used for a syntax
sugar when there is no data source in a Kestrel ``GET`` statement -- the
last data source is implicitly used.
analytics_manager (kestrel.analytics.AnalyticsManager): The analytics
manager handles all analytics related operations such as executing an
analytics or getting the list of analytics for code auto-completion.
"""
def __init__(
self, session_id=None, runtime_dir=None, store_path=None, debug_mode=False
):
_logger.debug(
f"Establish session with session_id: {session_id}, runtime_dir: {runtime_dir}, store_path:{store_path}, debug_mode:{debug_mode}"
)
self.config = self._load_configuration()
if session_id:
self.session_id = session_id
else:
self.session_id = str(uuid.uuid4())
self.debug_mode = debug_mode
# default value of runtime_directory ownership
self.runtime_directory_is_owned_by_upper_layer = False
# runtime (temporary) directory to store session-related data
sys_tmp_dir = pathlib.Path(tempfile.gettempdir())
if runtime_dir:
if os.path.exists(runtime_dir):
self.runtime_directory_is_owned_by_upper_layer = True
else:
pathlib.Path(runtime_dir).mkdir(parents=True, exist_ok=True)
self.runtime_directory = runtime_dir
else:
tmp_dir = sys_tmp_dir / ("kestrel-session-" + self.session_id)
self.runtime_directory = tmp_dir.resolve()
if tmp_dir.exists():
if tmp_dir.is_dir():
_logger.debug(
"Kestrel session with runtime_directory exists, reuse it."
)
else:
_logger.debug(
"strange tmp file that uses kestrel session dir name, remove it."
)
os.remove(self.runtime_directory)
else:
_logger.debug(
f"create new session runtime_directory: {self.runtime_directory}."
)
tmp_dir.mkdir(parents=True, exist_ok=True)
if self.debug_mode:
runtime_directory_master = sys_tmp_dir / "kestrel"
if runtime_directory_master.exists():
runtime_directory_master.unlink()
runtime_directory_master.symlink_to(self.runtime_directory)
# local database of SQLite or Parquet
if not store_path:
# use the default local database in config.py
store_path = os.path.join(
self.runtime_directory, self.config["session"]["local_database_path"]
)
self.store = get_storage(store_path, self.session_id)
# Symbol Table
# linking variables in syntax with internal data structure
# handling fallback_var for the most recently accessed var
# {"var": VarStruct}
self.symtable = {}
self.data_source_manager = DataSourceManager()
self.analytics_manager = AnalyticsManager()
iso_ts_regex = r"\d{4}(-\d{2}(-\d{2}(T\d{2}(:\d{2}(:\d{2}Z?)?)?)?)?)?"
self._iso_ts = re.compile(iso_ts_regex)
def execute(self, codeblock):
"""Execute a Kestrel code block.
A Kestrel statement or multiple consecutive statements constitute a code
block, which can be executed by this method. New Kestrel variables can be
created in a code block such as ``newvar = GET ...``. Two types of Kestrel
variables can be legally referred in a Kestrel statement in the code block:
* A Kestrel variable created in the same code block prior to the reference.
* A Kestrel variable created in code blocks previously executed by the
session. The session maintains the :attr:`symtable` to keep the state
of all previously executed Kestrel statements and their established Kestrel
variables.
Args:
codeblock (str): the code block to be executed.
Returns:
            A list of outputs, one for each statement in the input code block.
"""
ast = self.parse(codeblock)
return self._execute_ast(ast)
def parse(self, codeblock):
"""Parse a Kestrel code block.
Parse one or multiple consecutive Kestrel statements (a Kestrel code block)
into the abstract syntax tree. This could be useful for frontends that
need to parse a statement *without* executing it in order to render
some type of interface.
Args:
codeblock (str): the code block to be parsed.
Returns:
            A list of dictionaries, each of which is an *abstract syntax
            tree* for one Kestrel statement in the input code block.
"""
try:
ast = parse(
codeblock,
self.config["language"]["default_variable"],
self.config["language"]["default_sort_order"],
)
except UnexpectedCharacters as err:
raise KestrelSyntaxError(err.line, err.column, "character", err.char)
except UnexpectedToken as err:
raise KestrelSyntaxError(err.line, err.column, "token", err.token)
return ast
def get_variable_names(self):
"""Get the list of Kestrel variable names created in this session."""
return list(self.symtable.keys())
def get_variable(self, var_name):
"""Get the data of Kestrel variable ``var_name``, which is list of homogeneous entities (STIX SCOs)."""
# In the future, consider returning a generator here?
return self.symtable[var_name].get_entities()
def create_variable(self, var_name, objects, object_type=None):
"""Create a new Kestrel variable ``var_name`` with data in ``objects``.
This is the API equivalent to Kestrel command ``NEW``, while allowing more
flexible objects types (Python objects) than the objects serialized
into text/JSON in the command ``NEW``.
Args:
var_name (str): The Kestrel variable to be created.
objects (list): List of Python objects, currently support either a
list of ``str`` or a list of ``dict``.
object_type (str): The Kestrel entity type for the created Kestrel
variable. It overrides the ``type`` field in ``objects``. If
there is no ``type`` field in ``objects``, e.g., ``objects`` is a
list of ``str``, this parameter is required.
"""
virtual_stmt_ast = [
{"command": "new", "output": var_name, "data": objects, "type": object_type}
]
self._execute_ast(virtual_stmt_ast)
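    # Illustrative sketch (not part of the original module): creating a Kestrel
    # variable directly from Python objects via `create_variable`. The variable
    # name and process records below are made up.
    #
    #   with Session() as s:
    #       s.create_variable(
    #           "suspicious_procs",
    #           [{"name": "cmd.exe"}, {"name": "powershell.exe"}],
    #           object_type="process")
    #       print(s.get_variable("suspicious_procs"))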
def do_complete(self, code, cursor_pos):
"""Kestrel code auto-completion.
This function gives a list of suggestions on the inputted partial Kestrel
code to complete it. The current version sets the context for
completion on word level -- it will reason around the last word in the
input Kestrel code to provide suggestions. Data sources and analytics names
can also be completed since the entire URI are single words (no space
in data source or analytic name string). This feature can be used to
list all available data sources or analytics, e.g., giving the last
partial word ``stixshifter://``.
Currently this method computes code completion based on:
* Kestrel keywords
* Kestrel variables
* data source names
* analytics names
Args:
code (str): Kestrel code.
cursor_pos (int): the position to start completion (index in ``code``).
Returns:
A list of suggested strings to complete the code.
"""
prefix = code[:cursor_pos]
last_word = prefix.split(" ")[-1]
if "START" in prefix or "STOP" in prefix:
return self._get_complete_timestamp(last_word)
elif "://" in last_word:
scheme, path = last_word.split("://")
if scheme in self.data_source_manager.schemes():
data_source_names = (
self.data_source_manager.list_data_sources_from_scheme(scheme)
)
allnames = [scheme + "://" + name for name in data_source_names]
_logger.debug(
f"auto-complete from data source interface {scheme}: {allnames}"
)
elif scheme in self.analytics_manager.schemes():
analytics_names = self.analytics_manager.list_analytics_from_scheme(
scheme
)
allnames = [scheme + "://" + name for name in analytics_names]
_logger.debug(
f"auto-complete from analytics interface {scheme}: {allnames}"
)
else:
allnames = []
_logger.debug("cannot find auto-complete interface")
else:
allnames = (
get_keywords()
+ self.get_variable_names()
+ self.data_source_manager.schemes()
+ self.analytics_manager.schemes()
)
_logger.debug("standard auto-complete")
suggestions = [
name[len(last_word) :] for name in allnames if name.startswith(last_word)
]
return suggestions
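    # Illustrative sketch (not part of the original module): word-level
    # completion over keywords, variables, and interface schemes. The partial
    # hunt statement below is made up; the suggestions returned depend on the
    # configured data source interfaces.
    #
    #   with Session() as s:
    #       code = "procs = GET process FROM stixshifter://"
    #       print(s.do_complete(code, len(code)))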
def close(self):
"""Explicitly close the session.
Only needed for non-context-managed sessions.
"""
del self.store
if not self.runtime_directory_is_owned_by_upper_layer and not self.debug_mode:
shutil.rmtree(self.runtime_directory)
def _execute_ast(self, ast):
displays = []
new_vars = []
start_exec_ts = time.time()
for stmt in ast:
try:
# pre-processing: semantics check and completion
# - ensure all parsed elements not empty
# - check existance of argument variables
# - complete data source if omitted by user
# - complete input context
check_elements_not_empty(stmt)
for input_var_name in get_all_input_var_names(stmt):
check_var_exists(input_var_name, self.symtable)
if stmt["command"] == "get":
recognize_var_source(stmt, self.symtable)
complete_data_source(
stmt, self.data_source_manager.queried_data_sources[-1]
)
if stmt["command"] == "load" or stmt["command"] == "save":
stmt["path"] = pathlib.Path(stmt["path"]).resolve()
if stmt["command"] == "find":
check_semantics_on_find(stmt, self.symtable[stmt["input"]].type)
if "attrs" in stmt:
var_struct = self.symtable[stmt["input"]]
stmt["attrs"] = normalize_attrs(stmt, var_struct)
# code generation and execution
execute_cmd = getattr(commands, stmt["command"])
# set current working directory for each command execution
# use this to implicitly pass runtime_dir as an argument to each command
# the context manager switch back cwd when the command execution completes
with set_current_working_directory(self.runtime_directory):
output_var_struct, display = execute_cmd(stmt, self)
# exception completion
except StixPatternError as e:
raise InvalidStixPattern(e.stix)
# post-processing: symbol table update
if output_var_struct is not None:
output_var_name = stmt["output"]
self._update_symbol_table(output_var_name, output_var_struct)
if output_var_name != self.config["language"]["default_variable"]:
if output_var_name in new_vars:
new_vars.remove(output_var_name)
new_vars.append(output_var_name)
if display is not None:
displays.append(display)
end_exec_ts = time.time()
execution_time_sec = math.ceil(end_exec_ts - start_exec_ts)
if new_vars:
vars_summary = [
gen_variable_summary(vname, self.symtable[vname]) for vname in new_vars
]
displays.append(DisplayBlockSummary(vars_summary, execution_time_sec))
return displays
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def _update_symbol_table(self, output_var_name, output_var_struct):
self.symtable[output_var_name] = output_var_struct
self.symtable[self.config["language"]["default_variable"]] = output_var_struct
def _load_configuration(self):
configs = []
for path in config_paths():
try:
configs.append(toml.load(path))
_logger.debug(f"Configuration file {path} loaded successfully.")
except FileNotFoundError:
_logger.debug(f"Configuration file {path} does not exist.")
except toml.decoder.TomlDecodeError:
_logger.debug(f"Invalid configuration file {path}.")
if not configs:
raise NoValidConfiguration
else:
config = configs.pop(0)
for c in configs:
config.update(c)
_logger.debug(f"Configuration loaded: {config}")
return config
def _get_complete_timestamp(self, ts_str):
valid_ts_formats = [
"%Y",
"%Y-%m",
"%Y-%m-%d",
"%Y-%m-%dT%H",
"%Y-%m-%dT%H:%M",
"%Y-%m-%dT%H:%M:%S",
]
complete_ts = []
for vts in valid_ts_formats:
ts = ts_str.split("'")[-1]
matched = self._iso_ts.match(ts)
if matched:
try:
ts_iso = datetime.strptime(matched.group(), vts).isoformat()
complete_ts.append(ts_iso[len(ts) :] + "Z'")
if complete_ts:
return complete_ts
except:
_logger.debug(f"Try to match timestamp {ts} by format {vts}")
pass
| 40.081081 | 140 | 0.621761 |
94884373220351b79d480d251680dd6885e44332 | 6,110 | py | Python | CIM15/IEC61970/Informative/InfAssets/WindingInsulation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 58 | 2015-04-22T10:41:03.000Z | 2022-03-29T16:04:34.000Z | CIM15/IEC61970/Informative/InfAssets/WindingInsulation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 12 | 2015-08-26T03:57:23.000Z | 2020-12-11T20:14:42.000Z | CIM15/IEC61970/Informative/InfAssets/WindingInsulation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 35 | 2015-01-10T12:21:03.000Z | 2020-09-09T08:18:16.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class WindingInsulation(IdentifiedObject):
"""Winding insulation condition as a result of a test.Winding insulation condition as a result of a test.
"""
def __init__(self, insulationPFStatus='', insulationResistance='', leakageReactance=0.0, Ground=None, TransformerObservation=None, ToWinding=None, FromWinding=None, status=None, *args, **kw_args):
"""Initialises a new 'WindingInsulation' instance.
@param insulationPFStatus: Status of Winding Insulation Power Factor as of statusDate: Acceptable, Minor Deterioration or Moisture Absorption, Major Deterioration or Moisture Absorption, Failed.
@param insulationResistance: For testType, status of Winding Insulation Resistance as of statusDate. Typical values are: Acceptable, Questionable, Failed.
@param leakageReactance: As of statusDate, the leakage reactance measured at the 'from' winding with the 'to' winding short-circuited and all other windings open-circuited.
@param Ground:
@param TransformerObservation:
@param ToWinding:
@param FromWinding:
@param status:
"""
#: Status of Winding Insulation Power Factor as of statusDate: Acceptable, Minor Deterioration or Moisture Absorption, Major Deterioration or Moisture Absorption, Failed.
self.insulationPFStatus = insulationPFStatus
#: For testType, status of Winding Insulation Resistance as of statusDate. Typical values are: Acceptable, Questionable, Failed.
self.insulationResistance = insulationResistance
#: As of statusDate, the leakage reactance measured at the 'from' winding with the 'to' winding short-circuited and all other windings open-circuited.
self.leakageReactance = leakageReactance
self._Ground = None
self.Ground = Ground
self._TransformerObservation = None
self.TransformerObservation = TransformerObservation
self._ToWinding = None
self.ToWinding = ToWinding
self._FromWinding = None
self.FromWinding = FromWinding
self.status = status
super(WindingInsulation, self).__init__(*args, **kw_args)
_attrs = ["insulationPFStatus", "insulationResistance", "leakageReactance"]
_attr_types = {"insulationPFStatus": str, "insulationResistance": str, "leakageReactance": float}
_defaults = {"insulationPFStatus": '', "insulationResistance": '', "leakageReactance": 0.0}
_enums = {}
_refs = ["Ground", "TransformerObservation", "ToWinding", "FromWinding", "status"]
_many_refs = []
def getGround(self):
return self._Ground
def setGround(self, value):
if self._Ground is not None:
filtered = [x for x in self.Ground.WindingInsulations if x != self]
self._Ground._WindingInsulations = filtered
self._Ground = value
if self._Ground is not None:
if self not in self._Ground._WindingInsulations:
self._Ground._WindingInsulations.append(self)
Ground = property(getGround, setGround)
def getTransformerObservation(self):
return self._TransformerObservation
def setTransformerObservation(self, value):
if self._TransformerObservation is not None:
filtered = [x for x in self.TransformerObservation.WindingInsulationPFs if x != self]
self._TransformerObservation._WindingInsulationPFs = filtered
self._TransformerObservation = value
if self._TransformerObservation is not None:
if self not in self._TransformerObservation._WindingInsulationPFs:
self._TransformerObservation._WindingInsulationPFs.append(self)
TransformerObservation = property(getTransformerObservation, setTransformerObservation)
def getToWinding(self):
return self._ToWinding
def setToWinding(self, value):
if self._ToWinding is not None:
filtered = [x for x in self.ToWinding.ToWindingInsulations if x != self]
self._ToWinding._ToWindingInsulations = filtered
self._ToWinding = value
if self._ToWinding is not None:
if self not in self._ToWinding._ToWindingInsulations:
self._ToWinding._ToWindingInsulations.append(self)
ToWinding = property(getToWinding, setToWinding)
def getFromWinding(self):
return self._FromWinding
def setFromWinding(self, value):
if self._FromWinding is not None:
filtered = [x for x in self.FromWinding.FromWindingInsulations if x != self]
self._FromWinding._FromWindingInsulations = filtered
self._FromWinding = value
if self._FromWinding is not None:
if self not in self._FromWinding._FromWindingInsulations:
self._FromWinding._FromWindingInsulations.append(self)
FromWinding = property(getFromWinding, setFromWinding)
status = None
| 44.59854 | 203 | 0.716858 |
6ec4f3d722c6b198fa6f06107514eae852858c9f | 65,921 | py | Python | pysnmp-with-texts/SLA-MONITOR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/SLA-MONITOR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/SLA-MONITOR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module SLA-MONITOR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SLA-MONITOR-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:06:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
Dscp, = mibBuilder.importSymbols("DIFFSERV-DSCP-TC", "Dscp")
InetAddressType, InetPortNumber, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetPortNumber", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
ModuleIdentity, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter64, Bits, Gauge32, MibIdentifier, ObjectIdentity, Counter32, NotificationType, iso, Integer32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter64", "Bits", "Gauge32", "MibIdentifier", "ObjectIdentity", "Counter32", "NotificationType", "iso", "Integer32", "IpAddress")
RowStatus, TextualConvention, DisplayString, StorageType = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString", "StorageType")
policy, = mibBuilder.importSymbols("SYNOPTICS-ROOT-MIB", "policy")
slaMonitorMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 4, 8))
slaMonitorMib.setRevisions(('2015-05-29 00:00', '2013-03-05 00:00', '2013-01-31 00:00', '2012-12-11 00:00', '2012-09-19 00:00', '2012-09-04 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: slaMonitorMib.setRevisionsDescriptions(('Version 6: Corrected MIB compiling errors.', 'Version 5: Added support for remote NTR and RTP test initiation and results retrieval using SNMP. Defined attribute to support refusing server test requests.', 'Version 4: Added slaMonitorAgentServerBypass and updated conformance statements for cross-platform use.', 'Version 3: Added slaMonitorAgentCertFileInstallAction, slaMonitorAgentCertFile for VSP9K/ERS8800', 'Version 2: Added slaMonitorAgentConfiguredAgentVrfName, slaMonitorAgentSlaParameter for VSP9K', 'Version 1: Initial version. Basic agent support.',))
if mibBuilder.loadTexts: slaMonitorMib.setLastUpdated('201505290000Z')
if mibBuilder.loadTexts: slaMonitorMib.setOrganization('Avaya Networks, Inc.')
if mibBuilder.loadTexts: slaMonitorMib.setContactInfo(' John Seligson Avaya Networks, Inc. 4655 Great America Parkway Santa Clara, CA 95054 USA Phone: +1 408 496 3424 Email: [email protected]')
if mibBuilder.loadTexts: slaMonitorMib.setDescription('An Avaya management module containing data definitions related to the SLA Monitor Agent.')
slaMonitorMibNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 0))
slaMonitorMibClasses = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 1))
slaMonitorMibConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 2))
slaMonitorAgtClasses = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1))
slaMonitorAgentStatus = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentStatus.setDescription('The value of this attribute indicates whether the SLA Monitor agent is enabled or not. An agent that is enabled will respond to SLA Monitor server discovery packets. A disabled agent will remain dormant until manually enabled.')
slaMonitorAgentAddressType = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentAddressType.setDescription('The type of IP address used by the agent. This object is limited to IPv4 and IPv6 addresses.')
slaMonitorAgentAddress = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentAddress.setDescription('The IP address currently used by the SLA Monitor agent.')
slaMonitorAgentPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 4), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentPort.setDescription('The UDP port currently used by the SLA Monitor agent for agent-server communication.')
slaMonitorAgentRegisteredWithServer = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("registered", 1), ("notRegistered", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentRegisteredWithServer.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRegisteredWithServer.setDescription('The value of this attribute indicates whether the SLA Monitor agent is registered with an SLA Monitor server or not.')
slaMonitorAgentRegisteredServerAddrType = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 6), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerAddrType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerAddrType.setDescription('The type of IP address used by the server. This object is limited to IPv4 and IPv6 addresses.')
slaMonitorAgentRegisteredServerAddr = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 7), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerAddr.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerAddr.setDescription('The IP address of the SLA Monitor server with which the agent is currently registered.')
slaMonitorAgentRegisteredServerPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 8), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRegisteredServerPort.setDescription('The TCP port utilized by the SLA Monitor server with which the agent is currently registered.')
slaMonitorAgentRegistrationTime = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 9), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentRegistrationTime.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRegistrationTime.setDescription('Specifies the maximum amount of time, in seconds, until the current registration period expires.')
slaMonitorAgentCliAvailable = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("notAvailable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentCliAvailable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentCliAvailable.setDescription('The value of this attribute indicates whether the SLA Monitor agent CLI is available or not.')
slaMonitorAgentCliTimeout = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 11), Unsigned32()).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentCliTimeout.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentCliTimeout.setDescription("Specifies the maximum amount of time, in seconds, until the CLI session is automatically terminated. The value of this attribute is pertinent only if CLI timeouts are enabled (slaMonitorAgentCliTimeoutMode equals 'enable(1)').")
slaMonitorAgentCliTimeoutMode = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentCliTimeoutMode.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentCliTimeoutMode.setDescription("The value of this attribute indicates whether the SLA Monitor agent automatic CLI session timeout is enabled or not. When this attribute is set to 'enable(1)', a CLI session will automatically timeout after slaMonitorAgentCliTimeout seconds.")
slaMonitorAgentSupportedApps = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 13), Bits().clone(namedValues=NamedValues(("other", 0), ("ntr", 1), ("rtp", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentSupportedApps.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentSupportedApps.setDescription('An enumeration of the SLA Monitor agent capabilities.')
slaMonitorAgentConfiguredAgentAddrType = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 14), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentAddrType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentAddrType.setDescription('The type of IP address used by the agent. This object is limited to IPv4 and IPv6 addresses.')
slaMonitorAgentConfiguredAgentAddr = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 15), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentAddr.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentAddr.setDescription('The IP address of the SLA Monitor agent. If the value of this attribute is 0.0.0.0, the SLA Monitor agent will utilize an active device IP address by default.')
slaMonitorAgentConfiguredAgentPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 16), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentPort.setDescription('The UDP port utilized by the SLA Monitor agent for agent-server communication. If the value of this attribute is zero, the SLA Monitor agent will utilize a default port value for communication purposes.')
slaMonitorAgentConfiguredServerAddrType = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 17), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerAddrType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerAddrType.setDescription('The type of IP address used by the server. This object is limited to IPv4 and IPv6 addresses.')
slaMonitorAgentConfiguredServerAddr = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 18), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerAddr.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerAddr.setDescription('The IP address of the SLA Monitor server with which the agent is allowed to register. If the value of this attribute is not 0.0.0.0, the SLA Monitor agent may only respond to server requests originating at the specified IP address (or the IP address specified by the slaMonitorAgentConfiguredAltServerAddr object if it is non-zero).')
slaMonitorAgentConfiguredServerPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 19), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredServerPort.setDescription('The TCP port utilized by the SLA Monitor server with which the agent is allowed to register. If the value of this attribute is non-zero, the SLA Monitor agent may only respond to server requests originating from the specified port (or from the port specified by the slaMonitorAgentConfiguredAltServerPort object if it is non-zero).')
slaMonitorAgentConfiguredAltServerAddrType = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 20), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerAddrType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerAddrType.setDescription('The type of IP address used by the server. This object is limited to IPv4 and IPv6 addresses.')
slaMonitorAgentConfiguredAltServerAddr = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 21), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerAddr.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerAddr.setDescription('The IP address of the alternate SLA Monitor server with which the agent is allowed to register. If the value of this attribute is not 0.0.0.0, the SLA Monitor agent may only respond to server requests originating at the specified IP address (or the IP address specified by the slaMonitorAgentConfiguredServerAddr object if it is non-zero).')
slaMonitorAgentConfiguredAltServerPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 22), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAltServerPort.setDescription('The TCP port utilized by the alternate SLA Monitor server with which the agent is allowed to register. If the value of this attribute is non-zero, the SLA Monitor agent may only respond to server requests originating from the specified port (or from the port specified by the slaMonitorAgentConfiguredServerPort object if it is non-zero).')
slaMonitorAgentToAgentPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 23), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentToAgentPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentToAgentPort.setDescription('The base UDP port currently used by the SLA Monitor agent for agent-agent communication. The base UDP port is used to derive multiple agent communication ports. An even base port values causes slaMonitorAgentToAgentPort and slaMonitorAgentToAgentPort+1 ports to be used. An odd base port values causes slaMonitorAgentToAgentPort and slaMonitorAgentToAgentPort-1 ports to be used.')
slaMonitorAgentConfiguredAgentToAgentPort = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 24), InetPortNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentToAgentPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentToAgentPort.setDescription('The UDP port utilized by the SLA Monitor agent for agent-agent communication. If the value of this attribute is zero, the SLA Monitor agent will utilize a default port value for the base agent-agent UDP communication port.')
slaMonitorAgentEncryptionSupport = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("notAvailable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorAgentEncryptionSupport.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentEncryptionSupport.setDescription('Specifies if encrypted agent-server communication is supported by the agent.')
slaMonitorAgentConfiguredAgentVrfName = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 26), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentVrfName.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfiguredAgentVrfName.setDescription('The name of the virtual Router which SLA Monitor Agent IP address (slaMonitorAgentConfiguredAgentAddr) is associated with. This is only supported on VSP9K for now')
slaMonitorAgentSlaParameter = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 27), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentSlaParameter.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentSlaParameter.setDescription(' String directly passed to the SLA Agent as parameter')
slaMonitorAgentCertFileInstallAction = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("install", 2), ("deinstall", 3))).clone('noAction')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentCertFileInstallAction.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentCertFileInstallAction.setDescription("Operation object to install and deinstall the certificate file to a platform-specific location. 'noAction' is the default setting and is always returned when this object is queried. 'install': object slaMonitorAgentCertFile will be used as the source certificate file to install. 'deinstall': object slaMonitorAgentCertFile is ignored and the certificate file is deinstalled (removed) from the installed location.")
slaMonitorAgentCertFile = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 29), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentCertFile.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentCertFile.setDescription('The certificate file name for installation. It is only used when slaMonitorAgentCertFileInstallAction is set to install; otherwise it is ignored.')
slaMonitorAgentServerBypass = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentServerBypass.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentServerBypass.setDescription("The value of this attribute determines whether the SLA Monitor agent can perform certain operations without being registered with a server. When this attribute is set to 'enable(1)', an agent will respond to agent-to-agent test requests regardless of the server registration status. A 'disable(2)' setting will restrict agent responses when the agent is not registered with an SLA Monitor server.")
slaMonitorAgentRefuseServerTests = MibScalar((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("allow", 1), ("refuse", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slaMonitorAgentRefuseServerTests.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRefuseServerTests.setDescription("The value of this attribute determines whether the SLA Monitor agent will refuse test requests from the server. When this attribute is set to 'allow(1)', an agent will accept test requests from the server. A 'refuse(2)' setting will cause the agent to reject test requests from the server. From a usage perspective, it may be necessary to temporarily refuse server test requests if the agent is being actively managed by a server but the administrator wishes to use the CLI or SNMP interfaces to initiate tests manually.")
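# Illustrative sketch (not part of the generated MIB module): reading the agent
# status scalar defined above with pysnmp's high-level API. The target address,
# port and community string below are placeholders.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errInd, errStat, errIdx, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('SLA-MONITOR-MIB', 'slaMonitorAgentStatus', 0))))
#   for varBind in varBinds:
#       print(' = '.join(x.prettyPrint() for x in varBind))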
slaMonitorAgtTestClasses = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2))
slaMonitorNtrCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1), )
if mibBuilder.loadTexts: slaMonitorNtrCtrlTable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlTable.setDescription('Defines the SLAMon agent NTR test control table supporting the ability to initiate NTR tests remotely. The results of NTR test operations are stored in the slaMonitorNtrResultsTable and the slaMonitorNtrHopsTable.')
slaMonitorNtrCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1), ).setIndexNames((0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlOwnerId"), (0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlTestName"))
if mibBuilder.loadTexts: slaMonitorNtrCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlEntry.setDescription('Defines an entry in the slaMonitorNtrCtrlTable. The first index element, slaMonitorNtrCtrlOwnerId, is of type SnmpAdminString, allowing for a management application to identify its entries. The second index, slaMonitorNtrCtrlTestName (also an SnmpAdminString), enables the same management application to have multiple requests configured.')
slaMonitorNtrCtrlOwnerId = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: slaMonitorNtrCtrlOwnerId.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlOwnerId.setDescription('The owner of an NTR test.')
slaMonitorNtrCtrlTestName = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: slaMonitorNtrCtrlTestName.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlTestName.setDescription('The name of an NTR test. This value is locally unique, within the scope of an slaMonitorNtrCtrlOwnerId.')
slaMonitorNtrCtrlTargetAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 3), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlTargetAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlTargetAddressType.setDescription('Specifies the target address type.')
slaMonitorNtrCtrlTargetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 4), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlTargetAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlTargetAddress.setDescription('Specifies the target address used for the NTR test. The address type can be determined by examining the value of the corresponding slaMonitorNtrCtrlTargetAddressType. A value for this object MUST be provided during entry creation.')
slaMonitorNtrCtrlDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 5), Dscp()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlDscp.setDescription('Specifies the DSCP value for use in packets that are generated by the NTR test.')
slaMonitorNtrCtrlAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlAttempts.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlAttempts.setDescription('Specifies the number of attempts that are generated by the NTR test.')
slaMonitorNtrCtrlPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10000, 200000)).clone(20000)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlPeriod.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlPeriod.setDescription('Specifies the interval between packets, in microseconds, generated by the NTR test.')
slaMonitorNtrCtrlAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlAdminStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlAdminStatus.setDescription("Used to initiate an NTR test: enabled(1) - initiate the test as defined by this slaMonitorNtrCtrlEntry. disabled(2) - deactivate the test defined by this slaMonitorNtrCtrlEntry, if possible. Refer to the corresponding slaMonitorNtrResultsOperStatus to determine the operational state of the test defined by this entry. When queried, 'disabled(2)' is always returned for this attribute.")
slaMonitorNtrCtrlLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlLabel.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlLabel.setDescription('A label used to reference the NTR control entry in a textual manner.')
slaMonitorNtrCtrlStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 10), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlStorageType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlStorageType.setDescription("The storage type for this conceptual row. Conceptual rows having the value 'permanent' need not allow write-access to any columnar objects in the row.")
slaMonitorNtrCtrlRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 1, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorNtrCtrlRowStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrCtrlRowStatus.setDescription('This object allows entries to be created and deleted in the slaMonitorNtrCtrlTable. Deletion of an entry in this table results in the deletion of all corresponding (i.e., entries with the same slaMonitorNtrCtrlOwnerId and slaMonitorNtrCtrlTestName index values) entries in the slaMonitorNtrResultsTable and the slaMonitorNtrHopsTable. Initiation of an NTR test corresponding to this entry is controlled via slaMonitorNtrCtrlAdminStatus and not by entry creation. The operational state of an NTR test can be determined by examining the corresponding slaMonitorNtrResultsOperStatus object.')
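# --- Editor's illustrative note (not part of the generated MIB code) -------
# A minimal, hedged manager-side sketch of the workflow described above:
# create an slaMonitorNtrCtrlEntry with RowStatus createAndGo(4), supplying
# the mandatory target address, then start the test by setting
# slaMonitorNtrCtrlAdminStatus to enabled(1). The agent host, community
# string and the ('mgr', 'test1') index values are placeholders, not values
# taken from this MIB.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, setCmd)
#
#   target = (SnmpEngine(), CommunityData('private'),
#             UdpTransportTarget(('agent.example.com', 161)), ContextData())
#   # Create the control row and point it at 192.0.2.10 (ipv4).
#   next(setCmd(*target,
#        ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                   'slaMonitorNtrCtrlTargetAddressType', 'mgr', 'test1'), 1),
#        ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                   'slaMonitorNtrCtrlTargetAddress', 'mgr', 'test1'),
#                   bytes([192, 0, 2, 10])),
#        ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                   'slaMonitorNtrCtrlRowStatus', 'mgr', 'test1'), 4)))
#   # Kick off the test; progress is reported in the slaMonitorNtrResultsTable.
#   next(setCmd(*target,
#        ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                   'slaMonitorNtrCtrlAdminStatus', 'mgr', 'test1'), 1)))
# ---------------------------------------------------------------------------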
slaMonitorNtrResultsTable = MibTable((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2), )
if mibBuilder.loadTexts: slaMonitorNtrResultsTable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsTable.setDescription("Defines the SLAMon agent NTR results table that is used to track the status of NTR tests initiated through the slaMonitorNtrCtrlTable. An entry is added to the slaMonitorNtrResultsTable when an NTR test defined by an slaMonitorNtrCtrlEntry is started by transition of its slaMonitorNtrCtrlAdminStatus object to 'enabled(1)'. If the object slaMonitorNtrCtrlAdminStatus has the value 'disabled(2)', then successfully writing 'enabled(1)' to the object slaMonitorNtrCtrlAdminStatus re-initializes an already existing entry in the slaMonitorNtrResultsTable. The values of objects in the re-initialized entry are the same as the values of objects in a new entry would be. An entry is removed from the slaMonitorNtrResultsTable when its corresponding slaMonitorNtrCtrlEntry is deleted.")
slaMonitorNtrResultsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1), ).setIndexNames((0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlOwnerId"), (0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlTestName"))
if mibBuilder.loadTexts: slaMonitorNtrResultsEntry.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsEntry.setDescription('Defines an entry in the slaMonitorNtrResultsTable. The slaMonitorNtrResultsTable has the same indexing as the slaMonitorNtrCtrlTable so that a slaMonitorNtrResultsEntry corresponds to the slaMonitorNtrCtrlEntry that caused it to be created.')
slaMonitorNtrResultsOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inProgress", 1), ("aborted", 2), ("completed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsOperStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsOperStatus.setDescription('Reflects the operational state of an slaMonitorNtrCtrlEntry: inProgress(1) - test is active. aborted(2) - test has been stopped. Refer to object slaMonitorNtrResultsAbortData for details. completed(3) - test is completed. Refer to objects slaMonitorNtrResultsCompletionData and slaMonitorNtrResultsCompletionSummary for details.')
slaMonitorNtrResultsSrcAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcAddressType.setDescription('Specifies the address type of the source IP address used for this test.')
slaMonitorNtrResultsSrcAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcAddress.setDescription('Specifies the source IP address used for this test.')
slaMonitorNtrResultsDstAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 4), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsDstAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsDstAddressType.setDescription('Specifies the address type of the destination IP address used for this test.')
slaMonitorNtrResultsDstAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 5), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsDstAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsDstAddress.setDescription('Specifies the destination IP address used for this test.')
slaMonitorNtrResultsSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 6), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsSrcPort.setDescription('Specifies the Layer 4 source port used for this test.')
slaMonitorNtrResultsDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 7), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsDstPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsDstPort.setDescription('Specifies the Layer 4 destination port used for this test.')
slaMonitorNtrResultsDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 8), Dscp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsDscp.setDescription('Specifies the DSCP value contained in NTR test packets generated by the agent for this test.')
slaMonitorNtrResultsTTL = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsTTL.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsTTL.setDescription('The maximum time-to-live (TTL) value used during the execution of this test.')
slaMonitorNtrResultsCompletionData = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 10), Bits().clone(namedValues=NamedValues(("other", 0), ("remoteUnreachable", 1), ("remotePortUnreachable", 2), ("remoteNetUnreachable", 3), ("remoteHostUnreachable", 4), ("remoteProtocolUnreachable", 5), ("remoteFirewalledUnreachable", 6), ("remoteResponded", 7), ("remoteResponseLikely", 8), ("remoteNoResponse", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsCompletionData.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsCompletionData.setDescription('Provides details related to test completion. Multiple bits may be set to convey test status.')
slaMonitorNtrResultsCompletionSummary = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsCompletionSummary.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsCompletionSummary.setDescription('A user-readable string that is used to convey test results for completed tests in an easy-to-understand format.')
slaMonitorNtrResultsAbortData = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 12), Bits().clone(namedValues=NamedValues(("other", 0), ("agentDisabled", 1), ("agentBusy", 2), ("timeout", 3), ("cancelled", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsAbortData.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsAbortData.setDescription('Provides details for tests that were aborted. Multiple bits may be set to convey test status.')
slaMonitorNtrResultsHopCount = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 2, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrResultsHopCount.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrResultsHopCount.setDescription('The total number of hops for which information has been acquired during the execution of this test.')
slaMonitorNtrHopsTable = MibTable((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3), )
if mibBuilder.loadTexts: slaMonitorNtrHopsTable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsTable.setDescription('Defines the SLAMon agent NTR test hop table for keeping track of the results of NTR tests on a per-hop basis.')
slaMonitorNtrHopsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1), ).setIndexNames((0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlOwnerId"), (0, "SLA-MONITOR-MIB", "slaMonitorNtrCtrlTestName"), (0, "SLA-MONITOR-MIB", "slaMonitorNtrHopsHopIndex"))
if mibBuilder.loadTexts: slaMonitorNtrHopsEntry.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsEntry.setDescription('Defines an entry in the slaMonitorNtrHopsTable. The first two index elements identify the slaMonitorNtrCtrlEntry with which this slaMonitorNtrHopsEntry is associated. The third index element, slaMonitorNtrHopsHopIndex, selects a hop in an NTR test path.')
slaMonitorNtrHopsHopIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: slaMonitorNtrHopsHopIndex.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsHopIndex.setDescription('Specifies the hop index for an NTR test hop. Values for this object with respect to the same slaMonitorNtrCtrlOwnerId and slaMonitorNtrCtrlTestName MUST start at 1 and be given increasing values for subsequent hops. The value of slaMonitorNtrHopsHopIndex does not necessarily represent the number of the hop on the traced path, though this is normally the case.')
slaMonitorNtrHopsTgtAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrHopsTgtAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsTgtAddressType.setDescription('This object indicates the type of address stored in the corresponding slaMonitorNtrHopsTgtAddress object.')
slaMonitorNtrHopsTgtAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrHopsTgtAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsTgtAddress.setDescription('This object reports the IP address associated with the NTR test hop.')
slaMonitorNtrHopsRtt = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrHopsRtt.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsRtt.setDescription('The NTR test round-trip-time (RTT), in microseconds, computed for this hop.')
slaMonitorNtrHopsIngressDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 5), Dscp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrHopsIngressDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsIngressDscp.setDescription('The DSCP value in the NTR test packet received by the target host (slaMonitorNtrHopsTgtAddress) for this hop.')
slaMonitorNtrHopsEgressDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 3, 1, 6), Dscp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorNtrHopsEgressDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorNtrHopsEgressDscp.setDescription('The DSCP value in the NTR test response packet received by the SLAMon agent for this hop.')
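# --- Editor's illustrative note (not part of the generated MIB code) -------
# A hedged sketch of reading per-hop NTR results once a test has completed:
# walking slaMonitorNtrHopsRtt (and, if desired, the address and DSCP
# columns) with a GETNEXT loop yields one value per hop, indexed by owner,
# test name and slaMonitorNtrHopsHopIndex. Host and community string below
# are placeholders.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, nextCmd)
#
#   for err_ind, err_stat, err_idx, var_binds in nextCmd(
#           SnmpEngine(), CommunityData('public'),
#           UdpTransportTarget(('agent.example.com', 161)), ContextData(),
#           ObjectType(ObjectIdentity('SLA-MONITOR-MIB', 'slaMonitorNtrHopsRtt')),
#           lexicographicMode=False):
#       if err_ind or err_stat:
#           break
#       for name, value in var_binds:
#           print(name.prettyPrint(), value.prettyPrint())  # RTT in microseconds
# ---------------------------------------------------------------------------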
slaMonitorRtpCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4), )
if mibBuilder.loadTexts: slaMonitorRtpCtrlTable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlTable.setDescription('Defines the SLAMon agent RTP test control table supporting the ability to initiate RTP tests remotely. The results of RTP test operations are stored in the slaMonitorRtpResultsTable.')
slaMonitorRtpCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1), ).setIndexNames((0, "SLA-MONITOR-MIB", "slaMonitorRtpCtrlOwnerId"), (0, "SLA-MONITOR-MIB", "slaMonitorRtpCtrlTestName"))
if mibBuilder.loadTexts: slaMonitorRtpCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlEntry.setDescription('Defines an entry in the slaMonitorRtpCtrlTable. The first index element, slaMonitorRtpCtrlOwnerId, is of type SnmpAdminString, allowing for a management application to identify its entries. The second index, slaMonitorRtpCtrlTestName (also an SnmpAdminString), enables the same management application to have multiple requests configured.')
slaMonitorRtpCtrlOwnerId = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: slaMonitorRtpCtrlOwnerId.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlOwnerId.setDescription('The owner of an RTP test.')
slaMonitorRtpCtrlTestName = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: slaMonitorRtpCtrlTestName.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlTestName.setDescription('The name of an RTP test. This is locally unique, within the scope of an slaMonitorRtpCtrlOwnerId.')
slaMonitorRtpCtrlTargetAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 3), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlTargetAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlTargetAddressType.setDescription('Specifies the target address type.')
slaMonitorRtpCtrlTargetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 4), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlTargetAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlTargetAddress.setDescription('Specifies the target address used for the RTP test. The address type can be determined by examining the value of the corresponding slaMonitorRtpCtrlTargetAddressType. A value for this object MUST be provided during entry creation.')
slaMonitorRtpCtrlDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 5), Dscp()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlDscp.setDescription('Specifies the DSCP value for use in packets that are generated by the RTP test.')
slaMonitorRtpCtrlTestPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 100)).clone(50)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlTestPackets.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlTestPackets.setDescription('Specifies the number of test packets that are generated by the RTP test. Test packets are used for jitter determination.')
slaMonitorRtpCtrlSyncPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 100)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlSyncPackets.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlSyncPackets.setDescription('Specifies the number of synchronization packets that are generated by the RTP test. Sync packets are used for network delay determination.')
slaMonitorRtpCtrlPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10000, 200000)).clone(20000)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlPeriod.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlPeriod.setDescription('Specifies the interval between packets (in microseconds) generated by the RTP test.')
slaMonitorRtpCtrlAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlAdminStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlAdminStatus.setDescription("Used to initiate an RTP test: enabled(1) - initiate the test as defined by this slaMonitorRtpCtrlEntry. disabled(2) - deactivate the test defined by this slaMonitorRtpCtrlEntry, if possible. Refer to the corresponding slaMonitorRtpResultsOperStatus to determine the operational state of the test defined by this entry. When queried, 'disabled(2)' is always returned for this attribute.")
slaMonitorRtpCtrlLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlLabel.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlLabel.setDescription('A label used to reference the RTP control entry in a textual manner.')
slaMonitorRtpCtrlStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 11), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlStorageType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlStorageType.setDescription("The storage type for this conceptual row. Conceptual rows having the value 'permanent' need not allow write-access to any columnar objects in the row.")
slaMonitorRtpCtrlRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 4, 1, 12), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slaMonitorRtpCtrlRowStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpCtrlRowStatus.setDescription('This object allows entries to be created and deleted in the slaMonitorRtpCtrlTable. Deletion of an entry in this table results in the deletion of all corresponding (i.e., entries with the same slaMonitorRtpCtrlOwnerId and slaMonitorRtpCtrlTestName index values) entries in the slaMonitorRtpResultsTable. Initiation of an RTP test corresponding to this entry is controlled via slaMonitorRtpCtrlAdminStatus and not by entry creation. The operational state of an RTP test can be determined by examining the corresponding slaMonitorRtpResultsOperStatus object.')
slaMonitorRtpResultsTable = MibTable((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5), )
if mibBuilder.loadTexts: slaMonitorRtpResultsTable.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsTable.setDescription("Defines the SLAMon agent RTP results table that is used to track the status of RTP tests initiated through the slaMonitorRtpCtrlTable. An entry is added to the slaMonitorRtpResultsTable when an RTP test defined by an slaMonitorRtpCtrlEntry is started by transition of its slaMonitorRtpCtrlAdminStatus object to 'enabled(1)'. If the object slaMonitorRtpCtrlAdminStatus has the value 'disabled(2)', then successfully writing 'enabled(1)' to the object slaMonitorRtpCtrlAdminStatus re-initializes an already existing entry in the slaMonitorRtpResultsTable. The values of objects in the re-initialized entry are the same as the values of objects in a new entry would be. An entry is removed from the slaMonitorRtpResultsTable when its corresponding slaMonitorRtpCtrlEntry is deleted.")
slaMonitorRtpResultsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1), ).setIndexNames((0, "SLA-MONITOR-MIB", "slaMonitorRtpCtrlOwnerId"), (0, "SLA-MONITOR-MIB", "slaMonitorRtpCtrlTestName"))
if mibBuilder.loadTexts: slaMonitorRtpResultsEntry.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsEntry.setDescription('Defines an entry in the slaMonitorRtpResultsTable. The slaMonitorRtpResultsTable has the same indexing as the slaMonitorRtpCtrlTable so that a slaMonitorRtpResultsEntry corresponds to the slaMonitorRtpCtrlEntry that caused it to be created.')
slaMonitorRtpResultsOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inProgress", 1), ("aborted", 2), ("completed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsOperStatus.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsOperStatus.setDescription('Reflects the operational state of an slaMonitorRtpCtrlEntry: inProgress(1) - test is active. aborted(2) - test has been stopped. Refer to object slaMonitorRtpResultsAbortData for details. completed(3) - test is completed. Refer to objects that contain completed test results (all attributes except slaMonitorRtpResultsAbortData) for details.')
slaMonitorRtpResultsSrcAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcAddressType.setDescription('Specifies the address type of the source IP address used for this test.')
slaMonitorRtpResultsSrcAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcAddress.setDescription('Specifies the source IP address used for this test.')
slaMonitorRtpResultsDstAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 4), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsDstAddressType.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsDstAddressType.setDescription('Specifies the address type of the destination IP address used for this test.')
slaMonitorRtpResultsDstAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 5), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsDstAddress.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsDstAddress.setDescription('Specifies the destination IP address used for this test.')
slaMonitorRtpResultsSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 6), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsSrcPort.setDescription('Specifies the Layer 4 source port used for this test.')
slaMonitorRtpResultsDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 7), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsDstPort.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsDstPort.setDescription('Specifies the Layer 4 destination port used for this test.')
slaMonitorRtpResultsDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 8), Dscp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsDscp.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsDscp.setDescription('Specifies the DSCP value contained in RTP test packets generated by the agent for this test.')
slaMonitorRtpResultsAverageDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsAverageDelay.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsAverageDelay.setDescription('The average network delay (RTT) experienced during this test sequence, in microseconds.')
slaMonitorRtpResultsMedianDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsMedianDelay.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsMedianDelay.setDescription('The median network delay (RTT) experienced during this test sequence, in microseconds.')
slaMonitorRtpResultsPacketLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsPacketLoss.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsPacketLoss.setDescription('A count of packets lost during this test sequence.')
slaMonitorRtpResultsOutOfOrderArrivals = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsOutOfOrderArrivals.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsOutOfOrderArrivals.setDescription('The number of out-of-order arrivals detected during this test sequence.')
slaMonitorRtpResultsJitterQuartile0 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile0.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile0.setDescription('The quartile 0 network jitter value, in microseconds, experienced during this test sequence.')
slaMonitorRtpResultsJitterQuartile1 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile1.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile1.setDescription('The quartile 1 network jitter value, in microseconds, experienced during this test sequence.')
slaMonitorRtpResultsJitterQuartile2 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile2.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile2.setDescription('The quartile 2 network jitter value, in microseconds, experienced during this test sequence.')
slaMonitorRtpResultsJitterQuartile3 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile3.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile3.setDescription('The quartile 3 network jitter value, in microseconds, experienced during this test sequence.')
slaMonitorRtpResultsJitterQuartile4 = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 17), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile4.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsJitterQuartile4.setDescription('The quartile 4 network jitter value, in microseconds, experienced during this test sequence.')
slaMonitorRtpResultsAbortData = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 4, 8, 1, 2, 5, 1, 18), Bits().clone(namedValues=NamedValues(("other", 0), ("agentDisabled", 1), ("agentBusy", 2), ("timeout", 3), ("cancelled", 4), ("deniedByTarget", 5), ("networkIssue", 6), ("timeSync", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slaMonitorRtpResultsAbortData.setStatus('current')
if mibBuilder.loadTexts: slaMonitorRtpResultsAbortData.setDescription('Provides details for tests that were aborted. Multiple bits may be set to convey test status.')
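# --- Editor's illustrative note (not part of the generated MIB code) -------
# A hedged sketch of collecting RTP test results: after a test has been
# started through the slaMonitorRtpCtrlTable, poll slaMonitorRtpResultsOperStatus
# for the same (owner, test name) index until it leaves inProgress(1); on
# completed(3) read the delay, loss and jitter-quartile columns, on
# aborted(2) inspect slaMonitorRtpResultsAbortData. Host, community string
# and the ('mgr', 'test1') index are placeholders.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   err_ind, err_stat, err_idx, var_binds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('agent.example.com', 161)), ContextData(),
#       ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                  'slaMonitorRtpResultsOperStatus', 'mgr', 'test1')),
#       ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                  'slaMonitorRtpResultsAverageDelay', 'mgr', 'test1')),
#       ObjectType(ObjectIdentity('SLA-MONITOR-MIB',
#                  'slaMonitorRtpResultsJitterQuartile2', 'mgr', 'test1'))))
# ---------------------------------------------------------------------------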
slaMonitorAgentExceptionDetected = NotificationType((1, 3, 6, 1, 4, 1, 45, 4, 8, 0, 1))
if mibBuilder.loadTexts: slaMonitorAgentExceptionDetected.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentExceptionDetected.setDescription('Indicates that an exception event that impacts the local SLA Monitor agent has been detected.')
slaMonitorMibCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 1))
slaMonitorMibGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 2))
slaMonitorMibCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 1, 1)).setObjects(("SLA-MONITOR-MIB", "slaMonitorAgentConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slaMonitorMibCompliance = slaMonitorMibCompliance.setStatus('current')
if mibBuilder.loadTexts: slaMonitorMibCompliance.setDescription('Describes the requirements for conformance to the Avaya Networks SLA Monitor Agent MIB.')
slaMonitorAgentConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 2, 1)).setObjects(("SLA-MONITOR-MIB", "slaMonitorAgentStatus"), ("SLA-MONITOR-MIB", "slaMonitorAgentAddressType"), ("SLA-MONITOR-MIB", "slaMonitorAgentAddress"), ("SLA-MONITOR-MIB", "slaMonitorAgentPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentRegisteredWithServer"), ("SLA-MONITOR-MIB", "slaMonitorAgentRegisteredServerAddrType"), ("SLA-MONITOR-MIB", "slaMonitorAgentRegisteredServerAddr"), ("SLA-MONITOR-MIB", "slaMonitorAgentRegisteredServerPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentRegistrationTime"), ("SLA-MONITOR-MIB", "slaMonitorAgentCliAvailable"), ("SLA-MONITOR-MIB", "slaMonitorAgentCliTimeout"), ("SLA-MONITOR-MIB", "slaMonitorAgentCliTimeoutMode"), ("SLA-MONITOR-MIB", "slaMonitorAgentSupportedApps"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAgentAddrType"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAgentAddr"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAgentPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredServerAddrType"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredServerAddr"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredServerPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAltServerAddrType"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAltServerAddr"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAltServerPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentToAgentPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAgentToAgentPort"), ("SLA-MONITOR-MIB", "slaMonitorAgentEncryptionSupport"), ("SLA-MONITOR-MIB", "slaMonitorAgentConfiguredAgentVrfName"), ("SLA-MONITOR-MIB", "slaMonitorAgentSlaParameter"), ("SLA-MONITOR-MIB", "slaMonitorAgentCertFileInstallAction"), ("SLA-MONITOR-MIB", "slaMonitorAgentCertFile"), ("SLA-MONITOR-MIB", "slaMonitorAgentServerBypass"), ("SLA-MONITOR-MIB", "slaMonitorAgentRefuseServerTests"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slaMonitorAgentConfigGroup = slaMonitorAgentConfigGroup.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentConfigGroup.setDescription('Objects used for SLA Monitor Agent configuration and monitoring.')
slaMonitorAgentNtrTestGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 2, 2)).setObjects(("SLA-MONITOR-MIB", "slaMonitorNtrCtrlTargetAddressType"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlTargetAddress"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlDscp"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlAttempts"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlPeriod"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlAdminStatus"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlLabel"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlStorageType"), ("SLA-MONITOR-MIB", "slaMonitorNtrCtrlRowStatus"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsOperStatus"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsSrcAddressType"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsSrcAddress"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsDstAddressType"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsDstAddress"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsSrcPort"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsDstPort"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsDscp"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsTTL"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsCompletionData"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsCompletionSummary"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsAbortData"), ("SLA-MONITOR-MIB", "slaMonitorNtrResultsHopCount"), ("SLA-MONITOR-MIB", "slaMonitorNtrHopsTgtAddressType"), ("SLA-MONITOR-MIB", "slaMonitorNtrHopsTgtAddress"), ("SLA-MONITOR-MIB", "slaMonitorNtrHopsRtt"), ("SLA-MONITOR-MIB", "slaMonitorNtrHopsIngressDscp"), ("SLA-MONITOR-MIB", "slaMonitorNtrHopsEgressDscp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slaMonitorAgentNtrTestGroup = slaMonitorAgentNtrTestGroup.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentNtrTestGroup.setDescription('Objects used for SLA Monitor Agent NTR test execution and results reporting.')
slaMonitorAgentRtpTestGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 45, 4, 8, 2, 2, 3)).setObjects(("SLA-MONITOR-MIB", "slaMonitorRtpCtrlTargetAddressType"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlTargetAddress"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlDscp"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlTestPackets"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlSyncPackets"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlPeriod"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlAdminStatus"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlLabel"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlStorageType"), ("SLA-MONITOR-MIB", "slaMonitorRtpCtrlRowStatus"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsOperStatus"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsSrcAddressType"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsSrcAddress"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsDstAddressType"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsDstAddress"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsSrcPort"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsDstPort"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsDscp"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsAverageDelay"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsMedianDelay"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsPacketLoss"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsOutOfOrderArrivals"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsJitterQuartile0"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsJitterQuartile1"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsJitterQuartile2"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsJitterQuartile3"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsJitterQuartile4"), ("SLA-MONITOR-MIB", "slaMonitorRtpResultsAbortData"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slaMonitorAgentRtpTestGroup = slaMonitorAgentRtpTestGroup.setStatus('current')
if mibBuilder.loadTexts: slaMonitorAgentRtpTestGroup.setDescription('Objects used for SLA Monitor Agent RTP test execution and results reporting.')
mibBuilder.exportSymbols("SLA-MONITOR-MIB", slaMonitorNtrHopsTable=slaMonitorNtrHopsTable, slaMonitorNtrResultsSrcAddressType=slaMonitorNtrResultsSrcAddressType, slaMonitorRtpResultsDscp=slaMonitorRtpResultsDscp, slaMonitorRtpResultsSrcAddress=slaMonitorRtpResultsSrcAddress, slaMonitorNtrResultsOperStatus=slaMonitorNtrResultsOperStatus, slaMonitorRtpResultsJitterQuartile2=slaMonitorRtpResultsJitterQuartile2, slaMonitorAgentSupportedApps=slaMonitorAgentSupportedApps, slaMonitorAgentPort=slaMonitorAgentPort, slaMonitorNtrResultsDstAddress=slaMonitorNtrResultsDstAddress, slaMonitorNtrResultsSrcAddress=slaMonitorNtrResultsSrcAddress, slaMonitorRtpCtrlTargetAddressType=slaMonitorRtpCtrlTargetAddressType, slaMonitorAgentNtrTestGroup=slaMonitorAgentNtrTestGroup, slaMonitorAgentServerBypass=slaMonitorAgentServerBypass, slaMonitorRtpCtrlDscp=slaMonitorRtpCtrlDscp, slaMonitorRtpCtrlAdminStatus=slaMonitorRtpCtrlAdminStatus, slaMonitorNtrHopsIngressDscp=slaMonitorNtrHopsIngressDscp, slaMonitorNtrCtrlRowStatus=slaMonitorNtrCtrlRowStatus, slaMonitorRtpCtrlRowStatus=slaMonitorRtpCtrlRowStatus, slaMonitorAgtClasses=slaMonitorAgtClasses, slaMonitorAgentRegisteredServerPort=slaMonitorAgentRegisteredServerPort, slaMonitorMibCompliance=slaMonitorMibCompliance, slaMonitorAgentConfiguredAgentToAgentPort=slaMonitorAgentConfiguredAgentToAgentPort, slaMonitorNtrResultsDscp=slaMonitorNtrResultsDscp, slaMonitorAgentConfiguredServerPort=slaMonitorAgentConfiguredServerPort, slaMonitorAgentCliAvailable=slaMonitorAgentCliAvailable, slaMonitorRtpCtrlOwnerId=slaMonitorRtpCtrlOwnerId, slaMonitorRtpResultsDstAddress=slaMonitorRtpResultsDstAddress, slaMonitorRtpResultsSrcAddressType=slaMonitorRtpResultsSrcAddressType, slaMonitorNtrResultsEntry=slaMonitorNtrResultsEntry, slaMonitorNtrCtrlLabel=slaMonitorNtrCtrlLabel, slaMonitorAgentCliTimeout=slaMonitorAgentCliTimeout, slaMonitorAgentCliTimeoutMode=slaMonitorAgentCliTimeoutMode, slaMonitorRtpResultsDstAddressType=slaMonitorRtpResultsDstAddressType, slaMonitorRtpResultsMedianDelay=slaMonitorRtpResultsMedianDelay, slaMonitorRtpCtrlTable=slaMonitorRtpCtrlTable, slaMonitorNtrResultsTable=slaMonitorNtrResultsTable, slaMonitorRtpResultsJitterQuartile3=slaMonitorRtpResultsJitterQuartile3, PYSNMP_MODULE_ID=slaMonitorMib, slaMonitorRtpResultsJitterQuartile0=slaMonitorRtpResultsJitterQuartile0, slaMonitorNtrCtrlStorageType=slaMonitorNtrCtrlStorageType, slaMonitorRtpCtrlPeriod=slaMonitorRtpCtrlPeriod, slaMonitorAgentRegistrationTime=slaMonitorAgentRegistrationTime, slaMonitorMib=slaMonitorMib, slaMonitorRtpCtrlTestName=slaMonitorRtpCtrlTestName, slaMonitorAgentConfiguredAltServerAddrType=slaMonitorAgentConfiguredAltServerAddrType, slaMonitorRtpCtrlTestPackets=slaMonitorRtpCtrlTestPackets, slaMonitorRtpResultsOperStatus=slaMonitorRtpResultsOperStatus, slaMonitorAgentConfiguredAgentAddr=slaMonitorAgentConfiguredAgentAddr, slaMonitorRtpResultsPacketLoss=slaMonitorRtpResultsPacketLoss, slaMonitorAgentRtpTestGroup=slaMonitorAgentRtpTestGroup, slaMonitorNtrResultsDstAddressType=slaMonitorNtrResultsDstAddressType, slaMonitorNtrHopsTgtAddressType=slaMonitorNtrHopsTgtAddressType, slaMonitorRtpResultsDstPort=slaMonitorRtpResultsDstPort, slaMonitorNtrCtrlTable=slaMonitorNtrCtrlTable, slaMonitorRtpResultsOutOfOrderArrivals=slaMonitorRtpResultsOutOfOrderArrivals, slaMonitorAgentConfiguredAgentPort=slaMonitorAgentConfiguredAgentPort, slaMonitorRtpResultsJitterQuartile4=slaMonitorRtpResultsJitterQuartile4, slaMonitorNtrResultsSrcPort=slaMonitorNtrResultsSrcPort, 
slaMonitorRtpResultsEntry=slaMonitorRtpResultsEntry, slaMonitorNtrResultsTTL=slaMonitorNtrResultsTTL, slaMonitorMibNotifications=slaMonitorMibNotifications, slaMonitorAgentRegisteredServerAddr=slaMonitorAgentRegisteredServerAddr, slaMonitorAgentEncryptionSupport=slaMonitorAgentEncryptionSupport, slaMonitorAgentCertFile=slaMonitorAgentCertFile, slaMonitorRtpCtrlStorageType=slaMonitorRtpCtrlStorageType, slaMonitorRtpResultsJitterQuartile1=slaMonitorRtpResultsJitterQuartile1, slaMonitorMibConformance=slaMonitorMibConformance, slaMonitorNtrHopsEntry=slaMonitorNtrHopsEntry, slaMonitorNtrHopsHopIndex=slaMonitorNtrHopsHopIndex, slaMonitorAgentAddress=slaMonitorAgentAddress, slaMonitorAgentToAgentPort=slaMonitorAgentToAgentPort, slaMonitorAgentRegisteredServerAddrType=slaMonitorAgentRegisteredServerAddrType, slaMonitorAgentConfiguredAltServerPort=slaMonitorAgentConfiguredAltServerPort, slaMonitorNtrCtrlTargetAddressType=slaMonitorNtrCtrlTargetAddressType, slaMonitorNtrHopsTgtAddress=slaMonitorNtrHopsTgtAddress, slaMonitorAgentConfiguredServerAddr=slaMonitorAgentConfiguredServerAddr, slaMonitorAgentRefuseServerTests=slaMonitorAgentRefuseServerTests, slaMonitorMibClasses=slaMonitorMibClasses, slaMonitorMibGroups=slaMonitorMibGroups, slaMonitorAgentRegisteredWithServer=slaMonitorAgentRegisteredWithServer, slaMonitorRtpResultsAverageDelay=slaMonitorRtpResultsAverageDelay, slaMonitorNtrCtrlPeriod=slaMonitorNtrCtrlPeriod, slaMonitorAgentCertFileInstallAction=slaMonitorAgentCertFileInstallAction, slaMonitorNtrCtrlEntry=slaMonitorNtrCtrlEntry, slaMonitorNtrCtrlTestName=slaMonitorNtrCtrlTestName, slaMonitorAgentExceptionDetected=slaMonitorAgentExceptionDetected, slaMonitorNtrCtrlDscp=slaMonitorNtrCtrlDscp, slaMonitorAgentConfigGroup=slaMonitorAgentConfigGroup, slaMonitorNtrResultsCompletionData=slaMonitorNtrResultsCompletionData, slaMonitorRtpCtrlLabel=slaMonitorRtpCtrlLabel, slaMonitorNtrResultsDstPort=slaMonitorNtrResultsDstPort, slaMonitorAgentConfiguredServerAddrType=slaMonitorAgentConfiguredServerAddrType, slaMonitorAgentConfiguredAltServerAddr=slaMonitorAgentConfiguredAltServerAddr, slaMonitorNtrCtrlAdminStatus=slaMonitorNtrCtrlAdminStatus, slaMonitorNtrCtrlAttempts=slaMonitorNtrCtrlAttempts, slaMonitorNtrCtrlTargetAddress=slaMonitorNtrCtrlTargetAddress, slaMonitorNtrHopsEgressDscp=slaMonitorNtrHopsEgressDscp, slaMonitorAgentConfiguredAgentAddrType=slaMonitorAgentConfiguredAgentAddrType, slaMonitorAgentSlaParameter=slaMonitorAgentSlaParameter, slaMonitorNtrResultsHopCount=slaMonitorNtrResultsHopCount, slaMonitorRtpCtrlEntry=slaMonitorRtpCtrlEntry, slaMonitorAgentStatus=slaMonitorAgentStatus, slaMonitorAgentConfiguredAgentVrfName=slaMonitorAgentConfiguredAgentVrfName, slaMonitorAgentAddressType=slaMonitorAgentAddressType, slaMonitorNtrResultsCompletionSummary=slaMonitorNtrResultsCompletionSummary, slaMonitorAgtTestClasses=slaMonitorAgtTestClasses, slaMonitorRtpResultsSrcPort=slaMonitorRtpResultsSrcPort, slaMonitorRtpResultsAbortData=slaMonitorRtpResultsAbortData, slaMonitorRtpCtrlSyncPackets=slaMonitorRtpCtrlSyncPackets, slaMonitorNtrResultsAbortData=slaMonitorNtrResultsAbortData, slaMonitorNtrHopsRtt=slaMonitorNtrHopsRtt, slaMonitorRtpCtrlTargetAddress=slaMonitorRtpCtrlTargetAddress, slaMonitorRtpResultsTable=slaMonitorRtpResultsTable, slaMonitorNtrCtrlOwnerId=slaMonitorNtrCtrlOwnerId, slaMonitorMibCompliances=slaMonitorMibCompliances)
| 184.136872 | 6,901 | 0.808043 |
eb96dff3ca80f8d36abdb589b88bf107b2c17306 | 1,380 | py | Python | check_files/reify_batch_prediction_dataset.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | ["Apache-2.0"] | null | null | null | check_files/reify_batch_prediction_dataset.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | ["Apache-2.0"] | null | null | null | check_files/reify_batch_prediction_dataset.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | ["Apache-2.0"] | null | null | null | from bigml.api import BigML
api = BigML()
source1_file = "iris.csv"
args = \
{u'fields': {u'000000': {u'name': u'sepal length', u'optype': u'numeric'},
u'000001': {u'name': u'sepal width', u'optype': u'numeric'},
u'000002': {u'name': u'petal length', u'optype': u'numeric'},
u'000003': {u'name': u'petal width', u'optype': u'numeric'},
u'000004': {u'name': u'species',
u'optype': u'categorical',
u'term_analysis': {u'enabled': True}}}}
source2 = api.create_source(source1_file, args)
api.ok(source2)
args = \
{u'objective_field': {u'id': u'000004'}}
dataset1 = api.create_dataset(source2, args)
api.ok(dataset1)
model1 = api.create_model(dataset1)
api.ok(model1)
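# 'output_dataset': True asks BigML to store the batch prediction results as
# a new dataset; its id is reported in the batch prediction's
# "output_dataset_resource" property and is fetched below.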
args = \
{u'operating_kind': u'probability', u'output_dataset': True}
batchprediction1 = api.create_batch_prediction(model1, dataset1, args)
api.ok(batchprediction1)
dataset2 = api.get_dataset(batchprediction1["object"]["output_dataset_resource"])
api.ok(dataset2)
args = \
{u'fields': {u'100000': {u'name': u'species', u'preferred': True}},
u'objective_field': {u'id': u'100000'}}
dataset3 = api.update_dataset(dataset2, args)
api.ok(dataset3)
| 38.333333 | 85 | 0.566667 |
8a071d0c8d8773c5c40b94a1c39c8e05c81e5219 | 23,269 | py | Python | tests/test_runner/tests.py | sproggi/django | bb9587eef159bb8708965aef51c6a4b36a625c02 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 1 | 2015-07-31T22:44:06.000Z | 2015-07-31T22:44:06.000Z | tests/test_runner/tests.py | sproggi/django | bb9587eef159bb8708965aef51c6a4b36a625c02 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 3 | 2017-06-08T19:48:12.000Z | 2021-01-09T13:54:25.000Z | tests/test_runner/tests.py | sproggi/django | bb9587eef159bb8708965aef51c6a4b36a625c02 | ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 2 | 2015-03-18T15:49:40.000Z | 2018-09-04T17:51:07.000Z | """
Tests for django test runner
"""
import unittest
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django import db
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import SystemCheckError
from django.test import (
SimpleTestCase, TransactionTestCase, skipUnlessDBFeature,
)
from django.test.runner import DiscoverRunner
from django.test.testcases import connections_support_transactions
from django.test.utils import (
captured_stderr, dependency_ordered, get_unique_databases_and_mirrors,
iter_test_cases,
)
from django.utils.deprecation import RemovedInDjango50Warning
from .models import B, Person, Through
class MySuite:
def __init__(self):
self.tests = []
def addTest(self, test):
self.tests.append(test)
def __iter__(self):
yield from self.tests
class IterTestCasesTests(unittest.TestCase):
def make_test_suite(self, suite=None, suite_class=None):
if suite_class is None:
suite_class = unittest.TestSuite
if suite is None:
suite = suite_class()
class Tests1(unittest.TestCase):
def test1(self):
pass
def test2(self):
pass
class Tests2(unittest.TestCase):
def test1(self):
pass
def test2(self):
pass
loader = unittest.defaultTestLoader
for test_cls in (Tests1, Tests2):
tests = loader.loadTestsFromTestCase(test_cls)
subsuite = suite_class()
# Only use addTest() to simplify testing a custom TestSuite.
for test in tests:
subsuite.addTest(test)
suite.addTest(subsuite)
return suite
def assertTestNames(self, tests, expected):
# Each test.id() has a form like the following:
# "test_runner.tests.IterTestCasesTests.test_iter_test_cases.<locals>.Tests1.test1".
# It suffices to check only the last two parts.
names = ['.'.join(test.id().split('.')[-2:]) for test in tests]
self.assertEqual(names, expected)
def test_basic(self):
suite = self.make_test_suite()
tests = iter_test_cases(suite)
self.assertTestNames(tests, expected=[
'Tests1.test1', 'Tests1.test2', 'Tests2.test1', 'Tests2.test2',
])
def test_reverse(self):
suite = self.make_test_suite()
tests = iter_test_cases(suite, reverse=True)
self.assertTestNames(tests, expected=[
'Tests2.test2', 'Tests2.test1', 'Tests1.test2', 'Tests1.test1',
])
def test_custom_test_suite_class(self):
suite = self.make_test_suite(suite_class=MySuite)
tests = iter_test_cases(suite)
self.assertTestNames(tests, expected=[
'Tests1.test1', 'Tests1.test2', 'Tests2.test1', 'Tests2.test2',
])
def test_mixed_test_suite_classes(self):
suite = self.make_test_suite(suite=MySuite())
child_suite = list(suite)[0]
self.assertNotIsInstance(child_suite, MySuite)
tests = list(iter_test_cases(suite))
self.assertEqual(len(tests), 4)
self.assertNotIsInstance(tests[0], unittest.TestSuite)
class DependencyOrderingTests(unittest.TestCase):
def test_simple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['charlie'],
'bravo': ['charlie'],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
def test_chained_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['bravo'],
'bravo': ['charlie'],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
# Implied dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_multiple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
('s4', ('s4_db', ['delta'])),
]
dependencies = {
'alpha': ['bravo', 'delta'],
'bravo': ['charlie'],
'delta': ['charlie'],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, aliases in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertIn('s4', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4'))
# Implicit dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_circular_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
]
dependencies = {
'bravo': ['alpha'],
'alpha': ['bravo'],
}
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
def test_own_alias_dependency(self):
raw = [
('s1', ('s1_db', ['alpha', 'bravo']))
]
dependencies = {
'alpha': ['bravo']
}
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
# reordering aliases shouldn't matter
raw = [
('s1', ('s1_db', ['bravo', 'alpha']))
]
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner:
def __init__(self, *args, **kwargs):
pass
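# run_tests() is stubbed with a class-level mock (reporting no failures) so
# ManageCommandTests below can assert how call_command('test', ...) invokes
# the configured test runner.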
MockTestRunner.run_tests = mock.Mock(return_value=[])
class ManageCommandTests(unittest.TestCase):
def test_custom_test_runner(self):
call_command('test', 'sites',
testrunner='test_runner.tests.MockTestRunner')
MockTestRunner.run_tests.assert_called_with(('sites',))
def test_bad_test_runner(self):
with self.assertRaises(AttributeError):
call_command('test', 'sites', testrunner='test_runner.NonexistentRunner')
def test_time_recorded(self):
with captured_stderr() as stderr:
call_command('test', '--timing', 'sites', testrunner='test_runner.tests.MockTestRunner')
self.assertIn('Total run took', stderr.getvalue())
class CustomTestRunnerOptionsSettingsTests(AdminScriptTestCase):
"""
Custom runners can add command line arguments. The runner is specified
through a settings file.
"""
def setUp(self):
super().setUp()
settings = {
'TEST_RUNNER': '\'test_runner.runner.CustomOptionsTestRunner\'',
}
self.write_settings('settings.py', sdict=settings)
def test_default_options(self):
args = ['test', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:2:3')
def test_default_and_given_options(self):
args = ['test', '--settings=test_project.settings', '--option_b=foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_option_name_and_value_separated(self):
args = ['test', '--settings=test_project.settings', '--option_b', 'foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_all_options_given(self):
args = ['test', '--settings=test_project.settings', '--option_a=bar',
'--option_b=foo', '--option_c=31337']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'bar:foo:31337')
class CustomTestRunnerOptionsCmdlineTests(AdminScriptTestCase):
"""
Custom runners can add command line arguments when the runner is specified
using --testrunner.
"""
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_testrunner_option(self):
args = [
'test', '--testrunner', 'test_runner.runner.CustomOptionsTestRunner',
'--option_a=bar', '--option_b=foo', '--option_c=31337'
]
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, 'bar:foo:31337')
def test_testrunner_equals(self):
args = [
'test', '--testrunner=test_runner.runner.CustomOptionsTestRunner',
'--option_a=bar', '--option_b=foo', '--option_c=31337'
]
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, 'bar:foo:31337')
def test_no_testrunner(self):
args = ['test', '--testrunner']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertIn('usage', err)
self.assertNotIn('Traceback', err)
self.assertNoOutput(out)
class Ticket17477RegressionTests(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings('settings.py')
def test_ticket_17477(self):
"""'manage.py help test' works after r16352."""
args = ['help', 'test']
out, err = self.run_manage(args)
self.assertNoOutput(err)
class SQLiteInMemoryTestDbs(TransactionTestCase):
available_apps = ['test_runner']
databases = {'default', 'other'}
@unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections),
"This is an sqlite-specific issue")
def test_transaction_support(self):
# Assert connections mocking is appropriately applied by preventing
# any attempts at calling create_test_db on the global connection
# objects.
for connection in db.connections.all():
create_test_db = mock.patch.object(
connection.creation,
'create_test_db',
side_effect=AssertionError("Global connection object shouldn't be manipulated.")
)
create_test_db.start()
self.addCleanup(create_test_db.stop)
for option_key, option_value in (
('NAME', ':memory:'), ('TEST', {'NAME': ':memory:'})):
tested_connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.sqlite3',
option_key: option_value,
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
option_key: option_value,
},
})
with mock.patch('django.test.utils.connections', new=tested_connections):
other = tested_connections['other']
DiscoverRunner(verbosity=0).setup_databases()
msg = (
"DATABASES setting '%s' option set to sqlite3's ':memory:' value "
"shouldn't interfere with transaction support detection." % option_key
)
# Transaction support is properly initialized for the 'other' DB.
self.assertTrue(other.features.supports_transactions, msg)
# And all the DBs report that they support transactions.
self.assertTrue(connections_support_transactions(), msg)
class DummyBackendTest(unittest.TestCase):
def test_setup_databases(self):
"""
setup_databases() doesn't fail with dummy database backend.
"""
tested_connections = db.ConnectionHandler({})
with mock.patch('django.test.utils.connections', new=tested_connections):
runner_instance = DiscoverRunner(verbosity=0)
old_config = runner_instance.setup_databases()
runner_instance.teardown_databases(old_config)
class AliasedDefaultTestSetupTest(unittest.TestCase):
def test_setup_aliased_default_database(self):
"""
setup_databases() doesn't fail when 'default' is aliased
"""
tested_connections = db.ConnectionHandler({
'default': {
'NAME': 'dummy'
},
'aliased': {
'NAME': 'dummy'
}
})
with mock.patch('django.test.utils.connections', new=tested_connections):
runner_instance = DiscoverRunner(verbosity=0)
old_config = runner_instance.setup_databases()
runner_instance.teardown_databases(old_config)
class SetupDatabasesTests(SimpleTestCase):
def setUp(self):
self.runner_instance = DiscoverRunner(verbosity=0)
def test_setup_aliased_databases(self):
tested_connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.dummy',
'NAME': 'dbname',
},
'other': {
'ENGINE': 'django.db.backends.dummy',
'NAME': 'dbname',
}
})
with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
with mock.patch('django.test.utils.connections', new=tested_connections):
old_config = self.runner_instance.setup_databases()
self.runner_instance.teardown_databases(old_config)
mocked_db_creation.return_value.destroy_test_db.assert_called_once_with('dbname', 0, False)
def test_setup_test_database_aliases(self):
"""
The default database must be the first because data migrations
use the default alias by default.
"""
tested_connections = db.ConnectionHandler({
'other': {
'ENGINE': 'django.db.backends.dummy',
'NAME': 'dbname',
},
'default': {
'ENGINE': 'django.db.backends.dummy',
'NAME': 'dbname',
}
})
with mock.patch('django.test.utils.connections', new=tested_connections):
test_databases, _ = get_unique_databases_and_mirrors()
self.assertEqual(
test_databases,
{
('', '', 'django.db.backends.dummy', 'test_dbname'): (
'dbname',
['default', 'other'],
),
},
)
def test_destroy_test_db_restores_db_name(self):
tested_connections = db.ConnectionHandler({
'default': {
'ENGINE': settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"],
'NAME': 'xxx_test_database',
},
})
# Using the real current name as old_name to not mess with the test suite.
old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"]
with mock.patch('django.db.connections', new=tested_connections):
tested_connections['default'].creation.destroy_test_db(old_name, verbosity=0, keepdb=True)
self.assertEqual(tested_connections['default'].settings_dict["NAME"], old_name)
def test_serialization(self):
tested_connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.dummy',
},
})
with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
with mock.patch('django.test.utils.connections', new=tested_connections):
self.runner_instance.setup_databases()
mocked_db_creation.return_value.create_test_db.assert_called_once_with(
verbosity=0, autoclobber=False, serialize=True, keepdb=False
)
def test_serialized_off(self):
tested_connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.dummy',
'TEST': {'SERIALIZE': False},
},
})
msg = (
'The SERIALIZE test database setting is deprecated as it can be '
'inferred from the TestCase/TransactionTestCase.databases that '
'enable the serialized_rollback feature.'
)
with mock.patch('django.db.backends.dummy.base.DatabaseWrapper.creation_class') as mocked_db_creation:
with mock.patch('django.test.utils.connections', new=tested_connections):
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.runner_instance.setup_databases()
mocked_db_creation.return_value.create_test_db.assert_called_once_with(
verbosity=0, autoclobber=False, serialize=False, keepdb=False
)
@skipUnlessDBFeature('supports_sequence_reset')
class AutoIncrementResetTest(TransactionTestCase):
"""
    Models created in the same way in different test methods receive the same PK
    values since the sequences are reset before each test method.
"""
available_apps = ['test_runner']
reset_sequences = True
def _test(self):
# Regular model
p = Person.objects.create(first_name='Jack', last_name='Smith')
self.assertEqual(p.pk, 1)
# Auto-created many-to-many through model
p.friends.add(Person.objects.create(first_name='Jacky', last_name='Smith'))
self.assertEqual(p.friends.through.objects.first().pk, 1)
# Many-to-many through model
b = B.objects.create()
t = Through.objects.create(person=p, b=b)
self.assertEqual(t.pk, 1)
def test_autoincrement_reset1(self):
self._test()
def test_autoincrement_reset2(self):
self._test()
class EmptyDefaultDatabaseTest(unittest.TestCase):
def test_empty_default_database(self):
"""
An empty default database in settings does not raise an ImproperlyConfigured
error when running a unit test that does not use a database.
"""
tested_connections = db.ConnectionHandler({'default': {}})
with mock.patch('django.db.connections', new=tested_connections):
connection = tested_connections[db.utils.DEFAULT_DB_ALIAS]
self.assertEqual(connection.settings_dict['ENGINE'], 'django.db.backends.dummy')
connections_support_transactions()
class RunTestsExceptionHandlingTests(unittest.TestCase):
def test_run_checks_raises(self):
"""
Teardown functions are run when run_checks() raises SystemCheckError.
"""
with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \
mock.patch('django.test.runner.DiscoverRunner.teardown_databases') as teardown_databases, \
mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
self.assertTrue(teardown_databases.called)
self.assertTrue(teardown_test_environment.called)
def test_run_checks_raises_and_teardown_raises(self):
"""
SystemCheckError is surfaced when run_checks() raises SystemCheckError
        and teardown_databases() raises ValueError.
"""
with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
mock.patch('django.test.runner.DiscoverRunner.run_checks', side_effect=SystemCheckError), \
mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \
as teardown_databases, \
mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
def test_run_checks_passes_and_teardown_raises(self):
"""
Exceptions on teardown are surfaced if no exceptions happen during
run_checks().
"""
with mock.patch('django.test.runner.DiscoverRunner.setup_test_environment'), \
mock.patch('django.test.runner.DiscoverRunner.setup_databases'), \
mock.patch('django.test.runner.DiscoverRunner.build_suite'), \
mock.patch('django.test.runner.DiscoverRunner.run_checks'), \
mock.patch('django.test.runner.DiscoverRunner.teardown_databases', side_effect=ValueError) \
as teardown_databases, \
mock.patch('django.test.runner.DiscoverRunner.teardown_test_environment') as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(ValueError):
# Suppress the output when running TestDjangoTestCase.
with mock.patch('sys.stderr'):
runner.run_tests(['test_runner_apps.sample.tests_sample.TestDjangoTestCase'])
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
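# Added note (illustration only): 'test_runner.runner.CustomOptionsTestRunner' referenced by
# CustomTestRunnerOptionsCmdlineTests above lives in a separate module of the test suite and
# is not shown in this excerpt. A runner satisfying those tests could be sketched roughly as
# follows (an assumption, not the actual implementation):
#
#     class CustomOptionsTestRunner(DiscoverRunner):
#         @classmethod
#         def add_arguments(cls, parser):
#             parser.add_argument('--option_a', default='1')
#             parser.add_argument('--option_b', default='2')
#             parser.add_argument('--option_c', default='3')
#
#         def __init__(self, option_a=None, option_b=None, option_c=None, **kwargs):
#             super().__init__(**kwargs)
#             self.option_a, self.option_b, self.option_c = option_a, option_b, option_c
#
#         def run_tests(self, test_labels, **kwargs):
#             print('%s:%s:%s' % (self.option_a, self.option_b, self.option_c))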
| 39.041946 | 119 | 0.623748 |
57225fbb97fa1b910635e3b6971ca3228684b582 | 562 | py | Python | setup.py | fredjn/eiffel-graphql-api | 52e54954bd418686a5668e6dfe9fbb4bc35273af | [
"Apache-2.0"
] | 3 | 2019-12-16T13:54:21.000Z | 2019-12-20T21:52:14.000Z | setup.py | fredjn/eiffel-graphql-api | 52e54954bd418686a5668e6dfe9fbb4bc35273af | [
"Apache-2.0"
] | 41 | 2019-12-16T13:32:30.000Z | 2021-09-01T06:51:51.000Z | setup.py | fredjn/eiffel-graphql-api | 52e54954bd418686a5668e6dfe9fbb4bc35273af | [
"Apache-2.0"
] | 7 | 2019-12-19T13:38:46.000Z | 2021-12-20T14:05:00.000Z | # -*- coding: utf-8 -*-
"""Setup file for eiffel_graphql_api.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require("setuptools>=38.3")
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| 23.416667 | 71 | 0.733096 |
6d4d47974b4629f9a753f7fbfc6d80f714c8156c | 507 | py | Python | python/Josephus Survivor.py | TechieHelper/Codewars | 98ea8deb14ae1422162895f481e4175ab5868955 | [
"MIT"
] | null | null | null | python/Josephus Survivor.py | TechieHelper/Codewars | 98ea8deb14ae1422162895f481e4175ab5868955 | [
"MIT"
] | null | null | null | python/Josephus Survivor.py | TechieHelper/Codewars | 98ea8deb14ae1422162895f481e4175ab5868955 | [
"MIT"
] | null | null | null | # A program to see which person survives Josephus permutation
people = 7
likelihood = 3
def josephus_survivor(n, k):
    """Return the number of the person who survives the Josephus permutation
    of n people when every k-th person is eliminated."""
    people = list(range(1, n + 1))
    index = 0
    while len(people) > 1:
        # Step k places around the (shrinking) circle and eliminate that person.
        index = (index + k - 1) % len(people)
        people.pop(index)
    return people[0]
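# Added check: with 7 people counting off every 3rd person, the survivor is known to be 4.
assert josephus_survivor(7, 3) == 4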
print(josephus_survivor(7,3)) | 14.911765 | 61 | 0.575937 |
58f28a4a242ab9ed93c19c814e2cdc5c32146797 | 665 | py | Python | sesion_05/manage.py | bernest/modulo-django-desarrollo-web-cdmx-20-05pt | 33f971f032f7d3902a49a993d46e3ecefb21d59b | [
"MIT"
] | null | null | null | sesion_05/manage.py | bernest/modulo-django-desarrollo-web-cdmx-20-05pt | 33f971f032f7d3902a49a993d46e3ecefb21d59b | [
"MIT"
] | null | null | null | sesion_05/manage.py | bernest/modulo-django-desarrollo-web-cdmx-20-05pt | 33f971f032f7d3902a49a993d46e3ecefb21d59b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bedu_rest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 |
8316ffea4f7f0d45f8fded053326d8d61749e5f8 | 507 | py | Python | Aula14/ex12.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | 1 | 2020-11-28T14:48:03.000Z | 2020-11-28T14:48:03.000Z | Aula14/ex12.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | null | null | null | Aula14/ex12.py | danicon/MD2-Curso_Python | 77a2eb2d123eb1359dd7c84360c83bf3b3033ab4 | [
"MIT"
] | null | null | null | resp = 'Y'
soma = quant = media = maior = menor = 0
while resp in 'Yy':
    num = int(input('Enter a number: '))
    soma += num
    quant += 1
    if quant == 1:
        maior = menor = num
    else:
        if num > maior:
            maior = num
        if num < menor:
            menor = num
    resp = str(input('Do you want to continue? [Y/N] ')).upper().strip()[0]
media = soma / quant
print(f'You entered {quant} numbers and the average was {media}')
print(f'The highest value was {maior} and the lowest was {menor}')
| 28.166667 | 66 | 0.550296 |
29309f53b097c21f32efd36ae008e0802f13522b | 1,550 | py | Python | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | 2 | 2018-04-17T19:18:16.000Z | 2020-03-04T22:05:02.000Z | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py | oesteban/nipype | c14f24eba1da08711bbb894e049ee858ed740096 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brains import BRAINSTransformConvert
def test_BRAINSTransformConvert_inputs():
input_map = dict(
args=dict(argstr='%s', ),
displacementVolume=dict(
argstr='--displacementVolume %s',
hash_files=False,
),
environ=dict(
nohash=True,
usedefault=True,
),
inputTransform=dict(
argstr='--inputTransform %s',
extensions=None,
),
outputPrecisionType=dict(argstr='--outputPrecisionType %s', ),
outputTransform=dict(
argstr='--outputTransform %s',
hash_files=False,
),
outputTransformType=dict(argstr='--outputTransformType %s', ),
referenceVolume=dict(
argstr='--referenceVolume %s',
extensions=None,
),
)
inputs = BRAINSTransformConvert.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSTransformConvert_outputs():
output_map = dict(
displacementVolume=dict(extensions=None, ),
outputTransform=dict(extensions=None, ),
)
outputs = BRAINSTransformConvert.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 32.978723 | 70 | 0.616774 |
34cff6e90e3d24a8997adb31e36755ba929755b8 | 10,567 | py | Python | pyvoltha/adapters/extensions/events/kpi/adapter_pm_metrics.py | nsharma70/pyvoltha | ea01eb85f45e3cd0bed12b4b446e5af7f66c16db | [
"Apache-2.0"
] | null | null | null | pyvoltha/adapters/extensions/events/kpi/adapter_pm_metrics.py | nsharma70/pyvoltha | ea01eb85f45e3cd0bed12b4b446e5af7f66c16db | [
"Apache-2.0"
] | 1 | 2021-03-25T23:34:15.000Z | 2021-03-25T23:34:15.000Z | pyvoltha/adapters/extensions/events/kpi/adapter_pm_metrics.py | rohan-agra/VOL-2311-pyvoltha | 54ac9266cf4fc27a7583ba08ada779ad107e9cfe | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import structlog
import arrow
from twisted.internet.task import LoopingCall
from voltha_protos.events_pb2 import KpiEvent2, KpiEventType, MetricInformation, MetricMetaData
from voltha_protos.events_pb2 import Event, EventType, EventCategory, EventSubCategory
from voltha_protos.device_pb2 import PmConfig
class AdapterPmMetrics(object):
"""
Base class for Device Adapter PM Metrics Manager
Device specific (OLT, ONU, OpenOMCI, ...) will derive groups of PM information
and this base class is primarily used to provide a consistent interface to configure,
start, and stop statistics collection.
"""
DEFAULT_FREQUENCY_KEY = 'default-collection-frequency'
DEFAULT_COLLECTION_FREQUENCY = 15 * 10 # 1/10ths of a second
# If the collection object has a property of the following name, it will be used
# to retrieve the UTC Collection Timestamp (UTC seconds since epoch). If the collection
# object does not support this attribute, the current time will be used. If the attribute
# is supported, but returns None, this signals that no metrics are currently available
# for collection.
TIMESTAMP_ATTRIBUTE = 'timestamp'
def __init__(self, event_mgr, core_proxy, device_id, logical_device_id, serial_number,
grouped=False, freq_override=False, **kwargs):
"""
Initializer for shared Device Adapter PM metrics manager
:param core_proxy: (CoreProxy) Gateway between CORE and an adapter
:param device_id: (str) Device ID
:param logical_device_id: (str) VOLTHA Logical Device ID
:param grouped: (bool) Flag indicating if statistics are managed as a group
:param freq_override: (bool) Flag indicating if frequency collection can be specified
on a per group basis
:param kwargs: (dict) Device Adapter specific values
"""
self.log = structlog.get_logger(device_id=device_id)
self.event_mgr = event_mgr
self.device_id = device_id
self.core_proxy = core_proxy
self.name = core_proxy.listening_topic
self.logical_device_id = logical_device_id
self.serial_number = serial_number
self.default_freq = kwargs.get(AdapterPmMetrics.DEFAULT_FREQUENCY_KEY,
AdapterPmMetrics.DEFAULT_COLLECTION_FREQUENCY)
self._event = "KPI_EVENT"
self._category = EventCategory.EQUIPMENT
self._sub_category = EventSubCategory.ONU
self.grouped = grouped
self.freq_override = grouped and freq_override
self.lc = None
self.pm_group_metrics = dict() # name -> PmGroupConfig
def update(self, pm_config):
# TODO: Move any common steps into base class
raise NotImplementedError('Your derived class should override this method')
def make_proto(self, pm_config=None):
raise NotImplementedError('Your derived class should override this method')
def start_collector(self, callback=None):
"""
Start the collection loop for an adapter if the frequency > 0
:param callback: (callable) Function to call to collect PM data
"""
self.log.info("starting-pm-collection", device_name=self.name, default_freq=self.default_freq)
if callback is None:
callback = self.collect_and_publish_metrics
if self.lc is None:
self.lc = LoopingCall(callback)
if self.default_freq > 0:
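            # default_freq is expressed in 1/10ths of a second; divide by 10 to get seconds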
self.lc.start(interval=self.default_freq / 10)
def stop_collector(self):
""" Stop the collection loop"""
if self.lc is not None and self.default_freq > 0:
self.lc.stop()
def collect_group_metrics(self, group_name, group, names, config):
"""
Collect the metrics for a specific PM group.
This common collection method expects that the 'group object' provide as the second
parameter supports an attribute or property with the name of the value to
retrieve.
:param group_name: (str) The unique collection name. The name should not contain spaces.
:param group: (object) The object to query for the value of various attributes (PM names)
:param names: (set) A collection of PM names that, if implemented as a property in the object,
will return a value to store in the returned PM dictionary
:param config: (PMConfig) PM Configuration settings. The enabled flag is examined to determine
if the data associated with a PM Name will be collected.
:return: (MetricInformation) collected metrics
"""
assert ' ' not in group_name, 'Spaces are not allowed in metric titles, use an underscore'
if group is None:
return None
metrics = dict()
context = dict()
now = getattr(group, AdapterPmMetrics.TIMESTAMP_ATTRIBUTE) \
if hasattr(group, AdapterPmMetrics.TIMESTAMP_ATTRIBUTE) \
else arrow.utcnow().float_timestamp
if now is None:
return None # No metrics available at this time for collection
for (metric, t) in names:
if config[metric].type == PmConfig.CONTEXT and hasattr(group, metric):
context[metric] = str(getattr(group, metric))
elif config[metric].type in (PmConfig.COUNTER, PmConfig.GAUGE, PmConfig.STATE):
if config[metric].enabled and hasattr(group, metric):
metrics[metric] = getattr(group, metric)
        # Check length of metric data. Will be zero if/when individual group
        # metrics can be disabled and all are (or are not supported by the
        # underlying adapter).
if len(metrics) == 0:
return None
return MetricInformation(metadata=MetricMetaData(title=group_name,
ts=now,
logical_device_id=self.logical_device_id,
serial_no=self.serial_number,
device_id=self.device_id,
context=context),
metrics=metrics)
def collect_metrics(self, data=None):
"""
Collect metrics for this adapter.
The adapter type (OLT, ONU, ..) should provide a derived class where this
method iterates through all metrics and collects them up in a dictionary with
the group/metric name as the key, and the metric values as the contents.
The data collected (or passed in) is a list of pairs/tuples. Each
pair is composed of a MetricMetaData metadata-portion and list of MetricValuePairs
that contains a single individual metric or list of metrics if this is a
group metric.
This method is called for each adapter at a fixed frequency.
TODO: Currently all group metrics are collected on a single timer tick.
This needs to be fixed as independent group or instance collection is
desirable.
:param data: (list) Existing list of collected metrics (MetricInformation).
This is provided to allow derived classes to call into
further encapsulated classes.
:return: (list) metadata and metrics pairs - see description above
"""
raise NotImplementedError('Your derived class should override this method')
def collect_and_publish_metrics(self):
""" Request collection of all enabled metrics and publish them """
try:
data = self.collect_metrics()
raised_ts = arrow.utcnow().timestamp
self.publish_metrics(data, raised_ts)
except Exception as e:
self.log.exception('failed-to-collect-kpis', e=e)
def publish_metrics(self, data, raised_ts):
"""
Publish the metrics during a collection.
The data collected (or passed in) is a list of dictionary pairs/tuple. Each
pair is composed of a metadata-portion and a metrics-portion that contains
information for a specific instance of an individual metric or metric group.
:param data: (list) Existing list of collected metrics (MetricInformation)
to convert to a KPIEvent and publish
"""
self.log.debug('publish-metrics', data=data)
event_header = self.event_mgr.get_event_header(EventType.KPI_EVENT2,
self._category,
self._sub_category,
self._event,
raised_ts)
if len(data):
try:
# TODO: Existing adapters use the KpiEvent, if/when all existing
# adapters use the shared KPI library, we may want to
# deprecate the KPIEvent
event_body = KpiEvent2(
type=KpiEventType.slice,
ts=arrow.utcnow().float_timestamp,
slice_data=data
)
self.event_mgr.send_event(event_header, event_body)
except Exception as e:
self.log.exception('failed-to-submit-kpis', e=e)
# TODO: Need to support on-demand counter update if provided by the PM 'group'.
# Currently we expect PM data to be periodically polled by a separate
# mechanism. The on-demand counter update should be optional in case the
# particular device adapter group of data is polled anyway for other reasons.
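# Illustrative sketch (added, not part of the original module): a device-specific manager
# subclasses AdapterPmMetrics and typically builds collect_metrics() on top of
# collect_group_metrics(). All names below are hypothetical.
#
#     class ExamplePmMetrics(AdapterPmMetrics):
#         NNI_METRICS = {('rx_packets', PmConfig.COUNTER), ('tx_packets', PmConfig.COUNTER)}
#
#         def __init__(self, event_mgr, core_proxy, device_id, logical_device_id,
#                      serial_number, nni_port, **kwargs):
#             super(ExamplePmMetrics, self).__init__(event_mgr, core_proxy, device_id,
#                                                    logical_device_id, serial_number,
#                                                    grouped=True, freq_override=False,
#                                                    **kwargs)
#             self._nni_port = nni_port  # object whose attributes expose the counters
#             self._nni_config = {m: PmConfig(name=m, type=t, enabled=True)
#                                 for (m, t) in ExamplePmMetrics.NNI_METRICS}
#
#         def collect_metrics(self, data=None):
#             if data is None:
#                 data = []
#             group_data = self.collect_group_metrics('NNI', self._nni_port,
#                                                     ExamplePmMetrics.NNI_METRICS,
#                                                     self._nni_config)
#             if group_data is not None:
#                 data.append(group_data)
#             return data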
| 46.756637 | 102 | 0.632725 |
71a1cac50f33d21478576a67a50b6e63ae6073ce | 1,253 | py | Python | examples/ES/es.py | jkren6/PARL | 7299032f8e1804bb4ada0f087fd485816046fa90 | [
"Apache-2.0"
] | 3,172 | 2018-05-22T02:02:29.000Z | 2022-03-31T09:14:56.000Z | examples/ES/es.py | ic7y/PARL | e8797bd0d31d81bc81aae8b12792ff922bcb8ea9 | [
"Apache-2.0"
] | 422 | 2018-05-17T16:58:45.000Z | 2022-03-31T02:03:25.000Z | examples/ES/es.py | ic7y/PARL | e8797bd0d31d81bc81aae8b12792ff922bcb8ea9 | [
"Apache-2.0"
] | 794 | 2018-05-21T18:33:19.000Z | 2022-03-30T13:38:09.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import parl
__all__ = ['ES']
class ES(parl.Algorithm):
def __init__(self, model):
"""ES algorithm.
        Since the parameters of the model are updated at the numpy level, a `learn`
        function is not needed in this algorithm.
Args:
model(`parl.Model`): policy model of ES algorithm.
"""
self.model = model
def predict(self, obs):
"""Use the policy model to predict actions of observations.
Args:
obs(layers.data): data layer of observations.
Returns:
tensor of predicted actions.
"""
return self.model(obs)
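# Illustrative usage (added): parameters are perturbed and updated externally at the numpy
# level (e.g. by ES workers), so the algorithm object is only used for inference:
#
#     algorithm = ES(model)          # `model` is some parl.Model subclass
#     actions = algorithm.predict(obs)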
| 29.833333 | 99 | 0.664804 |
dd69c690e622bb861ac88d038fb561f5f4428dfe | 8,609 | py | Python | basic_mcmc.py | bikestra/bdapy | accea9908651730acd49fce8beda7a684529ca16 | [
"Apache-2.0"
] | 48 | 2015-01-04T07:24:34.000Z | 2022-03-09T15:41:02.000Z | basic_mcmc.py | bikestra/bdapy | accea9908651730acd49fce8beda7a684529ca16 | [
"Apache-2.0"
] | null | null | null | basic_mcmc.py | bikestra/bdapy | accea9908651730acd49fce8beda7a684529ca16 | [
"Apache-2.0"
] | 11 | 2016-04-29T15:00:30.000Z | 2020-05-20T03:59:57.000Z | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Basic Markov-Chain Monte-Carlo (MCMC) Sampling #
#
# ## Gibbs Sampling from Bivariate Normal Distribution (BDA 11.1) ##
#
# Here, we sample from a bivariate normal distribution using Gibbs sampling, although it is not simple to draw from actual joint distribution. The posterior distribution is assumed to be:
# $$
# \left( \begin{matrix} \theta_1 \\ \theta_2 \end{matrix} \middle) \right| y
# \sim
# \text{N}
# \left(
# \left( \begin{matrix} y_1 \\ y_2 \end{matrix} \right),
# \left( \begin{matrix} 1 & \rho \\ \rho & 1 \end{matrix} \right)
# \right).
# $$
# The conditional distributions are:
# $$
# \theta_1 \mid \theta_2, y \sim \text{N}(y_1 + \rho(\theta_2 - y_2), 1-\rho^2),
# $$
# $$
# \theta_2 \mid \theta_1, y \sim \text{N}(y_2 + \rho(\theta_1 - y_1), 1-\rho^2).
# $$
# <codecell>
# prepare inline pylab
%pylab inline
import numpy as np
import pylab as plt
np.random.seed(13531)
# <codecell>
def gibbs_bivariate(y1, y2, rho, start_theta1, start_theta2, num_samples):
scale = np.sqrt(1.0 - rho ** 2)
theta1_samples = [start_theta1]
theta2_samples = [start_theta2]
current_theta1 = start_theta1
current_theta2 = start_theta2
for i in xrange(num_samples):
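        # Record both half-updates so the sample path can be drawn with the
        # axis-parallel segments shown in Figure 11.2 below.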
current_theta1 = np.random.normal(loc=y1 + rho * (current_theta2 - y2), scale=scale)
theta1_samples.append(current_theta1)
theta2_samples.append(current_theta2)
current_theta2 = np.random.normal(loc=y2 + rho * (current_theta1 - y1), scale=scale)
theta1_samples.append(current_theta1)
theta2_samples.append(current_theta2)
return theta1_samples, theta2_samples
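# <codecell>
# Added sanity check (not in the original notebook): the empirical correlation of a
# long Gibbs run should be close to the target rho = 0.8.
theta1_check, theta2_check = gibbs_bivariate(0, 0, 0.8, 2.5, 2.5, 5000)
print("empirical correlation: %.3f" % np.corrcoef(theta1_check[100:], theta2_check[100:])[0, 1])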
# <markdowncell>
# This is Figure 11.2 (a).
# <codecell>
starting_points = [(2.5,2.5),(-2.5,2.5),(2.5,-2.5),(-2.5,-2.5),(0,0)]
plt.plot(zip(*starting_points)[0], zip(*starting_points)[1], 'ks')
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = gibbs_bivariate(0, 0, 0.8, start_theta1, start_theta2, 10)
plt.plot(theta1_samples, theta2_samples, color='k', linestyle='-', linewidth=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# This is Figure 11.2 (b).
# <codecell>
plt.plot(zip(*starting_points)[0], zip(*starting_points)[1], 'ks')
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = gibbs_bivariate(0, 0, 0.8, start_theta1, start_theta2, 500)
plt.plot(theta1_samples, theta2_samples, color='k', linestyle='-', linewidth=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# This is Figure 11.2 (c)
# <codecell>
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = gibbs_bivariate(0, 0, 0.8, start_theta1, start_theta2, 500)
plt.scatter(theta1_samples[len(theta1_samples)/2:], theta2_samples[len(theta2_samples)/2:], marker='.', s=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# ## Metropolis Sampling from Bivariate Normal (BDA 11.3) ##
#
# Here, we are sampling from $p(\theta \mid y) = \text{N}(\theta \mid 0, I)$, where $I$ is a $2 \times 2$ identity matrix. We use $J_t(\theta^* \mid \theta^{t-1}) = \text{N}(\theta^* \mid \theta^{t-1}, 0.2^2 I)$ as a proposal distribution, which is quite inefficient.
# <codecell>
import scipy.stats as stats
from scipy.stats import norm
def metropolis_bivariate(y1, y2, start_theta1, start_theta2, num_samples,
include_start=True, include_loglik=False):
if include_start:
theta1_samples = [start_theta1]
theta2_samples = [start_theta2]
else:
theta1_samples = []
theta2_samples = []
loglik_samples = []
current_theta1 = start_theta1
current_theta2 = start_theta2
# somehow the book is using unnormalized log probability. don't know why.
current_log_prob = norm.logpdf((current_theta1,current_theta2),loc=(0,0),scale=(1,1)).sum() \
- norm.logpdf((0,0),loc=(0,0),scale=(1,1)).sum()
for i in xrange(num_samples):
proposal_theta1, proposal_theta2 = np.random.normal(loc=(current_theta1, current_theta2),
scale=(0.2,0.2))
proposal_log_prob = norm.logpdf((proposal_theta1,proposal_theta2),
loc=(0,0),scale=(1,1)).sum() \
- norm.logpdf((0,0),
loc=(0,0),scale=(1,1)).sum()
if proposal_log_prob > current_log_prob:
flag_accept = True
else:
acceptance_prob = np.exp(proposal_log_prob - current_log_prob)
if np.random.random() < acceptance_prob:
flag_accept = True
else:
flag_accept = False
if flag_accept:
current_theta1 = proposal_theta1
current_theta2 = proposal_theta2
current_log_prob = proposal_log_prob
theta1_samples.append(current_theta1)
theta2_samples.append(current_theta2)
loglik_samples.append(current_log_prob)
if include_loglik:
return theta1_samples, theta2_samples, loglik_samples
else:
return theta1_samples, theta2_samples
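# <codecell>
# Added sanity check (not in the original notebook): estimate the acceptance rate of the
# Metropolis sampler. With the narrow N(theta, 0.2^2 I) proposal we expect frequent
# acceptances but very slow exploration of the target distribution.
theta1_check, theta2_check = metropolis_bivariate(0, 0, 2.5, 2.5, 5000)
accepted = np.sum(np.abs(np.diff(theta1_check)) + np.abs(np.diff(theta2_check)) > 0)
print("estimated acceptance rate: %.2f" % (float(accepted) / (len(theta1_check) - 1)))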
# <markdowncell>
# This is Figure 11.1 (a).
# <codecell>
starting_points = [(2.5,2.5),(-2.5,2.5),(2.5,-2.5),(-2.5,-2.5)]
plt.plot(zip(*starting_points)[0], zip(*starting_points)[1], 'ks')
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = metropolis_bivariate(0, 0, start_theta1, start_theta2, 50)
plt.plot(theta1_samples,
theta2_samples,
color='k', linestyle='-', linewidth=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# This is Figure 11.1 (b).
# <codecell>
starting_points = [(2.5,2.5),(-2.5,2.5),(2.5,-2.5),(-2.5,-2.5)]
plt.plot(zip(*starting_points)[0], zip(*starting_points)[1], 'ks')
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = metropolis_bivariate(0, 0, start_theta1, start_theta2, 1000)
plt.plot(theta1_samples,
theta2_samples,
color='k', linestyle='-', linewidth=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# This is Figure 11.1 (c).
# <codecell>
for start_theta1, start_theta2 in starting_points:
theta1_samples, theta2_samples = metropolis_bivariate(0, 0, start_theta1, start_theta2, 1000)
theta1_samples_tail = theta1_samples[len(theta1_samples)/2:]
theta2_samples_tail = theta2_samples[len(theta2_samples)/2:]
plt.scatter(theta1_samples_tail + np.random.uniform(low=-0.001, high=0.001, size=len(theta1_samples_tail)),
theta2_samples_tail + np.random.uniform(low=-0.001, high=0.001, size=len(theta2_samples_tail)),
marker='.', s=1)
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.axes().set_aspect('equal')
# <markdowncell>
# ## Assessing Convergence (BDA 11.4) ##
#
# $\hat{R}$ can be used to monitor the convergence of MCMC sampling. Table 11.1 is reproduced here:
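#
# For reference (added note), the $\hat{R}$ computed in the next cell follows BDA:
# $$\hat{R} = \sqrt{\frac{n-1}{n} + \frac{1}{n}\frac{B}{W}},$$
# where $W$ is the mean of the within-sequence variances and $B$ is the between-sequence
# variance (the variance of the sequence means scaled by the sequence length $n$).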
# <codecell>
import pandas
from pandas import *
for num_iter in [50,500,2000,5000]:
seq_len = num_iter/2.0
dfs = []
print "When the number of iterations is: %d" % num_iter
for sequence_num, (start_theta1, start_theta2) in enumerate(starting_points):
theta1_samples, theta2_samples, loglik_samples =\
metropolis_bivariate(0, 0, start_theta1, start_theta2, num_iter,
False, True)
# convert samples into dataframe
theta1_samples_tail = theta1_samples[len(theta1_samples)/2:]
theta2_samples_tail = theta2_samples[len(theta2_samples)/2:]
loglik_samples_tail = loglik_samples[len(loglik_samples)/2:]
seqnum = concatenate([repeat(sequence_num * 2, len(theta1_samples_tail)/2),
repeat(sequence_num * 2 + 1,
len(theta1_samples_tail) - len(theta1_samples_tail)/2)])
df = DataFrame({'theta1':theta1_samples_tail,'theta2':theta2_samples_tail,
'loglik':loglik_samples_tail,'seqnum':seqnum})
dfs.append(df)
whole_df = pandas.concat(dfs)
print whole_df.quantile([0.025,0.5,0.975])
W_values = whole_df.groupby('seqnum').var().mean(axis=0)
B_values = whole_df.groupby('seqnum').mean().var(axis=0) * (seq_len)
print "Rhat values:"
print sqrt((seq_len-1.0)/seq_len + 1.0/seq_len * (B_values / W_values))
print "\n\n"
| 36.172269 | 268 | 0.649204 |
df8b6f581c69ffba7b90a12497fd6b131b189d81 | 5,107 | py | Python | yt_dlp/WS_Extractor/lynda.py | evolution-ant/local-youtube-dl | e9be36e8cdc585c0e24a18c74d790b62af6e11a7 | [
"Unlicense"
] | null | null | null | yt_dlp/WS_Extractor/lynda.py | evolution-ant/local-youtube-dl | e9be36e8cdc585c0e24a18c74d790b62af6e11a7 | [
"Unlicense"
] | null | null | null | yt_dlp/WS_Extractor/lynda.py | evolution-ant/local-youtube-dl | e9be36e8cdc585c0e24a18c74d790b62af6e11a7 | [
"Unlicense"
] | null | null | null | # encoding: utf-8
import re
from ..extractor.common import (
InfoExtractor,
)
from ..extractor.lynda import (
LyndaBaseIE,
LyndaIE as OldLyndaIE
)
from ..utils import (
int_or_none,
ExtractorError
)
from ..compat import compat_str
class LyndaIE(OldLyndaIE):
_VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+(-\d+)*|player/embed)/(?P<id>\d+)'
def _real_extract(self, url):
webPage = self._download_webpage(url, url)
if OldLyndaIE.suitable(url):
ie = OldLyndaIE()
ie.set_downloader(self._downloader)
            # Download the subtitles as well
self._downloader.params['listsubtitles'] = True
result = ie._real_extract(url)
self._downloader.params['listsubtitles'] = False
result['thumbnail'] = self._og_search_thumbnail(webPage)
return result
video_id = self._search_regex(r'data-initial-video-id="(\d+)', webPage, 'video_id')
query = {
'videoId': video_id,
'type': 'video',
}
video = self._download_json(
'https://www.lynda.com/ajax/player', video_id,
'Downloading video JSON', fatal=False, query=query)
# Fallback scenario
if not video:
            raise Exception('not supported!')
if 'Status' in video:
raise ExtractorError(
'lynda returned error: %s' % video['Message'], expected=True)
if video.get('HasAccess') is False:
self._raise_unavailable(video_id)
video_id = compat_str(video.get('ID') or video_id)
duration = int_or_none(video.get('DurationInSeconds'))
title = video['Title']
thumbnail = self._og_search_thumbnail(webPage)
formats = []
fmts = video.get('Formats')
if fmts:
formats.extend([{
'url': f['Url'],
'ext': f.get('Extension'),
'width': int_or_none(f.get('Width')),
'height': int_or_none(f.get('Height')),
'filesize': int_or_none(f.get('FileSize')),
'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
} for f in fmts if f.get('Url')])
prioritized_streams = video.get('PrioritizedStreams')
if prioritized_streams:
for prioritized_stream_id, prioritized_stream in list(prioritized_streams.items()):
formats.extend([{
'url': video_url,
'height': int_or_none(format_id),
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
} for format_id, video_url in list(prioritized_stream.items())])
self._check_formats(formats, video_id)
self._sort_formats(formats)
self._downloader.params['listsubtitles'] = True
subtitles = self.extract_subtitles(video_id)
self._downloader.params['listsubtitles'] = False
return {
'id': video_id,
'title': title,
'duration': duration,
'thumbnail': thumbnail,
'subtitles': subtitles,
'formats': formats
}
class LyndaPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.lynda\.com/.*'
def _real_extract(self, url):
self.to_screen('LyndaPlaylistIE 1')
webpage = self._download_webpage(url, url)
self.to_screen('LyndaPlaylistIE 2')
try:
#self.to_screen(webpage)
mobj = re.search(r'data-tracking-category="course-page"\s*data-course-id="(.*?)"|'
r'<input\s*type="hidden"\s*id="currentCourseId"\s*value="(.*?)"|'
r'<link rel="alternate"\s*href=".*\/course\/([^\"]*)',
webpage)
self.to_screen('LyndaPlaylistIE 2.1')
if mobj:
self.to_screen('LyndaPlaylistIE 3.1')
courseId = mobj.group(1) if mobj.group(1) is not None else mobj.group(2)
if not courseId:
courseId = mobj.group(3)
self.to_screen('LyndaPlaylistIE 3')
url = 'http://www.lynda.com/ajax/player?courseId=%s&type=course' % courseId
data = self._download_json(url, url)
self.to_screen('LyndaPlaylistIE 4')
entries = []
self.to_screen('LyndaPlaylistIE 5')
for chapter in data['Chapters']:
for video in chapter['Videos']:
entries.append({'id': str(video['ID']), 'title': video['Title'],
'url': 'http://www.lynda.com' + video['CourseURLs']['www.lynda.com'].replace('2.html', '2/' + str(video['ID']) + '-4.html'),
'duration': video['DurationInSeconds']})
self.to_screen('LyndaPlaylistIE 6')
return self.playlist_result(entries, courseId, data['Title'])
else:
return None
except Exception as e:
            self.to_screen(str(e))
return None | 37.551471 | 148 | 0.547288 |
086b6c60437a280bbc69784635d525aa6cfa435f | 1,375 | py | Python | Exercises/parse_ranges/test_parse_ranges.py | t-reppert/PythonMorsels | c681b7dd0d91b4c97bfd45d88c629542f7ce1b96 | [
"MIT"
] | null | null | null | Exercises/parse_ranges/test_parse_ranges.py | t-reppert/PythonMorsels | c681b7dd0d91b4c97bfd45d88c629542f7ce1b96 | [
"MIT"
] | null | null | null | Exercises/parse_ranges/test_parse_ranges.py | t-reppert/PythonMorsels | c681b7dd0d91b4c97bfd45d88c629542f7ce1b96 | [
"MIT"
] | null | null | null | import unittest
from parse_ranges import parse_ranges
class ParseRangesTests(unittest.TestCase):
"""Tests for parse_ranges."""
def test_three_ranges(self):
self.assertEqual(
list(parse_ranges('1-2,4-4,8-10')),
[1, 2, 4, 8, 9, 10],
)
def test_with_spaces(self):
self.assertEqual(
list(parse_ranges('0-0, 4-8, 20-21, 43-45')),
[0, 4, 5, 6, 7, 8, 20, 21, 43, 44, 45],
)
#@unittest.expectedFailure
def test_return_iterator(self):
numbers = parse_ranges('0-0, 4-8, 20-21, 43-45')
self.assertEqual(next(numbers), 0)
self.assertEqual(list(numbers), [4, 5, 6, 7, 8, 20, 21, 43, 44, 45])
self.assertEqual(list(numbers), [])
numbers = parse_ranges('100-1000000000000')
self.assertEqual(next(numbers), 100)
#@unittest.expectedFailure
def test_with_individual_numbers(self):
self.assertEqual(
list(parse_ranges('0,4-8,20,43-45')),
[0, 4, 5, 6, 7, 8, 20, 43, 44, 45],
)
#@unittest.expectedFailure
def test_ignore_arrows(self):
self.assertEqual(
list(parse_ranges('0, 4-8, 20->exit, 43-45')),
[0, 4, 5, 6, 7, 8, 20, 43, 44, 45],
)
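# Added for illustration: parse_ranges itself is imported at the top of this file and its
# implementation is not shown here. A minimal generator satisfying the tests above might
# look like the sketch below (an assumption, deliberately given a different name so it does
# not shadow the real module under test).
def _reference_parse_ranges(ranges):
    for group in ranges.split(','):
        group = group.split('->')[0]  # "20->exit" is treated as the single number 20
        start, _, end = group.partition('-')
        for number in range(int(start), int(end or start) + 1):
            yield number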
if __name__ == "__main__":
unittest.main(verbosity=2) | 29.255319 | 77 | 0.547636 |
92a6e42ab5925af7087bc8bf95d2f294218e696f | 552 | py | Python | core/advertisements/migrations/0004_auto_20210111_1001.py | dotHashemi/qeshm-employment | 3f7bd2b50b8cae0df74c1abbb5befe1f46546dc9 | [
"MIT"
] | 2 | 2021-01-12T07:22:14.000Z | 2021-01-12T15:47:00.000Z | core/advertisements/migrations/0004_auto_20210111_1001.py | dotHashemi/qeshm-employment | 3f7bd2b50b8cae0df74c1abbb5befe1f46546dc9 | [
"MIT"
] | null | null | null | core/advertisements/migrations/0004_auto_20210111_1001.py | dotHashemi/qeshm-employment | 3f7bd2b50b8cae0df74c1abbb5befe1f46546dc9 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-11 06:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('advertisements', '0003_auto_20210111_0944'),
]
operations = [
migrations.RenameField(
model_name='advertisement',
old_name='status',
new_name='isActive',
),
migrations.AddField(
model_name='advertisement',
name='isVerified',
field=models.BooleanField(default=False),
),
]
| 23 | 54 | 0.586957 |
f61d1c14282ea0326ad9372f172f3f44e94a3e5a | 3,812 | py | Python | migrations/versions/22cab9c1bbf4_.py | iq9/say-so-backend-flask | 1e463afd29bb312466d8c0e24d61152782223acf | [
"MIT"
] | 1 | 2021-01-03T16:13:35.000Z | 2021-01-03T16:13:35.000Z | migrations/versions/22cab9c1bbf4_.py | rbrooks/say-so-backend-flask | 1e463afd29bb312466d8c0e24d61152782223acf | [
"MIT"
] | 1 | 2020-05-28T06:22:31.000Z | 2020-05-28T06:22:31.000Z | migrations/versions/22cab9c1bbf4_.py | iq9/say-so-backend-flask | 1e463afd29bb312466d8c0e24d61152782223acf | [
"MIT"
] | null | null | null | """empty message
Revision ID: 22cab9c1bbf4
Revises:
Create Date: 2020-04-20 22:43:15.612577
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '22cab9c1bbf4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tags',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('tagname', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=100), nullable=False),
sa.Column('password', sa.Binary(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('bio', sa.String(length=300), nullable=True),
sa.Column('image', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('userprofile',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('article',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('slug', sa.Text(), nullable=True),
sa.Column('title', sa.String(length=100), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('createdAt', sa.DateTime(), nullable=False),
sa.Column('updatedAt', sa.DateTime(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['author_id'], ['userprofile.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
op.create_table('followers_assoc',
sa.Column('follower', sa.Integer(), nullable=True),
sa.Column('followed_by', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['followed_by'], ['userprofile.user_id'], ),
sa.ForeignKeyConstraint(['follower'], ['userprofile.user_id'], )
)
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('createdAt', sa.DateTime(), nullable=False),
sa.Column('updatedAt', sa.DateTime(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.Column('article_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['article_id'], ['article.id'], ),
sa.ForeignKeyConstraint(['author_id'], ['userprofile.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('favoritor_assoc',
sa.Column('favoriter', sa.Integer(), nullable=True),
sa.Column('favorited_article', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['favorited_article'], ['article.id'], ),
sa.ForeignKeyConstraint(['favoriter'], ['userprofile.id'], )
)
op.create_table('tag_assoc',
sa.Column('tag', sa.Integer(), nullable=True),
sa.Column('article', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['article'], ['article.id'], ),
sa.ForeignKeyConstraint(['tag'], ['tags.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag_assoc')
op.drop_table('favoritor_assoc')
op.drop_table('comment')
op.drop_table('followers_assoc')
op.drop_table('article')
op.drop_table('userprofile')
op.drop_table('users')
op.drop_table('tags')
# ### end Alembic commands ###
| 37.742574 | 72 | 0.663431 |
5849c4008b5af9ea09744ed7a1ae13ad3c6c3bcd | 2,512 | py | Python | examples/gen-enums.py | erdmann/pyvips | 0da5d23247a36929c1e0b285331f17715225ecca | [
"MIT"
] | null | null | null | examples/gen-enums.py | erdmann/pyvips | 0da5d23247a36929c1e0b285331f17715225ecca | [
"MIT"
] | null | null | null | examples/gen-enums.py | erdmann/pyvips | 0da5d23247a36929c1e0b285331f17715225ecca | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import xml.etree.ElementTree as ET
from pyvips import ffi, values_for_enum, vips_lib, \
type_map, type_name, type_from_name
# This file generates enums.py -- the set of classes giving the permissible
# values for the pyvips enums. Run with something like:
#
# ./gen-enums.py ~/GIT/libvips/libvips/Vips-8.0.gir > enums.py
# mv enums.py ../pyvips
# The GIR file
root = ET.parse(sys.argv[1]).getroot()
namespace = {
"goi": "http://www.gtk.org/introspection/core/1.0"
}
# find all the enumerations and make a dict for them
xml_enums = {}
for node in root.findall("goi:namespace/goi:enumeration", namespace):
xml_enums[node.get('name')] = node
def remove_prefix(enum_str):
prefix = 'Vips'
if enum_str.startswith(prefix):
return enum_str[len(prefix):]
return enum_str
def generate_enums():
# otherwise we're missing some enums
vips_lib.vips_token_get_type()
vips_lib.vips_saveable_get_type()
vips_lib.vips_image_type_get_type()
all_enums = []
def add_enum(gtype, a, b):
nickname = type_name(gtype)
all_enums.append(nickname)
type_map(gtype, add_enum)
return ffi.NULL
type_map(type_from_name('GEnum'), add_enum)
for name in all_enums:
gtype = type_from_name(name)
python_name = remove_prefix(name)
if python_name not in xml_enums:
continue
node = xml_enums[python_name]
enum_doc = node.find("goi:doc", namespace)
print('')
print('')
print(f'class {python_name}(object):')
print(f' """{python_name}.')
if enum_doc is not None:
print('')
print(f'{enum_doc.text}')
print('')
print('Attributes:')
print('')
for value in values_for_enum(gtype):
python_name = value.replace('-', '_')
member = node.find(f"goi:member[@name='{python_name}']", namespace)
member_doc = member.find("goi:doc", namespace)
if member_doc is not None:
text = member_doc.text
print(f' {python_name.upper()} (str): {text}')
print('')
print(' """')
print('')
for value in values_for_enum(gtype):
python_name = value.replace('-', '_').upper()
print(f' {python_name} = \'{value}\'')
if __name__ == "__main__":
print('# libvips enums -- this file is generated automatically')
generate_enums()
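# Illustrative (added): the generated enums.py contains classes of string constants along
# the lines of the sketch below; the actual names, values and docstrings come from the GIR
# file passed on the command line.
#
#     class Access(object):
#         """Access."""
#
#         RANDOM = 'random'
#         SEQUENTIAL = 'sequential'
#         SEQUENTIAL_UNBUFFERED = 'sequential-unbuffered'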
| 27.304348 | 79 | 0.608678 |
e501f14eb7656eabf3777d5e3c944c315ea0643b | 54 | py | Python | litex/soc/cores/cpu/neorv32/__init__.py | motec-research/litex | 0bd2abf68d3047e9bba09604b99b9019b02084fd | [
"ADSL"
] | 1 | 2021-12-25T13:49:55.000Z | 2021-12-25T13:49:55.000Z | litex/soc/cores/cpu/neorv32/__init__.py | motec-research/litex | 0bd2abf68d3047e9bba09604b99b9019b02084fd | [
"ADSL"
] | null | null | null | litex/soc/cores/cpu/neorv32/__init__.py | motec-research/litex | 0bd2abf68d3047e9bba09604b99b9019b02084fd | [
"ADSL"
] | 1 | 2021-12-25T13:49:57.000Z | 2021-12-25T13:49:57.000Z | from litex.soc.cores.cpu.neorv32.core import NEORV32
| 27 | 53 | 0.814815 |
61162b027610267f22226a090154efc517a1648f | 3,480 | py | Python | script/hassfest/zeroconf.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | script/hassfest/zeroconf.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | script/hassfest/zeroconf.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Generate zeroconf file."""
from collections import OrderedDict, defaultdict
import json
from typing import Dict
from .model import Config, Integration
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
# fmt: off
ZEROCONF = {}
HOMEKIT = {}
""".strip()
def generate_and_validate(integrations: Dict[str, Integration]):
"""Validate and generate zeroconf data."""
service_type_dict = defaultdict(list)
homekit_dict = {}
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
service_types = integration.manifest.get("zeroconf", [])
homekit = integration.manifest.get("homekit", {})
homekit_models = homekit.get("models", [])
if not (service_types or homekit_models):
continue
for entry in service_types:
data = {"domain": domain}
if isinstance(entry, dict):
typ = entry["type"]
entry_without_type = entry.copy()
del entry_without_type["type"]
data.update(entry_without_type)
else:
typ = entry
service_type_dict[typ].append(data)
for model in homekit_models:
if model in homekit_dict:
integration.add_error(
"zeroconf",
f"Integrations {domain} and {homekit_dict[model]} "
"have overlapping HomeKit models",
)
break
homekit_dict[model] = domain
# HomeKit models are matched on starting string, make sure none overlap.
warned = set()
for key in homekit_dict:
if key in warned:
continue
# n^2 yoooo
for key_2 in homekit_dict:
if key == key_2 or key_2 in warned:
continue
if key.startswith(key_2) or key_2.startswith(key):
integration.add_error(
"zeroconf",
f"Integrations {homekit_dict[key]} and {homekit_dict[key_2]} "
"have overlapping HomeKit models",
)
warned.add(key)
warned.add(key_2)
break
zeroconf = OrderedDict(
(key, service_type_dict[key]) for key in sorted(service_type_dict)
)
homekit = OrderedDict((key, homekit_dict[key]) for key in sorted(homekit_dict))
return BASE.format(json.dumps(zeroconf, indent=4), json.dumps(homekit, indent=4))
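# Illustrative (added): the generated homeassistant/generated/zeroconf.py ends up holding
# mappings roughly like the following (entries made up for illustration):
#
#     ZEROCONF = {
#         "_axis-video._tcp.local.": [
#             {"domain": "axis", "macaddress": "00408C*"},
#         ],
#     }
#     HOMEKIT = {
#         "LIFX": "lifx",
#     }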
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate zeroconf file."""
zeroconf_path = config.root / "homeassistant/generated/zeroconf.py"
config.cache["zeroconf"] = content = generate_and_validate(integrations)
if config.specific_integrations:
return
with open(str(zeroconf_path)) as fp:
current = fp.read().strip()
if current != content:
config.add_error(
"zeroconf",
"File zeroconf.py is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate zeroconf file."""
zeroconf_path = config.root / "homeassistant/generated/zeroconf.py"
with open(str(zeroconf_path), "w") as fp:
fp.write(f"{config.cache['zeroconf']}\n")
| 30 | 85 | 0.590805 |
9a90a905ee9b33352e3ef08635beef3424c9f448 | 2,504 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/express_route_circuit_authorization.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/network/v2017_10_01/models/express_route_circuit_authorization.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/mgmt/network/v2017_10_01/models/express_route_circuit_authorization.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ExpressRouteCircuitAuthorization(SubResource):
"""Authorization in an ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param authorization_key: The authorization key.
:type authorization_key: str
:param authorization_use_status: AuthorizationUseStatus. Possible values
are: 'Available' and 'InUse'. Possible values include: 'Available',
'InUse'
:type authorization_use_status: str or
~azure.mgmt.network.v2017_10_01.models.AuthorizationUseStatus
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'authorization_use_status': {'key': 'properties.authorizationUseStatus', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, authorization_key=None, authorization_use_status=None, provisioning_state=None, name=None):
super(ExpressRouteCircuitAuthorization, self).__init__(id=id)
self.authorization_key = authorization_key
self.authorization_use_status = authorization_use_status
self.provisioning_state = provisioning_state
self.name = name
self.etag = None
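    # Illustrative usage (added); field values are placeholders, not real resource data:
    #
    #     authorization = ExpressRouteCircuitAuthorization(
    #         authorization_key='auth-key',
    #         authorization_use_status='Available',
    #         name='authorization1',
    #     )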
| 41.04918 | 123 | 0.654553 |
5fd20929bef778417a3ddb57c20432229637e02e | 4,565 | py | Python | PythonLinearNonlinearControl/common/utils.py | Geonhee-LEE/PythonLinearNonlinearControl | 2a2467098108641483778c09ceb7906cb49f6cee | [
"MIT"
] | 425 | 2020-03-31T07:17:48.000Z | 2022-03-30T09:44:41.000Z | PythonLinearNonlinearControl/common/utils.py | Geonhee-LEE/PythonLinearNonlinearControl | 2a2467098108641483778c09ceb7906cb49f6cee | [
"MIT"
] | 6 | 2020-06-22T23:50:41.000Z | 2021-11-19T08:48:35.000Z | PythonLinearNonlinearControl/common/utils.py | Geonhee-LEE/PythonLinearNonlinearControl | 2a2467098108641483778c09ceb7906cb49f6cee | [
"MIT"
] | 88 | 2020-04-03T12:58:54.000Z | 2022-03-28T07:01:22.000Z | import numpy as np
def rotate_pos(pos, angle):
""" Transformation the coordinate in the angle
Args:
pos (numpy.ndarray): local state, shape(data_size, 2)
angle (float): rotate angle, in radians
Returns:
rotated_pos (numpy.ndarray): shape(data_size, 2)
"""
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return np.dot(pos, rot_mat.T)
def fit_angle_in_range(angles, min_angle=-np.pi, max_angle=np.pi):
""" Check angle range and correct the range
Args:
angle (numpy.ndarray): in radians
min_angle (float): maximum of range in radians, default -pi
max_angle (float): minimum of range in radians, default pi
Returns:
fitted_angle (numpy.ndarray): range angle in radians
"""
if max_angle < min_angle:
raise ValueError("max angle must be greater than min angle")
if (max_angle - min_angle) < 2.0 * np.pi:
raise ValueError("difference between max_angle \
and min_angle must be greater than 2.0 * pi")
output = np.array(angles)
output_shape = output.shape
output = output.flatten()
output -= min_angle
output %= 2 * np.pi
output += 2 * np.pi
output %= 2 * np.pi
output += min_angle
output = np.minimum(max_angle, np.maximum(min_angle, output))
return output.reshape(output_shape)
def update_state_with_Runge_Kutta(state, u, functions, dt=0.01, batch=True):
""" update state in Runge Kutta methods
Args:
state (array-like): state of system
u (array-like): input of system
functions (list): update function of each state,
each function will be called like func(state, u)
We expect that this function returns differential of each state
dt (float): float in seconds
batch (bool): state and u is given by batch or not
Returns:
next_state (np.array): next state of system
Notes:
sample of function is as follows:
def func_x(self, x_1, x_2, u):
x_dot = (1. - x_1**2 - x_2**2) * x_2 - x_1 + u
return x_dot
Note that the function return x_dot.
"""
if not batch:
state_size = len(state)
assert state_size == len(functions), \
"Invalid functions length, You need to give the state size functions"
k0 = np.zeros(state_size)
k1 = np.zeros(state_size)
k2 = np.zeros(state_size)
k3 = np.zeros(state_size)
for i, func in enumerate(functions):
k0[i] = dt * func(state, u)
for i, func in enumerate(functions):
k1[i] = dt * func(state + k0 / 2., u)
for i, func in enumerate(functions):
k2[i] = dt * func(state + k1 / 2., u)
for i, func in enumerate(functions):
k3[i] = dt * func(state + k2, u)
return state + (k0 + 2. * k1 + 2. * k2 + k3) / 6.
else:
batch_size, state_size = state.shape
assert state_size == len(functions), \
"Invalid functions length, You need to give the state size functions"
k0 = np.zeros((batch_size, state_size))
k1 = np.zeros((batch_size, state_size))
k2 = np.zeros((batch_size, state_size))
k3 = np.zeros((batch_size, state_size))
for i, func in enumerate(functions):
k0[:, i] = dt * func(state, u)
for i, func in enumerate(functions):
k1[:, i] = dt * func(state + k0 / 2., u)
for i, func in enumerate(functions):
k2[:, i] = dt * func(state + k1 / 2., u)
for i, func in enumerate(functions):
k3[:, i] = dt * func(state + k2, u)
return state + (k0 + 2. * k1 + 2. * k2 + k3) / 6.
def line_search(grad, sol, compute_eval_val,
init_alpha=0.001, max_iter=100, update_ratio=1.):
""" line search
Args:
grad (numpy.ndarray): gradient
sol (numpy.ndarray): sol
compute_eval_val (numpy.ndarray): function to compute evaluation value
Returns:
alpha (float): result of line search
"""
assert grad.shape == sol.shape
base_val = np.inf
alpha = init_alpha
original_sol = sol.copy()
for _ in range(max_iter):
updated_sol = original_sol - alpha * grad
eval_val = compute_eval_val(updated_sol)
if eval_val < base_val:
alpha += init_alpha * update_ratio
base_val = eval_val
else:
break
return alpha
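# Illustrative usage sketch (not part of the original module): a line search on
# the quadratic objective f(x) = ||x||^2, whose gradient at sol is 2 * sol. The
# helper name below is hypothetical.
def _example_line_search():
    sol = np.array([1.0, -2.0])
    grad = 2. * sol

    def compute_eval_val(updated_sol):
        return np.sum(updated_sol ** 2)

    # alpha grows from init_alpha as long as stepping along -grad keeps
    # decreasing the objective
    return line_search(grad, sol, compute_eval_val, init_alpha=0.001, max_iter=100)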
| 30.844595 | 81 | 0.585761 |
2fc34b0fd412fcb1730b03dd307fd3a4ec07bee2 | 9,211 | py | Python | raiden/waiting.py | karlb/raiden | 61ade0559add1a97588ae6bdedd5e0b99ed41de3 | [
"MIT"
] | 1 | 2019-10-08T11:56:50.000Z | 2019-10-08T11:56:50.000Z | raiden/waiting.py | karlb/raiden | 61ade0559add1a97588ae6bdedd5e0b99ed41de3 | [
"MIT"
] | null | null | null | raiden/waiting.py | karlb/raiden | 61ade0559add1a97588ae6bdedd5e0b99ed41de3 | [
"MIT"
] | null | null | null | import gevent
import structlog
from raiden.transfer import channel, views
from raiden.transfer.events import EventPaymentReceivedSuccess
from raiden.transfer.state import (
CHANNEL_AFTER_CLOSE_STATES,
CHANNEL_STATE_SETTLED,
NODE_NETWORK_REACHABLE,
)
from raiden.utils import typing
# type alias to avoid both circular dependencies and flake8 errors
RaidenService = 'RaidenService'
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
def wait_for_block(
raiden: RaidenService,
block_number: typing.BlockNumber,
retry_timeout: float,
) -> None:
current_block_number = views.block_number(
views.state_from_raiden(raiden),
)
while current_block_number < block_number:
gevent.sleep(retry_timeout)
current_block_number = views.block_number(
views.state_from_raiden(raiden),
)
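# Illustrative usage sketch (not part of the original module): the wait_for_*
# helpers in this module never time out on their own, so callers usually bound
# them with gevent.Timeout. `raiden` and `target_block` are hypothetical
# placeholders here.
def _example_wait_for_block_with_timeout(raiden, target_block):
    with gevent.Timeout(30):
        wait_for_block(raiden, target_block, retry_timeout=0.5)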
def wait_for_newchannel(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.TokenAddress,
partner_address: typing.Address,
retry_timeout: float,
) -> None:
"""Wait until the channel with partner_address is registered.
Note:
This does not time out, use gevent.Timeout.
"""
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
while channel_state is None:
gevent.sleep(retry_timeout)
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
def wait_for_participant_newbalance(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.TokenAddress,
partner_address: typing.Address,
target_address: typing.Address,
target_balance: typing.TokenAmount,
retry_timeout: float,
) -> None:
"""Wait until a given channels balance exceeds the target balance.
Note:
This does not time out, use gevent.Timeout.
"""
if target_address == raiden.address:
balance = lambda channel_state: channel_state.our_state.contract_balance
elif target_address == partner_address:
balance = lambda channel_state: channel_state.partner_state.contract_balance
else:
raise ValueError('target_address must be one of the channel participants')
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
while balance(channel_state) < target_balance:
gevent.sleep(retry_timeout)
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
def wait_for_payment_balance(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.TokenAddress,
partner_address: typing.Address,
target_address: typing.Address,
target_balance: typing.TokenAmount,
retry_timeout: float,
) -> None:
"""Wait until a given channels balance exceeds the target balance.
Note:
This does not time out, use gevent.Timeout.
"""
def get_balance(end_state):
if end_state.balance_proof:
return end_state.balance_proof.transferred_amount
else:
return 0
if target_address == raiden.address:
balance = lambda channel_state: get_balance(channel_state.partner_state)
elif target_address == partner_address:
balance = lambda channel_state: get_balance(channel_state.our_state)
else:
raise ValueError('target_address must be one of the channel participants')
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
while balance(channel_state) < target_balance:
log.critical('wait', b=balance(channel_state), t=target_balance)
gevent.sleep(retry_timeout)
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
partner_address,
)
def wait_for_close(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.Address,
channel_ids: typing.List[typing.ChannelID],
retry_timeout: float,
) -> None:
"""Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout.
"""
channel_ids = list(channel_ids)
while channel_ids:
last_id = channel_ids[-1]
channel_state = views.get_channelstate_by_id(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
last_id,
)
channel_is_settled = (
channel_state is None or
channel.get_status(channel_state) in CHANNEL_AFTER_CLOSE_STATES
)
if channel_is_settled:
channel_ids.pop()
else:
gevent.sleep(retry_timeout)
def wait_for_payment_network(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.TokenAddress,
retry_timeout: float,
) -> None:
token_network = views.get_token_network_by_token_address(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
)
while token_network is None:
gevent.sleep(retry_timeout)
token_network = views.get_token_network_by_token_address(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
)
def wait_for_settle(
raiden: RaidenService,
payment_network_id: typing.PaymentNetworkID,
token_address: typing.TokenAddress,
channel_ids: typing.List[typing.ChannelID],
retry_timeout: float,
) -> None:
"""Wait until all channels are settled.
Note:
This does not time out, use gevent.Timeout.
"""
if not isinstance(channel_ids, list):
raise ValueError('channel_ids must be a list')
channel_ids = list(channel_ids)
while channel_ids:
last_id = channel_ids[-1]
channel_state = views.get_channelstate_by_id(
views.state_from_raiden(raiden),
payment_network_id,
token_address,
last_id,
)
channel_is_settled = (
channel_state is None or
channel.get_status(channel_state) == CHANNEL_STATE_SETTLED
)
if channel_is_settled:
channel_ids.pop()
else:
gevent.sleep(retry_timeout)
def wait_for_settle_all_channels(
raiden: RaidenService,
retry_timeout: float,
) -> None:
"""Wait until all channels are settled.
Note:
This does not time out, use gevent.Timeout.
"""
chain_state = views.state_from_raiden(raiden)
id_paymentnetworkstate = chain_state.identifiers_to_paymentnetworks.items()
for payment_network_id, payment_network_state in id_paymentnetworkstate:
id_tokennetworkstate = payment_network_state.tokenidentifiers_to_tokennetworks.items()
for token_network_id, token_network_state in id_tokennetworkstate:
channel_ids = token_network_state.channelidentifiers_to_channels.keys()
wait_for_settle(
raiden,
payment_network_id,
token_network_id,
channel_ids,
retry_timeout,
)
def wait_for_healthy(
raiden: RaidenService,
node_address: typing.Address,
retry_timeout: float,
) -> None:
"""Wait until `node_address` becomes healthy.
Note:
This does not time out, use gevent.Timeout.
"""
network_statuses = views.get_networkstatuses(
views.state_from_raiden(raiden),
)
while network_statuses.get(node_address) != NODE_NETWORK_REACHABLE:
gevent.sleep(retry_timeout)
network_statuses = views.get_networkstatuses(
views.state_from_raiden(raiden),
)
def wait_for_transfer_success(
raiden: RaidenService,
payment_identifier: typing.PaymentID,
amount: typing.PaymentAmount,
retry_timeout: float,
) -> None:
"""Wait until a direct transfer with a specific identifier and amount
is seen in the WAL.
Note:
This does not time out, use gevent.Timeout.
"""
found = False
while not found:
state_events = raiden.wal.storage.get_events()
for event in state_events:
found = (
isinstance(event, EventPaymentReceivedSuccess) and
event.identifier == payment_identifier and
event.amount == amount
)
if found:
break
gevent.sleep(retry_timeout)
| 29.712903 | 94 | 0.658778 |
e76b8512c3380414cf09f374980f1d5d8a9d9efd | 20,099 | py | Python | tests/unit/test_sns.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | 2 | 2021-02-06T10:07:22.000Z | 2021-03-27T02:08:11.000Z | tests/unit/test_sns.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | 1 | 2021-03-01T13:55:42.000Z | 2021-03-01T13:55:42.000Z | tests/unit/test_sns.py | zonywhoop/localstack | 673e1a23374362c64606fb36c0746ee29cbf5553 | [
"Apache-2.0"
] | 1 | 2021-02-08T03:29:16.000Z | 2021-02-08T03:29:16.000Z | import json
import uuid
import base64
import unittest
import dateutil.parser
import re
from localstack.services.sns import sns_listener
class SNSTests(unittest.TestCase):
def setUp(self):
self.subscriber = {
'Protocol': 'sqs',
'RawMessageDelivery': 'false',
'TopicArn': 'arn',
}
# Reset subscriptions
sns_listener.SNS_SUBSCRIPTIONS = {}
def test_unsubscribe_without_arn_should_error(self):
sns = sns_listener.ProxyListenerSNS()
error = sns.forward_request('POST', '/', 'Action=Unsubscribe', '')
self.assertTrue(error is not None)
self.assertEqual(error.status_code, 400)
def test_unsubscribe_should_remove_listener(self):
sub_arn = 'arn:aws:sns:us-east-1:000000000000:test-topic:45e61c7f-dca5-4fcd-be2b-4e1b0d6eef72'
topic_arn = 'arn:aws:sns:us-east-1:000000000000:test-topic'
sns_listener.do_subscribe(
topic_arn,
'arn:aws:sqs:us-east-1:000000000000:test-queue',
'sqs',
sub_arn,
{}
)
self.assertTrue(sns_listener.get_subscription_by_arn(sub_arn))
sns_listener.do_unsubscribe(sub_arn)
self.assertFalse(sns_listener.get_subscription_by_arn(sub_arn))
def test_get_subscribe_attributes(self):
req_data = {
'Attribute.entry.1.key': ['RawMessageDelivery'],
'Attribute.entry.1.value': ['true'],
'Attribute.entry.2.key': ['FilterPolicy'],
'Attribute.entry.2.value': ['{"type": ["foo", "bar"]}']
}
attributes = sns_listener.get_subscribe_attributes(req_data)
self.assertDictEqual(
attributes,
{
'RawMessageDelivery': 'true',
'FilterPolicy': '{"type": ["foo", "bar"]}'
}
)
def test_create_sns_message_body_raw_message_delivery(self):
self.subscriber['RawMessageDelivery'] = 'true'
action = {
'Message': ['msg']
}
result = sns_listener.create_sns_message_body(self.subscriber, action)
self.assertEqual(result, 'msg')
def test_create_sns_message_body(self):
action = {
'Message': ['msg']
}
result_str = sns_listener.create_sns_message_body(self.subscriber, action, str(uuid.uuid4()))
result = json.loads(result_str)
try:
uuid.UUID(result.pop('MessageId'))
except KeyError:
assert False, 'MessageId missing in SNS response message body'
except ValueError:
assert False, 'SNS response MessageId not a valid UUID'
try:
dateutil.parser.parse(result.pop('Timestamp'))
except KeyError:
assert False, 'Timestamp missing in SNS response message body'
except ValueError:
assert False, 'SNS response Timestamp not a valid ISO 8601 date'
self.assertEqual(result, {
'Message': 'msg',
'Signature': 'EXAMPLEpH+..',
'SignatureVersion': '1',
'SigningCertURL':
'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem',
'TopicArn': 'arn',
'Type': 'Notification'
})
# Now add a subject
action = {
'Message': ['msg'],
'Subject': ['subject'],
'MessageAttributes.entry.1.Name': ['attr1'],
'MessageAttributes.entry.1.Value.DataType': ['String'],
'MessageAttributes.entry.1.Value.StringValue': ['value1'],
'MessageAttributes.entry.1.Value.BinaryValue': ['value1'],
'MessageAttributes.entry.2.Name': ['attr2'],
'MessageAttributes.entry.2.Value.DataType': ['String'],
'MessageAttributes.entry.2.Value.StringValue': ['value2'],
'MessageAttributes.entry.2.Value.BinaryValue': ['value2'],
}
result_str = sns_listener.create_sns_message_body(self.subscriber, action)
result = json.loads(result_str)
del result['MessageId']
del result['Timestamp']
msg = {
'Message': 'msg',
'Subject': 'subject',
'Signature': 'EXAMPLEpH+..',
'SignatureVersion': '1',
'SigningCertURL':
'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem',
'TopicArn': 'arn',
'Type': 'Notification',
'MessageAttributes': {
'attr1': {
'Type': 'String',
'Value': 'value1',
}, 'attr2': {
'Type': 'String',
'Value': 'value2',
}
}
}
expected = json.dumps(msg)
self.assertEqual(result, json.loads(expected))
def test_create_sns_message_body_json_structure(self):
action = {
'Message': ['{"default": {"message": "abc"}}'],
'MessageStructure': ['json']
}
result_str = sns_listener.create_sns_message_body(self.subscriber, action)
result = json.loads(result_str)
self.assertEqual(result['Message'], {'message': 'abc'})
def test_create_sns_message_body_json_structure_without_default_key(self):
action = {
'Message': ['{"message": "abc"}'],
'MessageStructure': ['json']
}
with self.assertRaises(Exception) as exc:
sns_listener.create_sns_message_body(self.subscriber, action)
self.assertEqual(str(exc.exception), "Unable to find 'default' key in message payload")
def test_create_sns_message_body_json_structure_sqs_protocol(self):
action = {
'Message': ['{"default": "default message", "sqs": "sqs message"}'],
'MessageStructure': ['json']
}
result_str = sns_listener.create_sns_message_body(self.subscriber, action)
result = json.loads(result_str)
self.assertEqual(result['Message'], 'sqs message')
def test_create_sqs_message_attributes(self):
self.subscriber['RawMessageDelivery'] = 'true'
action = {
'Message': ['msg'],
'Subject': ['subject'],
'MessageAttributes.entry.1.Name': ['attr1'],
'MessageAttributes.entry.1.Value.DataType': ['String'],
'MessageAttributes.entry.1.Value.StringValue': ['value1'],
'MessageAttributes.entry.2.Name': ['attr2'],
'MessageAttributes.entry.2.Value.DataType': ['Binary'],
# SNS gets binary data as base64 encoded string, but it should pass raw bytes further to SQS
'MessageAttributes.entry.2.Value.BinaryValue': [base64.b64encode('value2'.encode('utf-8'))],
'MessageAttributes.entry.3.Name': ['attr3'],
'MessageAttributes.entry.3.Value.DataType': ['Number'],
'MessageAttributes.entry.3.Value.StringValue': ['3'],
}
attributes = sns_listener.get_message_attributes(action)
result = sns_listener.create_sqs_message_attributes(self.subscriber, attributes)
self.assertEqual(result['attr1']['DataType'], 'String')
self.assertEqual(result['attr1']['StringValue'], 'value1')
self.assertEqual(result['attr2']['DataType'], 'Binary')
self.assertEqual(result['attr2']['BinaryValue'], 'value2'.encode('utf-8'))
self.assertEqual(result['attr3']['DataType'], 'Number')
self.assertEqual(result['attr3']['StringValue'], '3')
def test_create_sns_message_timestamp_millis(self):
action = {
'Message': ['msg']
}
result_str = sns_listener.create_sns_message_body(self.subscriber, action)
result = json.loads(result_str)
timestamp = result.pop('Timestamp')
end = timestamp[-5:]
matcher = re.compile(r'\.[0-9]{3}Z')
match = matcher.match(end)
self.assertTrue(match is not None)
def test_only_one_subscription_per_topic_per_endpoint(self):
sub_arn = 'arn:aws:sns:us-east-1:000000000000:test-topic:45e61c7f-dca5-4fcd-be2b-4e1b0d6eef72'
topic_arn = 'arn:aws:sns:us-east-1:000000000000:test-topic'
for i in [1, 2]:
sns_listener.do_subscribe(
topic_arn,
'arn:aws:sqs:us-east-1:000000000000:test-queue-1',
'sqs',
sub_arn,
{}
)
self.assertEqual(len(sns_listener.SNS_SUBSCRIPTIONS[topic_arn]), 1)
def test_filter_policy(self):
test_data = [
(
'no filter with no attributes',
{},
{},
True
),
(
'no filter with attributes',
{},
{'filter': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'exact string filter',
{'filter': 'type1'},
{'filter': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'exact string filter on an array',
{'filter': 'soccer'},
{'filter': {'Type': 'String.Array', 'Value': "['soccer', 'rugby', 'hockey']"}},
True
),
(
'exact string filter with no attributes',
{'filter': 'type1'},
{},
False
),
(
'exact string filter with no match',
{'filter': 'type1'},
{'filter': {'Type': 'String', 'Value': 'type2'}},
False
),
(
'or string filter with match',
{'filter': ['type1', 'type2']},
{'filter': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'or string filter with other match',
{'filter': ['type1', 'type2']},
{'filter': {'Type': 'String', 'Value': 'type2'}},
True
),
(
'or string filter match with an array',
{'filter': ['soccer', 'basketball']},
{'filter': {'Type': 'String.Array', 'Value': "['soccer', 'rugby', 'hockey']"}},
True
),
(
'or string filter with no attributes',
{'filter': ['type1', 'type2']},
{},
False
),
(
'or string filter with no match',
{'filter': ['type1', 'type2']},
{'filter': {'Type': 'String', 'Value': 'type3'}},
False
),
(
'or string filter no match with an array',
{'filter': ['volleyball', 'basketball']},
{'filter': {'Type': 'String.Array', 'Value': "['soccer', 'rugby', 'hockey']"}},
False
),
(
'anything-but string filter with match',
{'filter': [{'anything-but': 'type1'}]},
{'filter': {'Type': 'String', 'Value': 'type1'}},
False
),
(
'anything-but string filter with no match',
{'filter': [{'anything-but': 'type1'}]},
{'filter': {'Type': 'String', 'Value': 'type2'}},
True
),
(
'prefix string filter with match',
{'filter': [{'prefix': 'typ'}]},
{'filter': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'prefix string filter match with an array',
{'filter': [{'prefix': 'soc'}]},
{'filter': {'Type': 'String.Array', 'Value': "['soccer', 'rugby', 'hockey']"}},
True
),
(
'prefix string filter with no match',
{'filter': [{'prefix': 'test'}]},
{'filter': {'Type': 'String', 'Value': 'type2'}},
False
),
(
'numeric = filter with match',
{'filter': [{'numeric': ['=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
True
),
(
'numeric = filter with no match',
{'filter': [{'numeric': ['=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 301}},
False
),
(
'numeric > filter with match',
{'filter': [{'numeric': ['>', 300]}]},
{'filter': {'Type': 'Number', 'Value': 301}},
True
),
(
'numeric > filter with no match',
{'filter': [{'numeric': ['>', 300]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
False
),
(
'numeric < filter with match',
{'filter': [{'numeric': ['<', 300]}]},
{'filter': {'Type': 'Number', 'Value': 299}},
True
),
(
'numeric < filter with no match',
{'filter': [{'numeric': ['<', 300]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
False
),
(
'numeric >= filter with match',
{'filter': [{'numeric': ['>=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
True
),
(
'numeric >= filter with no match',
{'filter': [{'numeric': ['>=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 299}},
False
),
(
'numeric <= filter with match',
{'filter': [{'numeric': ['<=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
True
),
(
'numeric <= filter with no match',
{'filter': [{'numeric': ['<=', 300]}]},
{'filter': {'Type': 'Number', 'Value': 301}},
False
),
(
'numeric filter with bad data',
{'filter': [{'numeric': ['=', 300]}]},
{'filter': {'Type': 'String', 'Value': 'test'}},
False
),
(
'logical OR with match',
{'filter': ['test1', 'test2', {'prefix': 'typ'}]},
{'filter': {'Type': 'String', 'Value': 'test2'}},
True
),
(
'logical OR with match',
{'filter': ['test1', 'test2', {'prefix': 'typ'}]},
{'filter': {'Type': 'String', 'Value': 'test1'}},
True
),
(
'logical OR with match on an array',
{'filter': ['test1', 'test2', {'prefix': 'typ'}]},
{'filter': {'Type': 'String.Array', 'Value': "['test1', 'other']"}},
True
),
(
'logical OR no match',
{'filter': ['test1', 'test2', {'prefix': 'typ'}]},
{'filter': {'Type': 'String', 'Value': 'test3'}},
False
),
(
'logical OR no match on an array',
{'filter': ['test1', 'test2', {'prefix': 'typ'}]},
{'filter': {'Type': 'String.Array', 'Value': "['anything', 'something']"}},
False
),
(
'logical AND with match',
{'filter': [{'numeric': ['=', 300]}], 'other': [{'prefix': 'typ'}]},
{'filter': {'Type': 'Number', 'Value': 300}, 'other': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'logical AND missing first attribute',
{'filter': [{'numeric': ['=', 300]}], 'other': [{'prefix': 'typ'}]},
{'other': {'Type': 'String', 'Value': 'type1'}},
False
),
(
'logical AND missing second attribute',
{'filter': [{'numeric': ['=', 300]}], 'other': [{'prefix': 'typ'}]},
{'filter': {'Type': 'Number', 'Value': 300}},
False
),
(
'logical AND no match',
{'filter': [{'numeric': ['=', 300]}], 'other': [{'prefix': 'typ'}]},
{'filter': {'Type': 'Number', 'Value': 299}, 'other': {'Type': 'String', 'Value': 'type1'}},
False
),
(
'multiple numeric filters with first match',
{'filter': [{'numeric': ['=', 300]}, {'numeric': ['=', 500]}]},
{'filter': {'Type': 'Number', 'Value': 300}},
True
),
(
'multiple numeric filters with second match',
{'filter': [{'numeric': ['=', 300]}, {'numeric': ['=', 500]}]},
{'filter': {'Type': 'Number', 'Value': 500}},
True
),
(
'multiple prefix filters with first match',
{'filter': [{'prefix': 'typ'}, {'prefix': 'tes'}]},
{'filter': {'Type': 'String', 'Value': 'type1'}},
True
),
(
'multiple prefix filters with second match',
{'filter': [{'prefix': 'typ'}, {'prefix': 'tes'}]},
{'filter': {'Type': 'String', 'Value': 'test'}},
True
),
(
'multiple anything-but filters with second match',
{'filter': [{'anything-but': 'type1'}, {'anything-but': 'type2'}]},
{'filter': {'Type': 'String', 'Value': 'type2'}},
True
),
(
'multiple numeric conditions',
{'filter': [{'numeric': ['>', 0, '<=', 150]}]},
{'filter': {'Type': 'Number', 'Value': 122}},
True
),
(
'multiple numeric conditions',
{'filter': [{'numeric': ['>', 0, '<=', 150]}]},
{'filter': {'Type': 'Number', 'Value': 200}},
False
),
(
'multiple numeric conditions',
{'filter': [{'numeric': ['>', 0, '<=', 150]}]},
{'filter': {'Type': 'Number', 'Value': -1}},
False
),
(
'multiple conditions on an array',
{'filter': ['test1', 'test2', {'prefix': 'som'}]},
{'filter': {'Type': 'String.Array', 'Value': "['anything', 'something']"}},
True
)
]
for test in test_data:
test_name = test[0]
filter_policy = test[1]
attributes = test[2]
expected = test[3]
self.assertEqual(sns_listener.check_filter_policy(filter_policy, attributes), expected, test_name)
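    # Illustrative sketch (not part of the original test suite): a filter policy is
    # checked against message attributes directly, mirroring one of the cases above:
    #
    #     policy = {'filter': [{'numeric': ['>', 0, '<=', 150]}]}
    #     attrs = {'filter': {'Type': 'Number', 'Value': 122}}
    #     sns_listener.check_filter_policy(policy, attrs)  # -> True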
def test_is_raw_message_delivery(self):
valid_true_values = ['true', 'True', True]
for true_value in valid_true_values:
self.subscriber['RawMessageDelivery'] = true_value
self.assertTrue(sns_listener.is_raw_message_delivery(self.subscriber))
def test_is_not_raw_message_delivery(self):
        invalid_values = ['false', 'False', False, 'somevalue', '']
        for invalid_value in invalid_values:
            self.subscriber['RawMessageDelivery'] = invalid_value
self.assertFalse(sns_listener.is_raw_message_delivery(self.subscriber))
del self.subscriber['RawMessageDelivery']
self.assertFalse(sns_listener.is_raw_message_delivery(self.subscriber))
| 38.651923 | 110 | 0.464202 |
48f5aaacde39ea55fa9aa8760ec455ce073a40db | 26,058 | py | Python | node/market.py | benemorius/OpenBazaar | 01be48a53a84099bbb8564d050f618caf82bc008 | [
"MIT"
] | 1 | 2019-06-18T07:38:33.000Z | 2019-06-18T07:38:33.000Z | node/market.py | benemorius/OpenBazaar | 01be48a53a84099bbb8564d050f618caf82bc008 | [
"MIT"
] | null | null | null | node/market.py | benemorius/OpenBazaar | 01be48a53a84099bbb8564d050f618caf82bc008 | [
"MIT"
] | null | null | null | """
This module manages all market related activities
"""
from StringIO import StringIO
import ast
from base64 import b64decode, b64encode
import logging
import traceback
from PIL import Image, ImageOps
import gnupg
import tornado
from zmq.eventloop import ioloop
import constants
from pybitcointools.main import privkey_to_pubkey
from data_uri import DataURI
from orders import Orders
from protocol import proto_page, query_page
from threading import Thread
from crypto_util import makePrivCryptor
import random
import json
import hashlib
ioloop.install()
class Market(object):
def __init__(self, transport, db):
"""This class manages the active market for the application
Attributes:
transport (CryptoTransportLayer): Transport layer for messaging between nodes.
dht (DHT): For storage across the network.
market_id (int): Indicates which local market we're working with.
"""
# Current
self.transport = transport
self.dht = transport.get_dht()
self.market_id = transport.get_market_id()
# self._myself = transport.get_myself()
self.peers = self.dht.getActivePeers()
self.db = db
self.orders = Orders(transport, self.market_id, db)
self.pages = {}
self.mypage = None
self.signature = None
self.nickname = ""
self.log = logging.getLogger(
'[%s] %s' % (self.market_id, self.__class__.__name__)
)
self.settings = self.transport.settings
self.gpg = gnupg.GPG()
# Register callbacks for incoming events
self.transport.add_callbacks([
('query_myorders', self.on_query_myorders),
('peer', self.on_peer),
('query_page', self.on_query_page),
('query_listings', self.on_query_listings),
('negotiate_pubkey', self.on_negotiate_pubkey),
('proto_response_pubkey', self.on_response_pubkey)
])
self.load_page()
# Periodically refresh buckets
loop = tornado.ioloop.IOLoop.instance()
refreshCB = tornado.ioloop.PeriodicCallback(self.dht._refreshNode,
constants.refreshTimeout,
io_loop=loop)
refreshCB.start()
def load_page(self):
nickname = self.settings['nickname'] \
if 'nickname' in self.settings else ""
        # store_description = self.settings['storeDescription'] if 'storeDescription' in self.settings else ""
self.nickname = nickname
def disable_welcome_screen(self):
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
{"welcome": "disable"}
)
def private_key(self):
return self.settings['secret']
def on_listing_results(self, results):
self.log.debug('Listings %s' % results)
@staticmethod
def process_contract_image(image):
uri = DataURI(image)
imageData = uri.data
# mime_type = uri.mimetype
charset = uri.charset
image = Image.open(StringIO(imageData))
croppedImage = ImageOps.fit(image, (200, 200), centering=(0.5, 0.5))
data = StringIO()
croppedImage.save(data, format='PNG')
new_uri = DataURI.make(
'image/png',
charset=charset,
base64=True,
data=data.getvalue())
data.close()
return new_uri
def get_contract_id(self):
return random.randint(0, 1000000)
def linebreak_signing_data(self, data):
json_string = json.dumps(data, indent=0)
seg_len = 52
out_text = "\n".join(
json_string[x:x + seg_len]
for x in range(0, len(json_string), seg_len)
)
return out_text
def generate_contract_key(self, signed_contract):
contract_hash = hashlib.sha1(str(signed_contract)).hexdigest()
hash_value = hashlib.new('ripemd160')
hash_value.update(contract_hash)
return hash_value.hexdigest()
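    # Illustrative sketch (not part of the original module): the contract key is
    # a RIPEMD-160 digest of the SHA-1 hex digest of the signed contract, e.g.
    #
    #     inner = hashlib.sha1(str(signed_contract)).hexdigest()
    #     outer = hashlib.new('ripemd160')
    #     outer.update(inner)
    #     contract_key = outer.hexdigest()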
def save_contract_to_db(self, contract_id, body, signed_body, key):
self.db.insertEntry(
"contracts",
{
"id": contract_id,
"market_id": self.transport.market_id,
"contract_body": json.dumps(body),
"signed_contract_body": str(signed_body),
"state": "seed",
"deleted": 0,
"key": key
}
)
def update_keywords_on_network(self, key, keywords):
for keyword in keywords:
keyword = keyword.upper()
hash_value = hashlib.new('ripemd160')
keyword_key = 'keyword-%s' % keyword
hash_value.update(keyword_key.encode('utf-8'))
keyword_key = hash_value.hexdigest()
self.transport.dht.iterativeStore(
self.transport,
keyword_key,
json.dumps({
'keyword_index_add': {
"guid": self.transport.guid,
"key": key
}
}),
self.transport.guid
)
def save_contract(self, msg):
contract_id = self.get_contract_id()
# Refresh market settings
self.settings = self.get_settings()
msg['Seller']['seller_PGP'] = self.gpg.export_keys(self.settings['PGPPubkeyFingerprint'])
msg['Seller']['seller_BTC_uncompressed_pubkey'] = self.settings['btc_pubkey']
msg['Seller']['seller_GUID'] = self.settings['guid']
msg['Seller']['seller_Bitmessage'] = self.settings['bitmessage']
# Process and crop thumbs for images
if 'item_images' in msg['Contract']:
if 'image1' in msg['Contract']['item_images']:
img = msg['Contract']['item_images']['image1']
new_uri = self.process_contract_image(img)
msg['Contract']['item_images'] = new_uri
# Line break the signing data
out_text = self.linebreak_signing_data(msg)
# Sign the contract
signed_data = self.gpg.sign(out_text,
passphrase='P@ssw0rd',
keyid=self.settings.get('PGPPubkeyFingerprint'))
# Save contract to DHT
contract_key = self.generate_contract_key(signed_data)
# Store contract in database
self.save_contract_to_db(contract_id, msg, signed_data, contract_key)
# Store listing
self.transport.dht.iterativeStore(
self.transport,
contract_key,
str(signed_data),
self.transport.guid
)
self.update_listings_index()
# If keywords are present
keywords = msg['Contract']['item_keywords']
self.update_keywords_on_network(contract_key, keywords)
def shipping_address(self):
settings = self.get_settings()
shipping_address = {"recipient_name": settings.get('recipient_name'),
"street1": settings.get('street1'),
"street2": settings.get('street2'),
"city": settings.get('city'),
"stateRegion": settings.get('stateRegion'),
"stateProvinceRegion": settings.get('stateProvinceRegion'),
"zip": settings.get('zip'),
"country": settings.get('country'),
"countryCode": settings.get('countryCode')}
return shipping_address
def add_trusted_notary(self, guid, nickname=""):
self.log.debug('%s %s' % (guid, nickname))
notaries = self.settings.get('notaries')
self.log.debug('notaries: %s' % notaries)
if notaries == "" or notaries == []:
notaries = []
else:
notaries = json.loads(notaries)
for notary in notaries:
self.log.info(notary)
if notary.get('guid') == guid:
if notary.get('nickname') != nickname:
notary['nickname'] = nickname
notary['idx'] = notary
self.settings['notaries'] = notaries
return
notaries.append({"guid": guid, "nickname": nickname})
self.settings['notaries'] = json.dumps(notaries)
if 'btc_pubkey' in self.settings:
del self.settings['btc_pubkey']
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
self.settings
)
def _decode_list(self, data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = self._decode_list(item)
elif isinstance(item, dict):
item = self._decode_dict(item)
rv.append(item)
return rv
def _decode_dict(self, data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = self._decode_list(value)
elif isinstance(value, dict):
value = self._decode_dict(value)
rv[key] = value
return rv
def remove_trusted_notary(self, guid):
notaries = self.settings.get('notaries')
notaries = ast.literal_eval(notaries)
for idx, notary in enumerate(notaries):
if notary.get('guid') == guid:
del notaries[idx]
self.settings['notaries'] = json.dumps(notaries)
self.db.updateEntries(
"settings",
{'market_id': self.transport.market_id},
self.settings
)
def republish_contracts(self):
self.log.info("Publishing contracts")
listings = self.db.selectEntries("contracts", {"deleted": 0})
for listing in listings:
self.transport.dht.iterativeStore(
self.transport,
listing['key'],
listing.get('signed_contract_body'),
self.transport.guid
)
self.log.info("Finished republishing contracts")
self.update_listings_index()
def get_notaries(self, online_only=False):
self.log.debug('Getting notaries')
notaries = []
settings = self.get_settings()
# Untested code
if online_only:
notaries = {}
for n in settings['notaries']:
peer = self.dht.routingTable.getContact(n.guid)
if peer is not None:
t = Thread(target=peer.start_handshake)
t.start()
notaries.append(n)
return notaries
# End of untested code
return settings['notaries']
@staticmethod
def valid_guid(guid):
return len(guid) == 40 and int(guid, 16)
def republish_listing(self, msg):
listing_id = msg.get('productID')
listing = self.db.selectEntries("products", {"id": listing_id})
if listing:
listing = listing[0]
else:
return
listing_key = listing['key']
self.transport.dht.iterativeStore(
self.transport,
listing_key,
listing.get('signed_contract_body'),
self.transport.guid
)
self.update_listings_index()
# If keywords store them in the keyword index
# keywords = msg['Contract']['item_keywords']
# self.log.info('Keywords: %s' % keywords)
# for keyword in keywords:
#
# hash_value = hashlib.new('ripemd160')
# hash_value.update('keyword-%s' % keyword)
# keyword_key = hash_value.hexdigest()
#
# self.transport.dht.iterativeStore(self.transport, keyword_key, json.dumps({'keyword_index_add': contract_key}), self.transport.guid)
def update_listings_index(self):
# Store to marketplace listing index
contract_index_key = hashlib.sha1('contracts-%s' %
self.transport.guid).hexdigest()
hashvalue = hashlib.new('ripemd160')
hashvalue.update(contract_index_key)
contract_index_key = hashvalue.hexdigest()
# Calculate index of contracts
contract_ids = self.db.selectEntries(
"contracts",
{"market_id": self.transport.market_id, "deleted": 0}
)
my_contracts = []
for contract_id in contract_ids:
my_contracts.append(contract_id['key'])
self.log.debug('My Contracts: %s' % my_contracts)
# Sign listing index for validation and tamper resistance
data_string = str({'guid': self.transport.guid,
'contracts': my_contracts})
signature = makePrivCryptor(self.transport.settings['secret']).sign(data_string).encode('hex')
value = {'signature': signature,
'data': {'guid': self.transport.guid,
'contracts': my_contracts}}
# Pass off to thread to keep GUI snappy
t = Thread(target=self.transport.dht.iterativeStore, args=(self.transport,
contract_index_key,
value,
self.transport.guid,))
t.start()
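    # Illustrative sketch (not part of the original module): the value stored at
    # the contract index key above has the shape
    #
    #     {'signature': <hex signature of str(data)>,
    #      'data': {'guid': <node guid>, 'contracts': [<contract key>, ...]}}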
def remove_contract(self, msg):
self.log.info("Removing contract: %s" % msg)
# Remove from DHT keyword indices
self.remove_from_keyword_indexes(msg['contract_id'])
self.db.updateEntries("contracts", {"id": msg["contract_id"]}, {"deleted": 1})
self.update_listings_index()
def remove_from_keyword_indexes(self, contract_id):
contract = self.db.selectEntries("contracts", {"id": contract_id})[0]
contract_key = contract['key']
contract = json.loads(contract['contract_body'])
contract_keywords = contract['Contract']['item_keywords']
for keyword in contract_keywords:
# Remove keyword from index
hash_value = hashlib.new('ripemd160')
keyword_key = 'keyword-%s' % keyword
hash_value.update(keyword_key.encode('utf-8'))
keyword_key = hash_value.hexdigest()
self.transport.dht.iterativeStore(
self.transport,
keyword_key,
json.dumps({
'keyword_index_remove': {
"guid": self.transport.guid,
"key": contract_key
}
}),
self.transport.guid
)
def get_messages(self):
self.log.info("Listing messages for market: %s" % self.transport.market_id)
settings = self.get_settings()
try:
# Request all messages for our address
inboxmsgs = json.loads(self.transport.bitmessage_api.getInboxMessagesByReceiver(
settings['bitmessage']))
for m in inboxmsgs['inboxMessages']:
# Base64 decode subject and content
m['subject'] = b64decode(m['subject'])
m['message'] = b64decode(m['message'])
# TODO: Augment with market, if available
return {"messages": inboxmsgs}
except Exception as e:
self.log.error("Failed to get inbox messages: {}".format(e))
self.log.error(traceback.format_exc())
return {}
def send_message(self, msg):
self.log.info("Sending message for market: %s" % self.transport.market_id)
settings = self.get_settings()
try:
# Base64 decode subject and content
self.log.info("Encoding message: {}".format(msg))
subject = b64encode(msg['subject'])
body = b64encode(msg['body'])
result = self.transport.bitmessage_api.sendMessage(
msg['to'], settings['bitmessage'], subject, body
)
self.log.info("Send message result: {}".format(result))
return {}
except Exception as e:
self.log.error("Failed to send message: %s" % e)
self.log.error(traceback.format_exc())
return {}
def get_contracts(self, page=0):
self.log.info('Getting contracts for market: %s' % self.transport.market_id)
contracts = self.db.selectEntries(
"contracts",
{"market_id": self.transport.market_id, "deleted": 0},
limit=10,
limit_offset=(page * 10)
)
my_contracts = []
for contract in contracts:
try:
                self.log.debug(contract)
contract_body = json.loads(u"%s" % contract['contract_body'])
item_price = contract_body.get('Contract').get('item_price') if contract_body.get('Contract').get('item_price') > 0 else 0
shipping_price = contract_body.get('Contract').get('item_delivery').get('shipping_price') if contract_body.get('Contract').get('item_delivery').get('shipping_price') > 0 else 0
my_contracts.append({"key": contract['key'] if 'key' in contract else "",
"id": contract['id'] if 'id' in contract else "",
"item_images": contract_body.get('Contract').get('item_images'),
"signed_contract_body": contract['signed_contract_body'] if 'signed_contract_body' in contract else "",
"contract_body": contract_body,
"unit_price": item_price,
"deleted": contract.get('deleted'),
"shipping_price": shipping_price,
"item_title": contract_body.get('Contract').get('item_title'),
"item_desc": contract_body.get('Contract').get('item_desc'),
"item_condition": contract_body.get('Contract').get('item_condition'),
"item_quantity_available": contract_body.get('Contract').get('item_quantity')})
            except Exception:
self.log.error('Problem loading the contract body JSON')
return {"contracts": my_contracts, "page": page,
"total_contracts": len(self.db.selectEntries("contracts", {"deleted": "0"}))}
def undo_remove_contract(self, contract_id):
self.log.info('Undo remove contract: %s' % contract_id)
self.db.updateEntries("contracts",
{"market_id": self.transport.market_id.replace("'", "''"), "id": contract_id},
{"deleted": "0"})
# SETTINGS
def save_settings(self, msg):
self.log.debug("Settings to save %s" % msg)
# Check for any updates to arbiter or notary status to push to the DHT
if 'notary' in msg:
# Generate notary index key
hash_value = hashlib.new('ripemd160')
hash_value.update('notary-index')
key = hash_value.hexdigest()
if msg['notary'] is True:
self.log.info('Letting the network know you are now a notary')
data = json.dumps({'notary_index_add': self.transport.guid})
self.transport.dht.iterativeStore(self.transport, key, data, self.transport.guid)
else:
self.log.info('Letting the network know you are not a notary')
data = json.dumps({'notary_index_remove': self.transport.guid})
self.transport.dht.iterativeStore(self.transport, key, data, self.transport.guid)
# Update nickname
self.transport.nickname = msg['nickname']
if 'burnAmount' in msg:
del msg['burnAmount']
if 'burnAddr' in msg:
del msg['burnAddr']
# Update local settings
self.db.updateEntries("settings", {'market_id': self.transport.market_id}, msg)
def get_settings(self):
self.log.info('Getting settings info for Market %s' % self.transport.market_id)
settings = self.db.getOrCreate("settings", {"market_id": self.transport.market_id})
if settings['arbiter'] == 1:
settings['arbiter'] = True
if settings['notary'] == 1:
settings['notary'] = True
settings['notaries'] = ast.literal_eval(settings['notaries']) if settings['notaries'] != "" else []
settings['trustedArbiters'] = ast.literal_eval(settings['trustedArbiters']) if settings['trustedArbiters'] != "" else []
settings['privkey'] = settings['privkey'] if 'secret' in settings else ""
settings['btc_pubkey'] = privkey_to_pubkey(settings.get('privkey'))
settings['secret'] = settings['secret'] if 'secret' in settings else ""
self.log.info('SETTINGS: %s' % settings)
if settings:
return settings
else:
return {}
# PAGE QUERYING
def query_page(self, find_guid, callback=lambda msg: None):
self.log.info('Querying node for market page: %s' % find_guid)
msg = query_page(find_guid)
msg['uri'] = self.transport.uri
msg['senderGUID'] = self.transport.guid
msg['sin'] = self.transport.sin
msg['pubkey'] = self.transport.pubkey
self.transport.send(msg, find_guid, callback)
# Return your page info if someone requests it on the network
def on_query_page(self, peer):
self.log.info("Someone is querying for your page")
settings = self.get_settings()
new_peer = self.transport.get_crypto_peer(
peer['senderGUID'],
peer['uri'],
pubkey=peer['pubkey'],
nickname=peer['senderNick']
)
def send_page_query():
t = Thread(target=new_peer.start_handshake)
t.start()
new_peer.send(proto_page(self.transport.uri,
self.transport.pubkey,
self.transport.guid,
settings['storeDescription'],
self.signature,
settings['nickname'],
settings['PGPPubKey'] if 'PGPPubKey' in settings else '',
settings['email'] if 'email' in settings else '',
settings['bitmessage'] if 'bitmessage' in settings else '',
settings['arbiter'] if 'arbiter' in settings else '',
settings['notary'] if 'notary' in settings else '',
settings['arbiterDescription'] if 'arbiterDescription' in settings else '',
self.transport.sin))
t = Thread(target=send_page_query)
t.start()
def on_query_myorders(self, peer):
self.log.info("Someone is querying for your page: %s" % peer)
def on_query_listings(self, peer, page=0):
self.log.info("Someone is querying your listings: %s" % peer)
contracts = self.get_contracts(page)
if len(contracts['contracts']) == 0:
self.transport.send({"type": "no_listing_result"}, peer['senderGUID'])
return
else:
for contract in contracts['contracts']:
contract = contract
contract['type'] = "listing_result"
self.transport.send(contract, peer['senderGUID'])
def on_peer(self, peer):
pass
def on_negotiate_pubkey(self, ident_pubkey):
self.log.info("Someone is asking for your real pubKey")
assert "nickname" in ident_pubkey
assert "ident_pubkey" in ident_pubkey
nickname = ident_pubkey['nickname']
ident_pubkey = ident_pubkey['ident_pubkey'].decode("hex")
self.transport.respond_pubkey_if_mine(nickname, ident_pubkey)
def on_response_pubkey(self, response):
assert "pubkey" in response
assert "nickname" in response
assert "signature" in response
pubkey = response["pubkey"].decode("hex")
# signature = response["signature"].decode("hex")
nickname = response["nickname"]
# Cache mapping for later.
if nickname not in self.transport.nick_mapping:
self.transport.nick_mapping[nickname] = [None, pubkey]
# Verify signature here...
# Add to our dict.
self.transport.nick_mapping[nickname][1] = pubkey
self.log.info("[market] mappings: ###############")
for key, value in self.transport.nick_mapping.iteritems():
self.log.info("'%s' -> '%s' (%s)" % (
key, value[1].encode("hex") if value[1] is not None else value[1],
value[0].encode("hex") if value[0] is not None else value[0]))
self.log.info("##################################")
def release_funds_to_merchant(self, buyer_order_id, tx, script, signatures, guid):
self.log.debug('Release funds to merchant: %s %s %s %s' % (buyer_order_id, tx, signatures, guid))
self.transport.send(
{
'type': 'release_funds_tx',
'tx': tx,
'script': script,
'buyer_order_id': buyer_order_id,
'signatures': signatures
},
guid
)
self.log.debug('TX sent to merchant')
| 38.490399 | 192 | 0.559828 |
f4cd424d5fab8444435a13792ab1633bf1dfc8b9 | 551 | py | Python | shabanipy/plotting/utils.py | ShabaniLab/DataAnalysis | e234b7d0e4ff8ecc11e58134e6309a095abcd2c0 | [
"MIT"
] | 6 | 2019-06-25T20:01:03.000Z | 2022-03-25T23:15:57.000Z | shabanipy/plotting/utils.py | ShabaniLab/DataAnalysis | e234b7d0e4ff8ecc11e58134e6309a095abcd2c0 | [
"MIT"
] | null | null | null | shabanipy/plotting/utils.py | ShabaniLab/DataAnalysis | e234b7d0e4ff8ecc11e58134e6309a095abcd2c0 | [
"MIT"
] | 5 | 2019-06-11T17:21:54.000Z | 2021-08-24T14:45:08.000Z | """General plotting utilities."""
from matplotlib import rcParams
def format_phase(value, tick_number):
"""The value are expected in unit of π
"""
if value == 0:
return '0'
elif value == 1.0:
return 'π'
elif value == -1.0:
return '-π'
else:
return f'{value:g}π'
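# Illustrative usage sketch (not part of the original module): format_phase is
# designed to be wrapped in a matplotlib FuncFormatter for an axis whose data is
# expressed in units of π. `ax` is a hypothetical Axes instance.
def _example_use_format_phase(ax):
    from matplotlib.ticker import FuncFormatter  # local import, only for the sketch
    ax.xaxis.set_major_formatter(FuncFormatter(format_phase))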
def stamp(ax, text):
"""Stamp the plot with an ID."""
ax.text(
1,
1,
text,
size=0.4 * rcParams["font.size"],
ha="right",
va="bottom",
transform=ax.transAxes,
)
| 18.366667 | 42 | 0.513612 |
ec81e78e0e8ea6ff39479a00ce00bf8d16c9875b | 546 | py | Python | lesson06/sunzhaohui/modules/auth.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson06/sunzhaohui/modules/auth.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson06/sunzhaohui/modules/auth.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | # _*_ encoding:utf-8 _*_
__author__ = 'sunzhaohui'
__date__ = '2019-07-01 15:03'
import sys
USERINFO = ("51reboot", "123456")
class Auth(object):
def __init__(self,username,password):
self.username = username
self.password = password
def login(self):
if self.username == USERINFO[0] and self.password == USERINFO[1]:
return True
else:
return False
def session(self):
pass
def logout(self):
print('good bye {}'.format(self.username))
sys.exit(0) | 21.84 | 73 | 0.59707 |
7de0dd48d529dc70d94356c98cb58055266d661c | 23,659 | py | Python | google/ads/google_ads/v2/services/expanded_landing_page_view_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/services/expanded_landing_page_view_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v2/services/expanded_landing_page_view_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v2.services ExpandedLandingPageViewService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.ads.google_ads.v2.services import enums
from google.ads.google_ads.v2.services import expanded_landing_page_view_service_client_config
from google.ads.google_ads.v2.services.transports import expanded_landing_page_view_service_grpc_transport
from google.ads.google_ads.v2.proto.resources import account_budget_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_proposal_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_feed_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_parameter_pb2
from google.ads.google_ads.v2.proto.resources import ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_schedule_view_pb2
from google.ads.google_ads.v2.proto.resources import age_range_view_pb2
from google.ads.google_ads.v2.proto.resources import asset_pb2
from google.ads.google_ads.v2.proto.resources import bidding_strategy_pb2
from google.ads.google_ads.v2.proto.resources import billing_setup_pb2
from google.ads.google_ads.v2.proto.resources import campaign_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import campaign_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import campaign_budget_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import campaign_draft_pb2
from google.ads.google_ads.v2.proto.resources import campaign_experiment_pb2
from google.ads.google_ads.v2.proto.resources import campaign_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import campaign_feed_pb2
from google.ads.google_ads.v2.proto.resources import campaign_label_pb2
from google.ads.google_ads.v2.proto.resources import campaign_pb2
from google.ads.google_ads.v2.proto.resources import campaign_shared_set_pb2
from google.ads.google_ads.v2.proto.resources import carrier_constant_pb2
from google.ads.google_ads.v2.proto.resources import change_status_pb2
from google.ads.google_ads.v2.proto.resources import click_view_pb2
from google.ads.google_ads.v2.proto.resources import conversion_action_pb2
from google.ads.google_ads.v2.proto.resources import custom_interest_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_pb2
from google.ads.google_ads.v2.proto.resources import customer_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import customer_feed_pb2
from google.ads.google_ads.v2.proto.resources import customer_label_pb2
from google.ads.google_ads.v2.proto.resources import customer_manager_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_negative_criterion_pb2
from google.ads.google_ads.v2.proto.resources import customer_pb2
from google.ads.google_ads.v2.proto.resources import detail_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import display_keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import distance_view_pb2
from google.ads.google_ads.v2.proto.resources import domain_category_pb2
from google.ads.google_ads.v2.proto.resources import dynamic_search_ads_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import expanded_landing_page_view_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import asset_service_pb2
from google.ads.google_ads.v2.proto.services import asset_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import change_status_service_pb2
from google.ads.google_ads.v2.proto.services import change_status_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import click_view_service_pb2
from google.ads.google_ads.v2.proto.services import click_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_service_pb2
from google.ads.google_ads.v2.proto.services import customer_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import wrappers_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class ExpandedLandingPageViewServiceClient(object):
"""Service to fetch expanded landing page views."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v2.services.ExpandedLandingPageViewService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ExpandedLandingPageViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
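    # Usage sketch (hypothetical key path; not part of the generated surface):
    # the classmethod above builds a client whose credentials are read from a
    # service account JSON key file instead of the ambient environment.
    #
    #     client = ExpandedLandingPageViewServiceClient.from_service_account_file(
    #         '/path/to/service-account-key.json')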
@classmethod
def expanded_landing_page_view_path(cls, customer, expanded_landing_page_view):
"""Return a fully-qualified expanded_landing_page_view string."""
return google.api_core.path_template.expand(
'customers/{customer}/expandedLandingPageViews/{expanded_landing_page_view}',
customer=customer,
expanded_landing_page_view=expanded_landing_page_view,
)
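    # Example (hypothetical IDs): the helper above only formats a resource name,
    # e.g. expanded_landing_page_view_path('1234567890', '9876543210') returns
    # 'customers/1234567890/expandedLandingPageViews/9876543210'.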
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.ExpandedLandingPageViewServiceGrpcTransport,
Callable[[~.Credentials, type], ~.ExpandedLandingPageViewServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = expanded_landing_page_view_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=expanded_landing_page_view_service_grpc_transport.ExpandedLandingPageViewServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = expanded_landing_page_view_service_grpc_transport.ExpandedLandingPageViewServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_expanded_landing_page_view(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested expanded landing page view in full detail.
Args:
resource_name (str): The resource name of the expanded landing page view to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.ExpandedLandingPageView` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_expanded_landing_page_view' not in self._inner_api_calls:
self._inner_api_calls['get_expanded_landing_page_view'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_expanded_landing_page_view,
default_retry=self._method_configs['GetExpandedLandingPageView'].retry,
default_timeout=self._method_configs['GetExpandedLandingPageView'].timeout,
client_info=self._client_info,
)
request = expanded_landing_page_view_service_pb2.GetExpandedLandingPageViewRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_expanded_landing_page_view'](request, retry=retry, timeout=timeout, metadata=metadata)
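# Minimal usage sketch (hypothetical customer and view IDs; assumes credentials
# and API configuration are already available in the environment):
#
#     client = ExpandedLandingPageViewServiceClient()
#     name = client.expanded_landing_page_view_path('1234567890', '9876543210')
#     view = client.get_expanded_landing_page_view(name)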
| 60.048223 | 128 | 0.79306 |
ee28ed0a5dd378047e25f4232034ffc06e627708 | 9,157 | py | Python | ex43.py | Rajab322/lpthw | bde26ca21bd1c72807c93fff15a45a1154ba59d7 | [
"MIT"
] | 329 | 2017-02-25T15:06:58.000Z | 2022-03-31T18:22:21.000Z | ex43.py | dkorzhevin/learn-python3-thw-code | bea1e954d52ed845c3ade7ed87d7bef7de1651ad | [
"MIT"
] | 10 | 2017-02-26T13:55:38.000Z | 2020-02-20T06:10:26.000Z | ex43.py | dkorzhevin/learn-python3-thw-code | bea1e954d52ed845c3ade7ed87d7bef7de1651ad | [
"MIT"
] | 180 | 2017-02-25T20:42:03.000Z | 2022-02-09T05:21:40.000Z | ### @export "fake"
import fake_input
_, input = fake_input.create(['dodge!'])  # scripted stand-in for input(), fed from the canned responses
### @export "imports"
from sys import exit
from random import randint
### @export "scene_class"
class Scene(object):
def enter(self):
print("This scene is not yet configured. Subclass it and implement enter().")
exit(1)
### @export "engine_class"
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene('finished')
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# be sure to print out the last scene
current_scene.enter()
### @export "death_scene"
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your mom would be proud...if she were smarter.",
"Such a luser.",
"I have a small puppy that's better at this."
]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
### @export "central_corridor"
class CentralCorridor(Scene):
def enter(self):
print("The Gothons of Planet Percal #25 have invaded your ship and destroyed")
print("your entire crew. You are the last surviving member and your last")
print("mission is to get the neutron destruct bomb from the Weapons Armory,")
print("put it in the bridge, and blow the ship up after getting into an ")
print("escape pod.")
print("\n")
print("You're running down the central corridor to the Weapons Armory when")
print("a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume")
print("flowing around his hate filled body. He's blocking the door to the")
print("Armory and about to pull a weapon to blast you.")
action = input("> ")
if action == "shoot!":
print("Quick on the draw you yank out your blaster and fire it at the Gothon.")
print("His clown costume is flowing and moving around his body, which throws")
print("off your aim. Your laser hits his costume but misses him entirely. This")
print("completely ruins his brand new costume his mother bought him, which")
print("makes him fly into an insane rage and blast you repeatedly in the face until")
print("you are dead. Then he eats you.")
return 'death'
elif action == "dodge!":
print("Like a world class boxer you dodge, weave, slip and slide right")
print("as the Gothon's blaster cranks a laser past your head.")
print("In the middle of your artful dodge your foot slips and you")
print("bang your head on the metal wall and pass out.")
print("You wake up shortly after only to die as the Gothon stomps on")
print("your head and eats you.")
return 'death'
elif action == "tell a joke":
print("Lucky for you they made you learn Gothon insults in the academy.")
print("You tell the one Gothon joke you know:")
print("Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr.")
print("The Gothon stops, tries not to laugh, then busts out laughing and can't move.")
print("While he's laughing you run up and shoot him square in the head")
print("putting him down, then jump through the Weapon Armory door.")
return 'laser_weapon_armory'
else:
print("DOES NOT COMPUTE!")
return 'central_corridor'
### @export "game_scenes"
class LaserWeaponArmory(Scene):
def enter(self):
print("You do a dive roll into the Weapon Armory, crouch and scan the room")
print("for more Gothons that might be hiding. It's dead quiet, too quiet.")
print("You stand up and run to the far side of the room and find the")
print("neutron bomb in its container. There's a keypad lock on the box")
print("and you need the code to get the bomb out. If you get the code")
print("wrong 10 times then the lock closes forever and you can't")
print("get the bomb. The code is 3 digits.")
code = f"{randint(1,9)}{randint(1,9)}{randint(1,9)}"
guess = input("[keypad]> ")
guesses = 0
while guess != code and guesses < 10:
print("BZZZZEDDD!")
guesses += 1
guess = input("[keypad]> ")
if guess == code:
print("The container clicks open and the seal breaks, letting gas out.")
print("You grab the neutron bomb and run as fast as you can to the")
print("bridge where you must place it in the right spot.")
return 'the_bridge'
else:
print("The lock buzzes one last time and then you hear a sickening")
print("melting sound as the mechanism is fused together.")
print("You decide to sit there, and finally the Gothons blow up the")
print("ship from their ship and you die.")
return 'death'
class TheBridge(Scene):
def enter(self):
print("You burst onto the Bridge with the netron destruct bomb")
print("under your arm and surprise 5 Gothons who are trying to")
print("take control of the ship. Each of them has an even uglier")
print("clown costume than the last. They haven't pulled their")
print("weapons out yet, as they see the active bomb under your")
print("arm and don't want to set it off.")
action = input("> ")
if action == "throw the bomb":
print("In a panic you throw the bomb at the group of Gothons")
print("and make a leap for the door. Right as you drop it a")
print("Gothon shoots you right in the back killing you.")
print("As you die you see another Gothon frantically try to disarm")
print("the bomb. You die knowing they will probably blow up when")
print("it goes off.")
return 'death'
elif action == "slowly place the bomb":
print("You point your blaster at the bomb under your arm")
print("and the Gothons put their hands up and start to sweat.")
print("You inch backward to the door, open it, and then carefully")
print("place the bomb on the floor, pointing your blaster at it.")
print("You then jump back through the door, punch the close button")
print("and blast the lock so the Gothons can't get out.")
print("Now that the bomb is placed you run to the escape pod to")
print("get off this tin can.")
return 'escape_pod'
else:
print("DOES NOT COMPUTE!")
return "the_bridge"
class EscapePod(Scene):
def enter(self):
print("You rush through the ship desperately trying to make it to")
print("the escape pod before the whole ship explodes. It seems like")
print("hardly any Gothons are on the ship, so your run is clear of")
print("interference. You get to the chamber with the escape pods, and")
print("now need to pick one to take. Some of them could be damaged")
print("but you don't have time to look. There's 5 pods, which one")
print("do you take?")
good_pod = randint(1,5)
guess = input("[pod #]> ")
if int(guess) != good_pod:
print("You jump into pod {guess} and hit the eject button.")
print("The pod escapes out into the void of space, then")
print("implodes as the hull ruptures, crushing your body")
print("into jam jelly.")
return 'death'
else:
print("You jump into pod {guess} and hit the eject button.")
print("The pod easily slides out into space heading to")
print("the planet below. As it flies to the planet, you look")
print("back and see your ship implode then explode like a")
print("bright star, taking out the Gothon ship at the same")
print("time. You won!")
return 'finished'
class Finished(Scene):
def enter(self):
print("You won! Good job.")
return 'finished'
### @export "map_class"
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished(),
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
return val
def opening_scene(self):
return self.next_scene(self.start_scene)
### @export "final_run"
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
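# Extension sketch (hypothetical scene, not part of the exercise): a new room
# plugs into the same loop by subclassing Scene, returning the key of the next
# scene from enter(), and registering an instance under that key in Map.scenes.
#
#     class EngineRoom(Scene):
#         def enter(self):
#             print("The engine room hums quietly.")
#             return 'the_bridge'
#
#     Map.scenes['engine_room'] = EngineRoom()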
| 38.154167 | 102 | 0.613738 |
e36c6805746db2427f71c3409a4ca8cfe7379717 | 1,882 | py | Python | backend/api/routes/favorite.py | Mozzo1000/product-database | 6af88397d0dc27b334e38a5d42f9dbfc396b784d | [
"Apache-2.0"
] | null | null | null | backend/api/routes/favorite.py | Mozzo1000/product-database | 6af88397d0dc27b334e38a5d42f9dbfc396b784d | [
"Apache-2.0"
] | 4 | 2021-12-30T12:09:35.000Z | 2022-01-20T15:51:44.000Z | backend/api/routes/favorite.py | Mozzo1000/product-database | 6af88397d0dc27b334e38a5d42f9dbfc396b784d | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, request, jsonify
from models import Favorite, FavoriteSchema, User, db
from flask_jwt_extended import jwt_required, get_jwt_identity
favorite_endpoint = Blueprint('favorite', __name__)
@favorite_endpoint.route("/v1/favorites")
@jwt_required()
def get_favorites():
current_user = User.find_by_email(get_jwt_identity())
favorite_schema = FavoriteSchema(many=True)
favorites = Favorite.query.filter_by(user_id=current_user.id).all()
return jsonify(favorite_schema.dump(favorites))
@favorite_endpoint.route("/v1/favorites/<id>")
@jwt_required()
def get_is_favorite(id):
current_user = User.find_by_email(get_jwt_identity())
print(current_user)
favorite = Favorite.query.filter_by(product_id=id, user_id=current_user.id).first()
if favorite:
return jsonify({'favorite': True}), 201
else:
return jsonify({'favorite': False}), 201
@favorite_endpoint.route("/v1/favorites", methods=["POST"])
@jwt_required()
def add_favorite():
if not "product_id" in request.json:
return jsonify({
"error": "Bad request",
"message": "product_id not given"
}), 400
current_user = User.find_by_email(get_jwt_identity())
new_favorite = Favorite(user_id=current_user.id, product_id=request.json["product_id"])
new_favorite.save_to_db()
return jsonify({'message': 'Favorite added'}), 201
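# Request sketch for the POST endpoint above (hypothetical base URL, token and
# product id):
#
#     import requests
#     requests.post(
#         'http://localhost:5000/v1/favorites',
#         json={'product_id': 42},
#         headers={'Authorization': 'Bearer <access-token>'},
#     )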
@favorite_endpoint.route('/v1/favorites/<id>', methods=["DELETE"])
@jwt_required()
def remove_favorite(id):
current_user = User.find_by_email(get_jwt_identity())
favorite = Favorite.query.filter_by(product_id=id, user_id=current_user.id).first()
try:
db.session.delete(favorite)
db.session.commit()
        return jsonify({'message': 'Favorite has been removed'}), 200
except:
return jsonify({'message': 'Something went wrong'}), 500 | 36.901961 | 91 | 0.714134 |
f4e486e26300f87fc2f81c32e82122516ceba447 | 560 | py | Python | src/openeo_grass_gis_driver/process_graph_db.py | AnikaBettge/openeo-grassgis-driver | ee7721c39c830181256a042ade721ab777ad108b | [
"Apache-2.0"
] | null | null | null | src/openeo_grass_gis_driver/process_graph_db.py | AnikaBettge/openeo-grassgis-driver | ee7721c39c830181256a042ade721ab777ad108b | [
"Apache-2.0"
] | null | null | null | src/openeo_grass_gis_driver/process_graph_db.py | AnikaBettge/openeo-grassgis-driver | ee7721c39c830181256a042ade721ab777ad108b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from .actinia_processing.config import Config as ActiniaConfig
from sqlitedict import SqliteDict
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "[email protected]"
class GraphDB(SqliteDict):
"""This is the storage of the process graphs that were commited for processing
"""
def __init__(self):
SqliteDict.__init__(self, filename=ActiniaConfig.GRAPH_DB, autocommit=True)
| 31.111111 | 83 | 0.757143 |
efd3b7277754d59bca734fb6994ce323e3e908f4 | 968 | py | Python | stroylux/main/core/utils/url.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | stroylux/main/core/utils/url.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | 7 | 2020-09-19T16:24:46.000Z | 2022-01-13T03:19:46.000Z | stroylux/main/core/utils/url.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http.request import split_domain_port, validate_host
def validate_storefront_url(url):
"""Validate the storefront URL.
Raise ValidationError if URL isn't in RFC 1808 format
or it isn't allowed by ALLOWED_CLIENT_HOSTS in settings.
"""
try:
parsed_url = urlparse(url)
domain, _ = split_domain_port(parsed_url.netloc)
if not parsed_url.netloc:
raise ValidationError(
"Invalid URL. Please check if URL is in RFC 1808 format."
)
except ValueError as error:
raise ValidationError(error)
if not validate_host(domain, settings.ALLOWED_CLIENT_HOSTS):
error_message = (
f"{domain or url} is not allowed. Please check "
"`ALLOWED_CLIENT_HOSTS` configuration."
)
raise ValidationError(error_message) | 35.851852 | 73 | 0.686983 |