query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Send numerical value in CardHolder field
def test_28(self): assert 'False' == Api.requestBlock('test-28')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def __str__(self):\n return self.card_no", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def __init__(self, cardname, am...
[ "0.6360001", "0.6286262", "0.6143093", "0.61399215", "0.61090577", "0.609702", "0.5950806", "0.58012486", "0.5681816", "0.566631", "0.564746", "0.55345756", "0.5511834", "0.55029047", "0.54479784", "0.5423257", "0.54220426", "0.54016936", "0.53752905", "0.536577", "0.53501254...
0.0
-1
Check length of CardHolder field (len = 30)
def test_29(self): assert 'True' == Api.requestBlock('test-29')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4...
[ "0.6906881", "0.64856213", "0.645354", "0.6432503", "0.6383984", "0.6370204", "0.63560504", "0.63330877", "0.6323368", "0.6306261", "0.62849426", "0.62839264", "0.6276887", "0.6276887", "0.62720317", "0.6252744", "0.6238115", "0.6228162", "0.6224199", "0.6197445", "0.6196399"...
0.0
-1
Check length of CardHolder field (len = 31)
def test_30(self): assert 'False' == Api.requestBlock('test-30')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n e...
[ "0.6684012", "0.64644367", "0.6452459", "0.6451033", "0.63982964", "0.6384937", "0.63768864", "0.63765895", "0.6351771", "0.6343237", "0.6343237", "0.6326682", "0.6316303", "0.62818766", "0.6273519", "0.6246097", "0.62296754", "0.62225854", "0.6151962", "0.61194175", "0.61119...
0.0
-1
Check length of CardHolder field (len = 29)
def test_31(self): assert 'True' == Api.requestBlock('test-31')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \...
[ "0.6592967", "0.6412684", "0.6403937", "0.64020187", "0.63984334", "0.6374663", "0.6358877", "0.6322181", "0.63155454", "0.63155454", "0.6293918", "0.62928987", "0.62740266", "0.6272036", "0.62385446", "0.6238426", "0.62309074", "0.6216641", "0.6173284", "0.6161911", "0.61607...
0.0
-1
Send only spaces in CardHolder
def test_32(self): assert 'False' == Api.requestBlock('test-32')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def spaced(self, ctx: DogbotContext, *, text: clean_content):\n await ctx.send(text.replace('', ' ').strip())", "def test_send_strips(connection, writer, loop):\n loop.run_until_complete(connection.connect())\n connection.send(\" a b c | @#$ d \")\n assert writer.used\n assert writer....
[ "0.57420164", "0.5617736", "0.55529416", "0.55403346", "0.54575396", "0.53695005", "0.5285843", "0.52102894", "0.51688284", "0.5163198", "0.51460236", "0.5135173", "0.5118663", "0.5107931", "0.50539863", "0.5050567", "0.50373065", "0.5022836", "0.5017234", "0.50094473", "0.49...
0.0
-1
Send special characters in CardHolder
def test_33(self): assert 'False' == Api.requestBlock('test-33')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mecard(name, tel, email):\n return f'MECARD:N:{name};TEL:{tel};EMAIL:{email};'", "def display_content(com,message):\n #message = message.encode('utf-8')\n #message = message.decode('ascii', 'ignore')\n safeMsg = filter(lambda x: x in string.printable, message)\n safeMsg = safeMsg.replace('\\n'...
[ "0.6168183", "0.6085875", "0.5932747", "0.5859501", "0.55997914", "0.54294205", "0.5385542", "0.5369416", "0.53545773", "0.5346947", "0.5339747", "0.53114927", "0.52844477", "0.52821624", "0.5262677", "0.5241565", "0.52258605", "0.5219325", "0.52160716", "0.5215507", "0.52096...
0.0
-1
Send null value in SecureCode
def test_34(self): assert 'False' == Api.requestBlock('test-34')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypt():\n\tnull = 0", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def null() -> SetupVal:\n return NullVal()", "def noneType(value):\r\n return ''", "def silent_none(value):\n if value is None:\n return ''\n return value", "def nulltes...
[ "0.5724641", "0.56951576", "0.5571913", "0.55152303", "0.5499936", "0.54701316", "0.5447195", "0.5409918", "0.5399865", "0.5353087", "0.530565", "0.52624655", "0.5250316", "0.5222344", "0.52218187", "0.5196473", "0.51829845", "0.51713824", "0.5162897", "0.5159124", "0.5156829...
0.0
-1
Send special characters in SecureCode
def test_35(self): assert 'False' == Api.requestBlock('test-35')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, text):", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index...
[ "0.5938938", "0.5815948", "0.5810851", "0.5708405", "0.56919646", "0.5627045", "0.5606181", "0.5592183", "0.55913097", "0.55869216", "0.55449724", "0.55205464", "0.550454", "0.54743934", "0.5459042", "0.544217", "0.5438045", "0.5424514", "0.5418585", "0.54139405", "0.5389448"...
0.0
-1
Check length of SecureCode (SecureCode= 1 )
def test_36(self): assert 'False' == Api.requestBlock('test-36')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n ...
[ "0.641404", "0.6408166", "0.6400148", "0.63096166", "0.62442255", "0.62413037", "0.616265", "0.60951114", "0.606613", "0.60156256", "0.5987383", "0.5973827", "0.5954586", "0.58762735", "0.58288664", "0.5818708", "0.5816221", "0.580496", "0.5798687", "0.5756989", "0.5754382", ...
0.0
-1
Check length of SecureCode (SecureCode= 12 )
def test_37(self): assert 'False' == Api.requestBlock('test-37')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_l...
[ "0.65187556", "0.64862204", "0.64156276", "0.6369722", "0.635985", "0.6273854", "0.6231202", "0.61325234", "0.6130397", "0.6046088", "0.6041387", "0.6014855", "0.59593093", "0.59332305", "0.58832985", "0.587831", "0.5856602", "0.58545417", "0.58352524", "0.5809067", "0.579987...
0.0
-1
Check length of SecureCode (SecureCode= 123 )
def test_38(self): assert 'True' == Api.requestBlock('test-38')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Con...
[ "0.6570652", "0.65147936", "0.6507782", "0.636752", "0.6305414", "0.6304687", "0.62863815", "0.6257223", "0.6186307", "0.6163863", "0.61181813", "0.6054365", "0.5972026", "0.5970523", "0.59098756", "0.5856316", "0.58449227", "0.58319175", "0.58306134", "0.5820003", "0.5758521...
0.0
-1
Check length of SecureCode (SecureCode= 1234 )
def test_39(self): assert 'True' == Api.requestBlock('test-39')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Con...
[ "0.66026425", "0.65196085", "0.65004146", "0.64573944", "0.6349578", "0.63249177", "0.6300502", "0.6252688", "0.6208223", "0.61255306", "0.6125403", "0.6038623", "0.60023403", "0.59888995", "0.5983042", "0.5956225", "0.59148526", "0.586905", "0.585267", "0.58380944", "0.58245...
0.0
-1
Check length of SecureCode (SecureCode= 12345 )
def test_40(self): assert 'False' == Api.requestBlock('test-40')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n ...
[ "0.6516901", "0.6493827", "0.6453173", "0.63646185", "0.63212717", "0.6252427", "0.62409246", "0.6239357", "0.6138511", "0.61252916", "0.6071785", "0.6037417", "0.597209", "0.5948118", "0.5946584", "0.5912807", "0.59077805", "0.5901145", "0.58799016", "0.5859251", "0.58228135...
0.0
-1
Send null value in PaytureId
def test_41(self): assert 'False' == Api.requestBlock('test-41')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_missing_id(self, data, **kwargs):\n if not data.get(\"project_id\"):\n data[\"project_id\"] = lambda: uuid.uuid4().hex\n\n return data", "def payment_id_leading(self) -> bool:", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'pass...
[ "0.5814802", "0.57321626", "0.5561772", "0.54671985", "0.5423538", "0.5402264", "0.5338469", "0.5287139", "0.5287139", "0.5287139", "0.5248321", "0.5226516", "0.5212841", "0.514347", "0.512409", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347...
0.0
-1
Check length of PaytureId (len= 50)
def test_42(self): assert 'True' == Api.requestBlock('test-42')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght ...
[ "0.70664537", "0.68608546", "0.6378098", "0.6287845", "0.62220454", "0.6160857", "0.6048363", "0.60367453", "0.6018999", "0.59948397", "0.59662354", "0.59309196", "0.5913995", "0.5907712", "0.58846974", "0.58698547", "0.5848623", "0.58164203", "0.5786315", "0.57850075", "0.57...
0.0
-1
Check length of PaytureId (len= 51)
def test_43(self): assert 'False' == Api.requestBlock('test-43')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght ...
[ "0.69983447", "0.68838894", "0.6375329", "0.6372531", "0.62937874", "0.62633044", "0.61286795", "0.6075512", "0.60735387", "0.60325694", "0.5966721", "0.5952522", "0.59324026", "0.5905336", "0.58905697", "0.58616316", "0.5856024", "0.58352464", "0.583216", "0.58215785", "0.58...
0.0
-1
Send null value in CustomerKey
def test_44(self): assert 'False' == Api.requestBlock('test-44')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _key(self):\n return None", "def key(self):\n return None", "def convert_nulls(dic, null_value):\n for key in dic.iterkeys():\n if dic[key] is None:\n dic[key] = null_value", "def naics_agg_key(record: dict) -> Optional[str]:\n if record[\"naics_code\"] is None:\n ...
[ "0.5777039", "0.5651661", "0.5604638", "0.548038", "0.5478544", "0.5453205", "0.5365021", "0.53444165", "0.5324741", "0.5306071", "0.5301674", "0.52935547", "0.52801406", "0.5263718", "0.52251005", "0.52210206", "0.5198161", "0.51958966", "0.5189117", "0.51726973", "0.5162670...
0.0
-1
Check length of CustomerKey (len= 50)
def test_45(self): assert 'True' == Api.requestBlock('test-45')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_rfc_nickkey_length(s):\n asser...
[ "0.751583", "0.6865551", "0.6805979", "0.6515183", "0.6490027", "0.64431405", "0.6335833", "0.6305529", "0.6286746", "0.6239916", "0.62253493", "0.61821294", "0.6133636", "0.60966367", "0.605465", "0.6052676", "0.60452414", "0.6023541", "0.6023541", "0.60178965", "0.6007776",...
0.0
-1
Check length of CustomerKey (len= 51)
def test_46(self): assert 'False' == Api.requestBlock('test-46')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_rfc_nickkey_length(s):\n asser...
[ "0.7524419", "0.68488765", "0.6717122", "0.65168095", "0.6435481", "0.6403643", "0.63951415", "0.63328946", "0.62844515", "0.6251781", "0.6235365", "0.61816084", "0.6115362", "0.6111987", "0.6067936", "0.6057888", "0.60268646", "0.60186607", "0.60172915", "0.59633636", "0.596...
0.0
-1
Send null value in IP
def test_47(self): assert 'False' == Api.requestBlock('test-47', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_noop(self):\n self.assertFalse(helpers.getBcastAddrforIPv4())\n self.assertIsNone(helpers.no_op())", "def ip_white(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_white\")", "def ip_white(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \...
[ "0.5996742", "0.586887", "0.586887", "0.5854512", "0.5837289", "0.577436", "0.5667391", "0.5623846", "0.56065077", "0.55768406", "0.5550488", "0.55322266", "0.54811656", "0.54618883", "0.54348487", "0.54248166", "0.541617", "0.54040885", "0.53847426", "0.53496665", "0.5349666...
0.0
-1
Send some letters value in IP
def test_48(self): assert 'False' == Api.requestBlock('test-48', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_strin...
[ "0.63720393", "0.6112773", "0.6034202", "0.6031415", "0.601407", "0.6007722", "0.6005478", "0.5853208", "0.5850585", "0.5793232", "0.57886994", "0.57657415", "0.5760936", "0.5697663", "0.5660143", "0.56552243", "0.564769", "0.5638525", "0.5633917", "0.5625542", "0.56003445", ...
0.0
-1
Send value without dots in IP
def test_49(self): assert 'False' == Api.requestBlock('test-49', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def send(value):\r\n return value", "def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])", "def _sen...
[ "0.6917071", "0.61488545", "0.60871255", "0.58816665", "0.58673185", "0.5860521", "0.5847382", "0.5811409", "0.58054906", "0.5722551", "0.5714136", "0.57023436", "0.5676384", "0.56233644", "0.55774444", "0.5576495", "0.554843", "0.5535844", "0.55323035", "0.5489128", "0.54597...
0.0
-1
Send dots in wrong places e in IP
def test_50(self): assert 'False' == Api.requestBlock('test-50', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def defangIPaddr(address):\n address_as_list = list(address)\n length_of_address = len(address_as_list)\n for i in range(length_of_a...
[ "0.72598726", "0.66860425", "0.6652515", "0.6595804", "0.6511511", "0.6166377", "0.6115366", "0.6104999", "0.6004845", "0.5908386", "0.58287203", "0.5827624", "0.5757114", "0.56950575", "0.56721663", "0.55715144", "0.55706215", "0.556447", "0.5561804", "0.55530185", "0.545165...
0.0
-1
Send null value in Description
def test_51(self): assert 'False' == Api.requestBlock('test-51', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _description(self):\n return None", "def shortDescription(self):\n return None", "def get_description_value(obj):\n desc = None if obj is None else obj.GetObjectDescription()\n if desc == \"<nil>\":\n desc = None\n return desc", "def get_is_null_label(self):\n return ...
[ "0.71918756", "0.7042839", "0.66677594", "0.6598743", "0.65510976", "0.65307844", "0.65307844", "0.6500336", "0.6500336", "0.6499697", "0.64579976", "0.6434815", "0.64035326", "0.64035326", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0...
0.0
-1
Send null value in Cheque field
def test_52(self): assert 'False' == Api.requestBlock('test-52')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_blank_value_19(field):\n if field.null:\n return None\n else:\n return ''", "def _get_blank_value_18(field):\n if field.null:\n return None\n else:\n return field.value_to_string(None)", "def none_to_empty(data):\n return data if data is not None ...
[ "0.6952513", "0.69504404", "0.6532988", "0.6294436", "0.6274683", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", ...
0.0
-1
Get a specific role by id
def get(self, uuid): logger.info("Get a specific role by Id", data=uuid) role = Role.query.get(uuid) return role_schema.jsonify(role)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, role_id):\n # Right now the only way is to list them all, then iterate.\n # Perhaps a filter or new endpoint would be useful here.\n roles = self.list()\n for role in roles:\n if role.id == role_id:\n return role\n raise exc.HTTPNotFound()"...
[ "0.845486", "0.8306357", "0.8198853", "0.81536174", "0.8034468", "0.8029377", "0.77602434", "0.77323145", "0.7709774", "0.7558279", "0.7468724", "0.7445049", "0.7390456", "0.7355343", "0.7096366", "0.70292366", "0.6925993", "0.68034214", "0.6687161", "0.66808903", "0.66696924...
0.7384984
13
Initializes MissingDictKeys with an error message.
def __init__(self, msg: str): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize_error_dictionaries(self):\n for task_id in self.task_ids.keys():\n self.training_errors[task_id] = []\n self.validation_errors[task_id] = []", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError ...
[ "0.6115417", "0.6019149", "0.5846607", "0.58035153", "0.56639206", "0.56505346", "0.56354576", "0.56103414", "0.5606163", "0.5584499", "0.55501634", "0.5543524", "0.55091983", "0.55001175", "0.54870623", "0.5441635", "0.54101694", "0.5374049", "0.5367004", "0.5310237", "0.529...
0.0
-1
Initializes MissingGraphicSettings with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_invalidated(self):\n # GTK Settings for evogtk\n self.set_property('image',self.__errorimg)", "def init_load_working_config(self):\n try:\n self.currentconfig = remgeom.load(mustexist=True)\n except remgeom.RemGeomError:\n QtWidgets.QMessageBox.informatio...
[ "0.5554523", "0.52968895", "0.527939", "0.521656", "0.521091", "0.51314735", "0.50853", "0.5000523", "0.4994115", "0.49844408", "0.49832064", "0.49832064", "0.49832064", "0.49656707", "0.49567476", "0.4927834", "0.49192548", "0.49177274", "0.4886028", "0.48816097", "0.4875174...
0.0
-1
Initializes MissingGraphicField with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n ...
[ "0.56805265", "0.56665206", "0.5585767", "0.5569392", "0.5561353", "0.5527256", "0.55132455", "0.54539883", "0.5443838", "0.5439777", "0.54118824", "0.54014623", "0.54014623", "0.54014623", "0.53982174", "0.5364648", "0.5359602", "0.5337795", "0.5311328", "0.5287791", "0.5287...
0.0
-1
Initializes FontNotFound with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, me...
[ "0.6292298", "0.58811384", "0.58719945", "0.5639031", "0.5628314", "0.5543856", "0.54289997", "0.5320911", "0.53068715", "0.5306131", "0.5293901", "0.5212471", "0.52075624", "0.5191999", "0.5174318", "0.5155786", "0.51508164", "0.5130566", "0.5123885", "0.5118684", "0.5100034...
0.0
-1
Initializes InvalidColorFormat with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_color__int_arg_invalid(self):\n with self.assertRaises(ValueError):\n color = pygame.Color(0x1FFFFFFFF)", "def throwColorError(type, r,g,b):\n\t\tif not (r >= 0): \n\t\t\tError.wrong_color_number(type, r)\n\t\telif not (g >= 0):\n\t\t\tError.wrong_color_number(type, g)\n\t\telse:\n\t\t...
[ "0.6307895", "0.5999552", "0.5958542", "0.56025517", "0.55953807", "0.5590387", "0.5570816", "0.55706704", "0.5564297", "0.55582446", "0.55362266", "0.55183357", "0.5498367", "0.5497162", "0.54941034", "0.5482115", "0.5468498", "0.5452355", "0.5448693", "0.54256946", "0.54187...
0.0
-1
Initializes InvalidFormatOption with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _invalid_option_error(self, option_name):\n msg = \"'{}' is not a valid option for the '{}' section.\".format(option_name, self._SECTION_NAME)\n raise ValueError(msg)", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended...
[ "0.59847456", "0.5878398", "0.5871973", "0.5698404", "0.5652391", "0.5610471", "0.5598633", "0.5526274", "0.5502418", "0.5481143", "0.54788643", "0.54560614", "0.5371777", "0.53679717", "0.53327733", "0.53201747", "0.52955824", "0.52891856", "0.5285651", "0.5220381", "0.51965...
0.0
-1
Initializes InvalidFieldLength with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_validation_fails_with_invalid_field_length(self):\n\n result = LandCompensationLandSoldValidator.validate(INVALID_FIELD_LENGTH, INVALID_FIELD_LENGTH)\n self.assertEqual(2, len(result.errors))\n self.assertEqual('Answer too long', result.errors['land-sold-description'].summary_message)...
[ "0.6513797", "0.6197296", "0.61887497", "0.6181867", "0.61449176", "0.60927874", "0.60907245", "0.6026868", "0.6024588", "0.59886456", "0.59638536", "0.5922339", "0.5841489", "0.5829803", "0.5813546", "0.57707214", "0.570892", "0.570892", "0.5679656", "0.5664138", "0.5654813"...
0.0
-1
Initializes InvalidTweetName with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def test_fails_on_no_name(self):\n ...
[ "0.6010401", "0.5765059", "0.54735583", "0.544012", "0.54245514", "0.54195553", "0.5391037", "0.5378705", "0.53236824", "0.53205895", "0.5272788", "0.52640235", "0.524458", "0.52370137", "0.5229564", "0.5201824", "0.5157976", "0.51524526", "0.51459634", "0.51440966", "0.51425...
0.0
-1
Initializes InvalidUsername with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, username: str) -> None:\n try:\n check_string(username, 'username', 'a-z0-9', 100)\n except IllegalParameterError as e:\n raise IllegalUsernameError(e.message) from e\n self.name = username", "def username_error(self, msg):\n raise NotImplement...
[ "0.7104705", "0.6829881", "0.6761184", "0.6483541", "0.6251707", "0.6137439", "0.60849506", "0.6066154", "0.60328996", "0.60174", "0.5974511", "0.59486985", "0.59322745", "0.59095454", "0.5881767", "0.5851913", "0.58311576", "0.5790885", "0.5769815", "0.57687426", "0.57497245...
0.0
-1
Initializes InvalidUserTag with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(tag, message=None):\n Log._post(\"error\", tag, message)", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, mess...
[ "0.56462985", "0.54964924", "0.536261", "0.5255886", "0.52409583", "0.5214799", "0.51638514", "0.51309276", "0.512227", "0.5116152", "0.50880295", "0.5065294", "0.50539815", "0.5043654", "0.50230527", "0.5009636", "0.49937543", "0.499186", "0.49844167", "0.49798706", "0.49764...
0.0
-1
Initializes InvalidProfilePicturePath with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_profile_picture_invalid_image_fails(self):\n image_upload_url = PROCEDURE_URL\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': 'invalid image',\n 'overview': 'bla bla bla'\n }\n\n res = self.clie...
[ "0.5651921", "0.5624119", "0.56222874", "0.5336274", "0.52178127", "0.52178127", "0.52178127", "0.51103395", "0.5086884", "0.5018844", "0.49850813", "0.49842182", "0.49580494", "0.49038544", "0.49034742", "0.48773718", "0.48606312", "0.48144424", "0.47821972", "0.4751558", "0...
0.0
-1
Initializes InvalidTweetText with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text"...
[ "0.6875406", "0.5868215", "0.5819681", "0.5728066", "0.562525", "0.55098635", "0.5487254", "0.54616547", "0.54316974", "0.5398727", "0.5387163", "0.5380475", "0.53396827", "0.53311336", "0.5290807", "0.52907974", "0.52791065", "0.525219", "0.525219", "0.5239347", "0.5238812",...
0.0
-1
Performs the scaled all_reduce operation on the provided tensors. The input tensors are modified inplace. Currently supports only the sum reduction operator. The reduced values are scaled by the inverse size of the process group.
def scaled_all_reduce(tensors: List[Tensor], num_gpus: int) -> List[Tensor]: # There is no need for reduction in the single-proc case if num_gpus == 1: return tensors # Queue the reductions reductions = [] for tensor in tensors: reduction = torch.distributed.all_reduce(tensor, async_op=True) reductions.append(reduction) # Wait for reductions to finish for reduction in reductions: reduction.wait() # Scale the results for tensor in tensors: tensor.mul_(1.0 / num_gpus) return tensors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allreduce_hook(state: AllReduceState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)", ...
[ "0.6388613", "0.60503924", "0.5885183", "0.5827911", "0.5778461", "0.55323684", "0.5511012", "0.5458916", "0.54437757", "0.5417451", "0.5380227", "0.5299009", "0.5289442", "0.52552223", "0.52131027", "0.5203694", "0.5203694", "0.5196382", "0.515265", "0.5149693", "0.5122873",...
0.8066509
0
Computes precise BN stats on training data.
def update_bn_stats( model: nn.Module, loader: DataLoader, num_samples: int = 8192, logger: Optional[Union[logging.Logger, str]] = None) -> None: if is_model_wrapper(model): model = model.module # get dist info rank, world_size = mmengine.dist.get_dist_info() # Compute the number of mini-batches to use, if the size of dataloader is # less than num_iters, use all the samples in dataloader. num_iter = num_samples // (loader.batch_size * world_size) num_iter = min(num_iter, len(loader)) # Retrieve the BN layers bn_layers = [ m for m in model.modules() if m.training and isinstance(m, (_BatchNorm)) ] if len(bn_layers) == 0: print_log('No BN found in model', logger=logger, level=logging.WARNING) return print_log( f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger) # Finds all the other norm layers with training=True. other_norm_layers = [ m for m in model.modules() if m.training and isinstance(m, (_InstanceNorm, GroupNorm)) ] if len(other_norm_layers) > 0: print_log( 'IN/GN stats will not be updated in PreciseHook.', logger=logger, level=logging.INFO) # Initialize BN stats storage for computing # mean(mean(batch)) and mean(var(batch)) running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers] running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers] # Remember momentum values momentums = [bn.momentum for bn in bn_layers] # Set momentum to 1.0 to compute BN stats that reflect the current batch for bn in bn_layers: bn.momentum = 1.0 # Average the BN stats for each BN layer over the batches if rank == 0: prog_bar = ProgressBar(num_iter) for data in itertools.islice(loader, num_iter): data = model.data_preprocessor(data, False) model(**data) for i, bn in enumerate(bn_layers): running_means[i] += bn.running_mean / num_iter running_vars[i] += bn.running_var / num_iter if rank == 0: prog_bar.update() # Sync BN stats across GPUs (no reduction if 1 GPU used) running_means = scaled_all_reduce(running_means, world_size) running_vars = 
scaled_all_reduce(running_vars, world_size) # Set BN stats and restore original momentum values for i, bn in enumerate(bn_layers): bn.running_mean = running_means[i] bn.running_var = running_vars[i] bn.momentum = momentums[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage...
[ "0.6964537", "0.6422349", "0.63577896", "0.6318661", "0.6236532", "0.6216655", "0.6173939", "0.60070693", "0.59611785", "0.5951854", "0.58218586", "0.5805479", "0.5786752", "0.57796615", "0.57740426", "0.57325435", "0.57291657", "0.571751", "0.56999123", "0.5690688", "0.56904...
0.6554783
1
Calculate prcise BN and broadcast BN stats across GPUs.
def after_train_epoch(self, runner: Runner) -> None: # if use `EpochBasedTrainLoop``, do perform precise every # `self.interval` epochs. if isinstance(runner.train_loop, EpochBasedTrainLoop) and self.every_n_epochs( runner, self.interval): self._perform_precise_bn(runner)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage...
[ "0.6702836", "0.60820144", "0.57397383", "0.5513961", "0.5449203", "0.5389235", "0.53726816", "0.5317791", "0.5255269", "0.5254749", "0.5192296", "0.517288", "0.51403135", "0.5121367", "0.51208645", "0.51208645", "0.511794", "0.51084936", "0.5106065", "0.5099034", "0.5092202"...
0.0
-1
Calculate prcise BN and broadcast BN stats across GPUs.
def after_train_iter(self, runner, batch_idx: int, data_batch: DATA_BATCH = None, outputs: Optional[dict] = None) -> None: # if use `IterBasedTrainLoop``, do perform precise every # `self.interval` iters. if isinstance(runner.train_loop, IterBasedTrainLoop) and self.every_n_train_iters( runner, self.interval): self._perform_precise_bn(runner)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage...
[ "0.6702836", "0.60820144", "0.57397383", "0.5513961", "0.5449203", "0.5389235", "0.53726816", "0.5317791", "0.5255269", "0.5254749", "0.5192296", "0.517288", "0.51403135", "0.5121367", "0.51208645", "0.51208645", "0.511794", "0.51084936", "0.5106065", "0.5099034", "0.5092202"...
0.0
-1
Computes the time_steps/ctc_input_length after convolution.
def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length): max_time_steps = tf.cast(max_time_steps, dtype=tf.float32) ctc_input_length = tf.cast(tf.multiply(input_length, ctc_time_steps), dtype=tf.float32) return tf.cast(tf.floordiv(ctc_input_length, max_time_steps), dtype=tf.int32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length) -> tf.Tensor:\n return tf.to_int32(tf.floordiv(\n tf.to_float(tf.multiply(input_length, ctc_time_steps)), tf.to_float(max_time_steps)))", "def _calc_ctc_input_length(args):\n # py2 needs explicit tf import for keras La...
[ "0.73321956", "0.72471476", "0.60040087", "0.58424336", "0.5774148", "0.57553667", "0.5680954", "0.56687814", "0.5620562", "0.55967027", "0.5519298", "0.5454149", "0.5431821", "0.5431813", "0.54018116", "0.5376901", "0.5364593", "0.5363788", "0.53633916", "0.52863103", "0.525...
0.7509102
0
Computes the ctc loss for the current batch of predictions.
def ctc_loss(label_length, ctc_input_length, labels, probs): label_length = tf.cast(tf.squeeze(label_length), dtype=tf.int32) ctc_input_length = tf.cast(tf.squeeze(ctc_input_length), dtype=tf.int32) sparse_labels = tf.cast(tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length), dtype=tf.int32) y_pred = tf.log(tf.transpose(probs, perm=[1, 0, 2]) + tf.keras.backend.epsilon()) return tf.expand_dims(tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred, sequence_length=ctc_input_length), axis=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_objectives(self, predictions, batch, stage):\n\n p_ctc, wav_lens = predictions\n\n ids = batch.id\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n tokens, tokens_lens = batch.tokens\n\n loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)\n\n if...
[ "0.7081912", "0.70582753", "0.6970499", "0.6948209", "0.69290686", "0.68720865", "0.6859095", "0.67881185", "0.65927947", "0.6434992", "0.6425474", "0.64251554", "0.6351377", "0.6332072", "0.619035", "0.6178352", "0.61739546", "0.6164014", "0.61629915", "0.61627495", "0.61150...
0.68982947
5
Evaluate the model performance using WER anc CER as metrics.
def evaluate_model(estimator: es.Estimator, speech_labels: List[str], entries, input_fn_eval) -> Dict[str, float]: # Get predictions predictions = estimator.predict(input_fn=input_fn_eval) # Get probabilities of each predicted class probs = [pred["probabilities"] for pred in predictions] num_of_examples = len(probs) targets = [entry[1] for entry in entries] # The ground truth transcript total_wer, total_cer = 0., 0. greedy_decoder = decoder.DeepSpeechDecoder(speech_labels, blank_index=28) for prob, target in zip(probs, targets): decode = greedy_decoder.decode(prob) total_cer += greedy_decoder.cer(decode, target) total_wer += greedy_decoder.wer(decode, target) total_cer /= num_of_examples total_wer /= num_of_examples global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP) eval_results = { _WER_KEY: total_wer, _CER_KEY: total_cer, tf.GraphKeys.GLOBAL_STEP: global_step } return eval_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(model, train_corpus, test_coprus, vocab=idx2word,\r\n num_docs_test=num_docs_test, tc=tc, td=td,\r\n eval_batch_size=_eval_batch_size,\r\n vocab_size=vocab_size,\r\n bow_norm=bow_norm):\r\n\r\n model.eval() # set model in evaluation mode\r\n with ...
[ "0.61214685", "0.6103847", "0.6043671", "0.59896857", "0.5839277", "0.5747045", "0.5735802", "0.5731427", "0.57268673", "0.5685036", "0.56741154", "0.56643283", "0.5655421", "0.565319", "0.56433403", "0.56417257", "0.56286067", "0.56258726", "0.56154364", "0.5613228", "0.5611...
0.5991093
3
Define model function for deep speech model.
def model_fn(features: Dict, labels, mode, params: Dict): global FLAGS num_classes = params["num_classes"] input_length = features["input_length"] label_length = features["label_length"] features = features["features"] # Create model model = deep_speech_model.DeepSpeech2( num_rnn_layers=FLAGS.rnn_hidden_layers, rnn_type=FLAGS.rnn_type, is_bidirectional=FLAGS.is_bidirectional, rnn_hidden_size=FLAGS.rnn_hidden_size, num_classes=num_classes, use_bias=FLAGS.use_bias) # predict mode if mode == es.ModeKeys.PREDICT: logits = model(features, training=False) predictions = { "logits": logits, "classes": tf.argmax(logits, axis=2), "probabilities": tf.nn.softmax(logits) } return es.EstimatorSpec(mode=mode, predictions=predictions) # train / eval mode logits = model(features, training=True) probs = tf.nn.softmax(logits) ctc_input_length = compute_length_after_conv(tf.shape(features)[1], tf.shape(probs)[1], input_length) loss = tf.reduce_mean( ctc_loss(label_length=label_length, ctc_input_length=ctc_input_length, labels=labels, probs=probs)) opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) global_step = tf.train.get_or_create_global_step() minimize_op = opt.minimize(loss, global_step=global_step) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(minimize_op, update_ops) return es.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model_fn(self):", "def build_model():", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def load_deepspeech(model_name):\n\n # For reference:\n # from deepspeech_pytorch.model import DeepSpeech\n # from torch.utils.model_zoo import load_...
[ "0.71115047", "0.60863584", "0.60634947", "0.6042872", "0.5957492", "0.589929", "0.5818367", "0.5788677", "0.5773995", "0.57248014", "0.57248014", "0.57248014", "0.57248014", "0.57248014", "0.57153296", "0.56870234", "0.5674797", "0.56720144", "0.5628486", "0.5593021", "0.558...
0.5734031
9
Generate a speech dataset.
def generate_dataset(data_dir: str, partition: str) -> dataset.DeepSpeechDataset: global FLAGS audio_conf = dataset.AudioConfig( sample_rate=FLAGS.sample_rate, window_ms=FLAGS.window_ms, stride_ms=FLAGS.stride_ms, normalize=True) data_conf = dataset.DatasetConfig( audio_config=audio_conf, data_path=data_dir, vocab_file_path=FLAGS.vocab_file, sortagrad=FLAGS.sortagrad) speech_dataset = dataset.DeepSpeechDataset(dataset_config=data_conf, partition=partition, seed=FLAGS.seed) return speech_dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i...
[ "0.64441484", "0.63849944", "0.62619036", "0.6223697", "0.6217612", "0.6186893", "0.61415535", "0.61275953", "0.6118419", "0.6101546", "0.606264", "0.6042652", "0.6013309", "0.59911335", "0.5944916", "0.5930794", "0.5930794", "0.5919748", "0.59102166", "0.5889595", "0.5887039...
0.65369
0
Run deep speech training and eval loop.
def run_deep_speech(): global FLAGS tf.set_random_seed(FLAGS.seed) # Data precessing tf.logging.info("Data Processing...") train_speech_dataset = generate_dataset(FLAGS.data_dir, partition="train") eval_speech_dataset = generate_dataset(FLAGS.data_dir, partition="dev") # Number of label classes. Label string is "[a-z]' -" num_classes = len(train_speech_dataset.speech_labels) # not available in 1.4 distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=FLAGS.num_gpus) run_config = es.RunConfig(train_distribute=distribution_strategy, session_config=get_session_config()) estimator = es.Estimator( model_fn=model_fn, model_dir=FLAGS.model_dir, config=run_config, params={"num_classes": num_classes}) run_params = { "batch_size": FLAGS.batch_size, "train_epochs": FLAGS.train_epochs, "rnn_hidden_size": FLAGS.rnn_hidden_size, "rnn_hidden_layers": FLAGS.rnn_hidden_layers, "rnn_type": FLAGS.rnn_type, "is_bidirectional": FLAGS.is_bidirectional, "use_bias": FLAGS.use_bias } benchmark_logger = logger.get_benchmark_logger() benchmark_logger.log_run_info( model_name="deep_speech", dataset_name="LibriSpeech", run_params=run_params, test_id=FLAGS.benchmark_test_id) train_hooks = hooks_helper.get_train_hooks(FLAGS.hooks, model_dir=FLAGS.model_dir, batch_size=FLAGS.batch_size) per_replica_batch_size = distribution_utils.per_replica_batch_size(FLAGS.batch_size, FLAGS.num_gpus) def input_fn_train(): return train_speech_dataset.input_fn(batch_size=per_replica_batch_size) def input_fn_eval(): return eval_speech_dataset.input_fn(batch_size=per_replica_batch_size) # total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals total_training_cycle = FLAGS.train_epochs for cycle_index in range(total_training_cycle): tf.logging.info(f"Starting train cycle: {cycle_index + 1} / {total_training_cycle}") # Perform batch_wise dataset shuffling train_speech_dataset.batch_wise_shuffle(FLAGS.batch_size) # Train estimator.train(input_fn=input_fn_train, 
hooks=train_hooks) # Evaluation tf.logging.info("Starting to evaluate...") eval_results = evaluate_model(estimator, speech_labels=eval_speech_dataset.speech_labels, entries=eval_speech_dataset.entries, input_fn_eval=input_fn_eval) # Log the WER and CER results. benchmark_logger.log_evaluation_result(eval_results) tf.logging.info( f"Iteration {cycle_index + 1}: WER = {eval_results[_WER_KEY]:.2f}, CER = {eval_results[_CER_KEY]:.2f}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listeni...
[ "0.6943727", "0.6593037", "0.6503868", "0.6496803", "0.64616853", "0.64566225", "0.64433736", "0.6342452", "0.6339075", "0.6338328", "0.6310606", "0.6282557", "0.62653345", "0.6262677", "0.6250832", "0.6228017", "0.6227637", "0.6218155", "0.6213814", "0.61772126", "0.6165034"...
0.7373323
0
Factory function for creating a trainer for supervised segmentation models.
def create_supervised_trainer( model, optimizer, loss_fn, prepare_batch, device=None, non_blocking=False, output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()}, ): if device: model.to(device) def _update(engine, batch): model.train() optimizer.zero_grad() x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) y_pred = _upscale_model_output(y_pred, y) loss = loss_fn(y_pred.squeeze(1), y.squeeze(1)) loss.backward() optimizer.step() return output_transform(x, y, y_pred, loss) return Engine(_update)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, trainer):\n self.trainer = trainer", "def trainer_factory(\n algorithm: Algorithm, config_overrides: dict, env: Optional[str] = None\n) -> Trainer:\n ensure_moab_envs_register()\n trainer_cls = algorithm.get_trainer_cls()\n default_config = algorithm.get_default_config()\n ...
[ "0.654834", "0.6447384", "0.636751", "0.6288383", "0.61592156", "0.6154132", "0.6132767", "0.6132448", "0.6092635", "0.59407616", "0.59181774", "0.5789498", "0.5706349", "0.5698838", "0.5692006", "0.56582886", "0.5657751", "0.56492114", "0.56327677", "0.56257826", "0.562098",...
0.5852004
11
Factory function for creating an evaluator for supervised segmentation models.
def create_supervised_evaluator( model, prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform, ): metrics = metrics or {} if device: model.to(device) def _inference(engine, batch): model.eval() with torch.no_grad(): x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) y_pred = _upscale_model_output(y_pred, x) return output_transform(x, y, y_pred, ids, patch_locations) engine = Engine(_inference) for name, metric in metrics.items(): metric.attach(engine, name) return engine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_s...
[ "0.71334535", "0.71291125", "0.6881836", "0.6608917", "0.634653", "0.61560285", "0.5989048", "0.5923213", "0.5881174", "0.5840506", "0.58325845", "0.5785326", "0.5723227", "0.57039267", "0.5621444", "0.55563307", "0.5549682", "0.5542596", "0.55241853", "0.53976136", "0.535089...
0.5575173
15
Create and return a stub test.
def CreateStubTest(phases=None): # pylint: disable=invalid-name test_metadata = phase_data.TestMetadata('foo') return phase_data.phase_data(test_metadata, phases or [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CreateStubTest(phases=None, params=None): # pylint: disable=invalid-name\n test_metadata = htftest.TestMetadata('foo')\n # pylint: disable=protected-access\n if params is not None:\n test_metadata._parameter_list = (\n parameters.TestParameterList(params.parameters))\n return htftest.HTFTest(tes...
[ "0.74640894", "0.68726027", "0.65086746", "0.6226904", "0.59744567", "0.5875409", "0.5875409", "0.5869115", "0.5800327", "0.57175136", "0.5705263", "0.5691941", "0.5661378", "0.5658473", "0.5648882", "0.5639574", "0.5630987", "0.5609541", "0.55923575", "0.55062956", "0.549285...
0.7049483
1
Checks whether the parameters sent in the array param correspond to a physical model (i.e. if one of the minima are close to the Higgs v (experimentally verified) and if one of the masses correspond to the Higgs mass (exp. verified as well).
def CheckCouplings(params, verbose=False): l1 = params[0] l2 = params[1] l3 = params[2] gx = params[3] m = model_2f(l1, l2, l3, y_t_interpol(np.log(v/mz)), gx) minima, success = m.findMinimum() #the boolean success is added because we cannot trust the minima if numpy.optimize.minimize has failed if not verbose: tolvevh = 2.0 tolmh = 2.0 condition0 = abs(minima-v) < tolvevh if condition0.any() and success: ddVtot = nd.Hessian(m.Vtot_0T) hess = ddVtot(minima) masses = np.linalg.eigvalsh(hess) #computes masses... positive_condition = masses > 0 if(positive_condition.all()): #we will only check them IF they are positive masses = np.sqrt(np.abs(masses)) condition1 = abs(masses-mh) < tolmh if condition1.any(): stability = m.CheckStability() #we check the stability of the model f = open(file_name, 'a') line0 = str(l1)+' '+str(l2)+' '+str(l3)+' '+str(gx)+' '+str(minima[0])+' '+str(minima[1])+' '+str(masses[0])+' '+str(masses[1]) #we print everything line0 = line0 + ' '+str(stability) f.write(line0+'\n') f.write('-'*90+'\n') f.close() else: """ Just checks the minima of the model m, the masses of the particles and whether it is stable or not Output: prints the information """ print "Minimum at T = 0.0: ", minima, success print "Masses: " ddVtot = nd.Hessian(m.Vtot_0T) hess = ddVtot(minima) print np.sqrt(np.linalg.eigvalsh(hess)) print 'Stable: ', m.CheckStability()==1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches(self, aModel):\n\n params_bis = list(map(aModel.string_to_param,self.grid_params))\n for param1, param2 in zip(self.params, params_bis):\n if (abs(param1/param2 - 1.0) > eps): return False\n return True", "def check_params(self, model_params):\n\n comm = self.co...
[ "0.63886017", "0.6310672", "0.6199619", "0.6192125", "0.6176082", "0.61694586", "0.613159", "0.6075696", "0.60116005", "0.60115665", "0.5992061", "0.5970435", "0.59513295", "0.59481025", "0.5932262", "0.5915817", "0.58968824", "0.5866027", "0.58604413", "0.58546996", "0.58375...
0.53065354
80
We get points to check, and parallelize the checking of the points. This is the function that does all the work in terms of exploring the parameter space.
def FindCouplings(): l1v = np.linspace(l1min, l1max, num=48) l2v = np.logspace(np.log10(l2min), np.log10(l2max), num=48) l3v = np.linspace(l3min, l3max, num=48) gxv = np.linspace(gxmin, gxmax, num=48) p = multiprocessing.Pool() f = open(file_name, 'w+') line = '|l1--l2--l3--gx--minima--mass1--mass2--stable|' f.write(line+'\n') f.write('-'*90+'\n') f.close() for l1 in l1v: for l2 in l2v: start_time_loop = time.time() params = cartesian((l1, -l2, l3v, gxv)) print params.shape p.map(CheckCouplings, params) print("--- Loop has taken: %s seconds ---" % (time.time() - start_time_loop))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_parallel_computing(self, example_staypoints):\n sp = example_staypoints\n\n # without parallel computing code\n sp_ori, locs_ori = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"user\", n_jobs=...
[ "0.63042134", "0.6114993", "0.60271513", "0.57289815", "0.5670463", "0.56503326", "0.5637356", "0.56303936", "0.56074744", "0.55963916", "0.55750257", "0.5551854", "0.55409104", "0.55060744", "0.5505066", "0.5496916", "0.5489299", "0.5472598", "0.5471783", "0.5469413", "0.545...
0.0
-1
Genera una cadena aleatoria de caracteres, que puede contener numeros, letras mayusculas y letras minusculas, el primer caracter sera siempre una letra mayuscula.
def random_string(length=1, uppercase=False, lowercase=False): characters = digits code = str() if uppercase: characters += ascii_uppercase if lowercase: characters += ascii_lowercase while len(code) < length: character = choice(characters) if len(code) == 0: character = choice(ascii_uppercase) code += character return code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intercambiar_mayusculas_minusculas(cad):\n\n nueva_cad = \"\"\n\n for i in cad:\n if ord(i) < 64 or ord(i) > 122:\n nueva_cad = nueva_cad + i\n elif ord(i) < 97:\n nueva_cad = nueva_cad + chr(ord(i) + 32)\n else:\n nueva_cad = nueva_cad + chr(ord(i) -...
[ "0.6781998", "0.64925814", "0.6049585", "0.5974472", "0.5921979", "0.58175975", "0.58117867", "0.58117867", "0.5776275", "0.5770409", "0.57647777", "0.57624525", "0.5754226", "0.57396114", "0.573307", "0.5721849", "0.5701658", "0.5699352", "0.5680936", "0.56807214", "0.566013...
0.0
-1
Job handler. Returns True on success, False on failure or raise exception. Depending on result, on_success, on_failure or on_exception
def handler(self, *args, **kwargs): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_success(self, result_ttl: int, pipeline: 'Pipeline'):\n # self.log.debug('Setting job %s status to finished', job.id)\n self.set_status(JobStatus.FINISHED, pipeline=pipeline)\n # Result should be saved in job hash only if server\n # doesn't support Redis streams\n inc...
[ "0.6380917", "0.630514", "0.6203368", "0.6140142", "0.6020525", "0.5998409", "0.5967726", "0.5962727", "0.59392214", "0.59327286", "0.59168214", "0.59048873", "0.5869822", "0.5861403", "0.58348876", "0.57895267", "0.57549196", "0.5748723", "0.5741472", "0.5715548", "0.5704743...
0.0
-1
Called when handler returns True
def on_success(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(self, *args, **kwargs):\n return True", "def on_success(self) -> None:", "def handle(self) -> None:", "def ok_callback(self):\n pass", "def event_handler(self, response):\n pass", "def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)", "def r...
[ "0.8078143", "0.6831724", "0.6803624", "0.6726275", "0.67239153", "0.67041284", "0.66923994", "0.6612446", "0.659153", "0.6590828", "0.6533328", "0.6532542", "0.6480587", "0.6457898", "0.63366073", "0.6306607", "0.63060313", "0.6250639", "0.62399155", "0.62257975", "0.6218537...
0.67066246
5
Called when handler returns False
def on_failure(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(self, *args, **kwargs):\n return True", "def dummy_callback_handler(self, ret):\n pass", "def unhandled(self):\n return True", "def set_as_handled(self):\n self.not_handled = False", "async def unhandled_response(self, pkt, source):\n if False:\n yi...
[ "0.72471005", "0.70438576", "0.690345", "0.6730309", "0.65689135", "0.65600246", "0.65534455", "0.6529977", "0.64647585", "0.62942225", "0.6265559", "0.62107646", "0.61744297", "0.614441", "0.61417645", "0.61417645", "0.6119187", "0.6103069", "0.6037326", "0.59561646", "0.591...
0.5970655
19
Called when handler raises an error
def on_exception(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, handler):\n pass", "def handle_err(self):\n pass", "def error_received(self, exc):\n print('Error received:', exc)", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def _call_error_handler(self, event, err, **kwargs):\n if self._on_error_h...
[ "0.8809893", "0.7991302", "0.7537642", "0.7489565", "0.74614865", "0.7459694", "0.74516124", "0.74178", "0.7346542", "0.7311401", "0.73027474", "0.7302706", "0.72894347", "0.72691995", "0.72537273", "0.72459257", "0.72391665", "0.7171314", "0.7167411", "0.7127044", "0.7114514...
0.7198904
17
Called by scheduler to get next run time
def get_schedule(self, status): if status == self.S_LATE and self.delay_interval: return (datetime.datetime.now() + datetime.timedelta( seconds=random.random() * self.delay_interval)) return None # Remove schedule on complete
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.cu...
[ "0.777297", "0.777297", "0.6952469", "0.6825533", "0.67416143", "0.6597113", "0.6557749", "0.65416026", "0.65218025", "0.65074617", "0.64740384", "0.64740384", "0.6465585", "0.64539963", "0.6424806", "0.64134806", "0.638315", "0.6380247", "0.63793695", "0.63596106", "0.632563...
0.0
-1
Return managed object instance or id (applicable only when map_task is not None)
def get_managed_object(self): return self.key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def managed_object_id(self):\n o = self._data[\"managed_object\"]\n if type(o) in (int, long):\n return o\n return o.id", "def _get_instance_id(self):\n return self.__instance_id", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def...
[ "0.6440617", "0.6409251", "0.62016565", "0.62016565", "0.62016565", "0.6111818", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.5931027", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", ...
0.6038846
12
Return dict containing job's MRT params (applicable only when map_task is not None)
def get_map_task_params(self): return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_powermax_job_parameters():\n return dict(\n job_id=dict(type='str', required=True)\n )", "def construct_job_params(self, hook: Any) -> dict[Any, Any | None]:\n missing_params = self.required_params - set(self.job_params)\n if missing_params:\n raise AirflowException(...
[ "0.63929814", "0.6278747", "0.6234184", "0.60798496", "0.601743", "0.59797996", "0.5893281", "0.5834644", "0.5736956", "0.5709796", "0.5683366", "0.5677564", "0.559777", "0.5590839", "0.5587835", "0.556893", "0.55425924", "0.5519106", "0.55032396", "0.54824823", "0.54682976",...
0.8269762
0
Get dereference query condition. Called by dereference()
def get_defererence_query(self): return {"id": self.key}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def get_predicate(self):\n return self._predicate", "def query (node, grounding, db):\n return db.funcVal(grounding.groundNode(node))", "def condition(self) -> ExpressionNode:\n return self.__cond...
[ "0.5894116", "0.5894116", "0.5690017", "0.56574285", "0.5489223", "0.53180575", "0.52761585", "0.5190257", "0.51548445", "0.5123056", "0.5117761", "0.5093818", "0.5087869", "0.50837064", "0.5081821", "0.50449884", "0.5042481", "0.5036811", "0.5036811", "0.5036811", "0.5007558...
0.49621782
22
Check wrether the job can be launched
def can_run(self): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()", "def check_env():\n job_file...
[ "0.6642071", "0.66208327", "0.66090924", "0.6534082", "0.6497826", "0.64233065", "0.63549906", "0.6345807", "0.63409173", "0.6323821", "0.63086534", "0.6279424", "0.62567234", "0.62546295", "0.62468517", "0.621095", "0.619517", "0.61762893", "0.6153152", "0.61371064", "0.6133...
0.66355747
1
Return Q(s,a) based on current Q >>> q = TabularQ([0,1,2,3],['b','c']) >>> q.set(0, 'b', 5) >>> q.set(0, 'c', 10) >>> q_star = value(q,0) >>> q_star 10
def value(q, s): # Your code here return max(q.get(s,a) for a in q.actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getq(self, q=None):\n if q is None:\n return self.q\n elif isvector(q, self.n):\n return getvector(q, self.n)\n else:\n return getmatrix(q, (None, self.n))", "def calc_q_values(self, state):\n return self.q_values_func([state])[0]", "def getQVal...
[ "0.62951297", "0.6088538", "0.6056516", "0.5923299", "0.5866351", "0.5854476", "0.57913595", "0.57575697", "0.57466906", "0.5684224", "0.5621595", "0.55795395", "0.55610085", "0.55451214", "0.54641014", "0.5457697", "0.544126", "0.5425461", "0.5354141", "0.53406006", "0.53403...
0.53853166
18
Return pi(s) based on a greedy strategy. >>> q = TabularQ([0,1,2,3],['b','c']) >>> q.set(0, 'b', 5) >>> q.set(0, 'c', 10) >>> q.set(1, 'b', 2) >>> greedy(q, 0) 'c' >>> greedy(q, 1) 'b'
def greedy(q, s): # Your code here return argmax(q.actions,lambda a:q.get(s,a))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def greedy():\n return constant(0)", "def greedy_policy(self):\n return defaultdict(lambda: 0)", "def greedy(initial_state, heuristic, dimension...
[ "0.5804398", "0.5722934", "0.56879693", "0.5585851", "0.55798864", "0.5474166", "0.5431628", "0.5398444", "0.52860045", "0.51859754", "0.51723653", "0.5162084", "0.5161777", "0.5136731", "0.5130759", "0.51121217", "0.5099559", "0.50086784", "0.49864772", "0.49803564", "0.4977...
0.54330295
6
Return an action. >>> q = TabularQ([0,1,2,3],['b','c']) >>> q.set(0, 'b', 5) >>> q.set(0, 'c', 10) >>> q.set(1, 'b', 2) >>> eps = 0. >>> epsilon_greedy(q, 0, eps) greedy 'c' >>> epsilon_greedy(q, 1, eps) greedy 'b'
def epsilon_greedy(q, s, eps = 0.5): if random.random()<eps: return uniform_dist(q.actions).draw() else: return greedy(q,s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n ...
[ "0.7148608", "0.64981556", "0.63126504", "0.62986714", "0.62594926", "0.6258449", "0.61423904", "0.6026595", "0.60167265", "0.59709555", "0.5897679", "0.5888596", "0.58737326", "0.58079225", "0.58033264", "0.5781088", "0.5732777", "0.5710433", "0.57000035", "0.5699515", "0.56...
0.6940285
1
The channel does not maintain a queue. The phy/mac must do that.
def __init__(self, layer_delay=_default_layer_delay): self._attached_phys = [] self._layer_delay = layer_delay
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tryToSend( self, channel, value ):\n if self.free:\n self.free = False\n self.writeToSerial( channel, value )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise DCBoxError( 2 )\n else: self.queue.append( ( channel, value ) )", "def on_queue_declared(frame...
[ "0.65623856", "0.6367049", "0.6290647", "0.62499535", "0.6225105", "0.6223285", "0.6223285", "0.61602104", "0.6132086", "0.61052483", "0.60849404", "0.60789603", "0.60609317", "0.6031437", "0.6011403", "0.59847355", "0.59847355", "0.59652287", "0.5922787", "0.59155333", "0.59...
0.0
-1
Attach the given phy layer to the chanel. After a short delay, the phy_layer should receive a `BusyIndication` or `IdleIndication` SDU
def attach(self, phy_layer): self._attached_phys.append(phy_layer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, layer_delay=_default_layer_delay):\n self._attached_phys = []\n self._layer_delay = layer_delay", "def add_layer(self, layer: Union[CommandLayer, Type[CommandLayer]],\n active: Optional[bool] = True):\n if issubclass(layer, CommandLayer):\n layer = la...
[ "0.5359344", "0.51606864", "0.4981618", "0.49739328", "0.49689108", "0.49641663", "0.482585", "0.4784602", "0.47822216", "0.47765702", "0.4733921", "0.4723825", "0.4697315", "0.46468836", "0.46253222", "0.46140853", "0.46140155", "0.46135888", "0.45908052", "0.45793292", "0.4...
0.6782705
0
Detach the given phy_layer from the channel
def detach(self, phy_layer): self._attached_phys.remove(phy_layer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_layer(self, layer: CommandLayer):\n try:\n index = self.command_layers.index(layer)\n except ValueError:\n return\n\n if layer.active: # Transfer the active status to another layer\n if index < 0: # ... to the previous layer in the stack\n ...
[ "0.5844689", "0.5619299", "0.55078894", "0.54105484", "0.5367822", "0.5262961", "0.52497536", "0.51190823", "0.51019", "0.5091018", "0.508095", "0.5041397", "0.5038449", "0.50244045", "0.5019844", "0.49974367", "0.4993187", "0.49843723", "0.4972273", "0.49417922", "0.49223176...
0.84005505
0
delay the SDU by the channel time, then broadcast to all attached phys (including the sender)
def _receive_request(self, sdu): if self._busy: raise RuntimeError("Channel busy") self._busy = True self._propagate(sdu)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def udelay(us: int, /) -> None:", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_declare_sample_delay(self, *args)", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_declare_sample_delay(self, *args)", "def bro...
[ "0.5880643", "0.56571025", "0.55984133", "0.5559285", "0.5533061", "0.54948366", "0.54706526", "0.5467734", "0.5466112", "0.54435146", "0.5429417", "0.5395709", "0.53661245", "0.5351861", "0.5349273", "0.53445977", "0.5340805", "0.533384", "0.5331035", "0.5317365", "0.5315561...
0.0
-1
Set up an Unifi Protect Switch.
async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None): upv = hass.data[UPV_DATA]["upv"] coordinator = hass.data[UPV_DATA]["coordinator"] if not coordinator.data: return ir_on = config.get(CONF_IR_ON) if ir_on == "always_on": ir_on = "on" ir_off = config.get(CONF_IR_OFF) if ir_off == "led_off": ir_off = "autoFilterOnly" elif ir_off == "always_off": ir_off = "off" switches = [] for switch_type in config.get(CONF_MONITORED_CONDITIONS): for camera in coordinator.data: switches.append( UnifiProtectSwitch(coordinator, upv, camera, switch_type, ir_on, ir_off) ) async_add_entities(switches, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, coordinator, upv, camera, switch_type, ir_on, ir_off):\n self.coordinator = coordinator\n self.upv = upv\n self._camera_id = camera\n self._camera = self.coordinator.data[camera]\n self._name = \"{0} {1} {2}\".format(\n DOMAIN.capitalize(), SWITCH_TY...
[ "0.681985", "0.6236986", "0.60262203", "0.5953998", "0.574082", "0.5638578", "0.55229837", "0.5518489", "0.54835725", "0.53193676", "0.5306003", "0.529105", "0.5281412", "0.5253174", "0.5229308", "0.5203549", "0.5202759", "0.5167008", "0.51573527", "0.5147676", "0.5139711", ...
0.5260423
13
Initialize an Unifi Protect Switch.
def __init__(self, coordinator, upv, camera, switch_type, ir_on, ir_off): self.coordinator = coordinator self.upv = upv self._camera_id = camera self._camera = self.coordinator.data[camera] self._name = "{0} {1} {2}".format( DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera["name"] ) self._unique_id = self._name.lower().replace(" ", "_") self._icon = "mdi:{}".format(SWITCH_TYPES.get(switch_type)[1]) self._ir_on_cmd = ir_on self._ir_off_cmd = ir_off self._camera_type = self._camera["type"] self._attr = SWITCH_TYPES.get(switch_type)[2] self._switch_type = SWITCH_TYPES.get(switch_type)[2] _LOGGER.debug("UnifiProtectSwitch: %s created", self._name) _LOGGER.debug( "UnifiProtectSwitch: IR_ON %s IR_OFF %s", self._ir_on_cmd, self._ir_off_cmd )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data, camera, switch_type):\n self.data = data\n self._camera_id = camera\n self._camera = self.data.devices[camera]\n self._name = \"{0} {1} {2}\".format(DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera[\"name\"])\n self._unique_id = self._name....
[ "0.668283", "0.6222391", "0.6021048", "0.592539", "0.57956624", "0.5783978", "0.5776359", "0.5716881", "0.5700141", "0.56954783", "0.5673299", "0.5581777", "0.55620766", "0.55259913", "0.5523816", "0.5519903", "0.5512638", "0.55101365", "0.5487694", "0.5478698", "0.5470831", ...
0.708648
0
Poll for status regularly.
def should_poll(self): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def poll(self):\n self.poll_function(self.connection)", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def refresh_status() ->...
[ "0.7244833", "0.6906433", "0.6889742", "0.6884497", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68553823", ...
0.66181207
49
Return the name of the device if any.
def name(self): return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name(self):...
[ "0.89734465", "0.89734465", "0.8584627", "0.8584627", "0.8584627", "0.8502048", "0.84944665", "0.84194607", "0.83955586", "0.83955586", "0.8379756", "0.83165", "0.8306566", "0.8161454", "0.81564134", "0.8090303", "0.8087526", "0.8073198", "0.8006941", "0.7999654", "0.79544145...
0.0
-1
Return true if device is on.
def is_on(self): camera = self.coordinator.data[self._camera_id] if self._switch_type == "record_motion": enabled = True if camera["recording_mode"] == TYPE_RECORD_MOTION else False elif self._switch_type == "record_always": enabled = True if camera["recording_mode"] == TYPE_RECORD_ALLWAYS else False else: enabled = True if camera["ir_mode"] == self._ir_on_cmd else False return enabled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_on(self) -> bool:\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.state", "def is_on(self) -> boo...
[ "0.87892145", "0.86683935", "0.86683935", "0.86683935", "0.83668846", "0.8228278", "0.80222344", "0.7944196", "0.7889458", "0.7790521", "0.7790521", "0.76525116", "0.7474403", "0.7448818", "0.74451506", "0.74371916", "0.73949003", "0.7393098", "0.7387312", "0.7380527", "0.736...
0.0
-1
Icon to use in the frontend, if any.
def icon(self): return self._icon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n ...
[ "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8134013", "0.8134013", "0.80181646", "0.7946183", "0.79384285", "0.79384285", "0.79349846", "0.7925307", "0.7891739", "0.78858066", "0.7807477"...
0.75128
40
Return the device state attributes.
def device_state_attributes(self): attrs = {} attrs[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION attrs[ATTR_BRAND] = DEFAULT_BRAND attrs[ATTR_CAMERA_TYPE] = self._camera_type return attrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_state_attributes(self):\r\n return self.attributes", "def device_state_attributes(self):\r\n return self._attributes", "def device_state_attributes(self):\n return self._attrs", "def device_state_attributes(self):\n return self.attr", "def device_state_attributes(self...
[ "0.93173087", "0.9245708", "0.92207897", "0.92147964", "0.92147964", "0.9210798", "0.9210798", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", ...
0.8087728
66
When entity is added to hass.
async def async_added_to_hass(self): self.async_on_remove( self.coordinator.async_add_listener(self.async_write_ha_state) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_added_to_hass(self):\n self.hass.data[DOMAIN].add_entity_id(self.entity_id)\n self.hass.data[DOMAIN].add_sensor(self)", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n self.coordinator.entities.append(self)", "async def async_added_to_ha...
[ "0.79615426", "0.7715403", "0.73743874", "0.72151524", "0.69275665", "0.69275665", "0.6916353", "0.6870173", "0.6820395", "0.6781351", "0.6752743", "0.6735904", "0.67260987", "0.67226434", "0.67097443", "0.6699443", "0.66656816", "0.6656795", "0.66509074", "0.6563511", "0.654...
0.66790694
20
Turn the device on.
async def async_turn_on(self, **kwargs): if self._switch_type == "record_motion": _LOGGER.debug("Turning on Motion Detection") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_MOTION) elif self._switch_type == "record_always": _LOGGER.debug("Turning on Constant Recording") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_ALLWAYS) else: _LOGGER.debug("Turning on IR") await self.upv.set_camera_ir(self._camera_id, self._ir_on_cmd) await self.coordinator.async_request_refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or s...
[ "0.8281669", "0.7963511", "0.7818252", "0.7757262", "0.7712496", "0.7595535", "0.7588195", "0.7578901", "0.751521", "0.7514764", "0.7510081", "0.75018376", "0.74641955", "0.74501693", "0.7437987", "0.7409389", "0.7357094", "0.73516375", "0.73505986", "0.73295283", "0.7277296"...
0.6726692
56
Turn the device off.
async def async_turn_off(self, **kwargs): if self._switch_type == "ir_mode": _LOGGER.debug("Turning off IR") await self.upv.set_camera_ir(self._camera_id, self._ir_off_cmd) else: _LOGGER.debug("Turning off Recording") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_NEVER) await self.coordinator.async_request_refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._de...
[ "0.8548868", "0.8254268", "0.8161882", "0.8142198", "0.8119367", "0.807117", "0.7936932", "0.7904594", "0.7890579", "0.78688365", "0.7838398", "0.7834393", "0.78312963", "0.77885693", "0.77876323", "0.7755004", "0.77309066", "0.77295303", "0.7704694", "0.76726454", "0.7648420...
0.69212335
85
Given an Item Number and library data dict Return the name of the library it's found in, and the symbol name
def findSymbolByItemnum(itemnum, libs_dict): e_itemnum = re.escape(itemnum) for libname, dat in libs_dict.items(): m = re.search(r'^DEF ([^ ]*) .*(?:\n[^\$].+)+\nF ?\d+ "'+e_itemnum+r'".* "Item Number"\n', dat, re.MULTILINE) try: symname = m.group(1) return libname, symname except: continue return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path...
[ "0.6351624", "0.588709", "0.558512", "0.5506981", "0.5491768", "0.5448772", "0.5388454", "0.5337695", "0.53156734", "0.5299146", "0.5281417", "0.5229371", "0.52056694", "0.5185004", "0.51806813", "0.51693463", "0.5163847", "0.51593566", "0.5133286", "0.51013744", "0.5091456",...
0.78984857
0
Test on a data set of songs and known genres.
def test(self, songs, genres): logging.info('Starting testing.') num_matches = 0 confusion_matrix = ConfusionMatrix(genres) for song, actual_genre in zip(songs, genres): predicted_genre = self.classify(song) logging.info('Actual genre: {}, predicted genre: {}'.format(actual_genre, predicted_genre)) confusion_matrix.add_genres(actual_genre, predicted_genre) if actual_genre == predicted_genre: num_matches += 1 return num_matches, confusion_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_songs_by_genre(self, track_elms, service_config, request):\n genre_id = uuid.UUID(avalon.compat.to_uuid_input('c12d2a49-d086-43d6-953d-b870deb24228'))\n service_config.track_store.get_by_genre.return_value = track_elms\n service_config.id_cache.get_genre_id.return_value = genre_id...
[ "0.6808168", "0.65337896", "0.63130933", "0.61084867", "0.5960939", "0.59207577", "0.57667565", "0.5645742", "0.56377244", "0.5593208", "0.55835515", "0.5554038", "0.5543787", "0.551411", "0.55078", "0.5503138", "0.5479609", "0.54730666", "0.54501826", "0.5445314", "0.5428625...
0.65475684
1
Classify a particular song.
def classify(self, song): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify_playlist(classifier, playlist_feature_data, playlist_data_dict):\n\n clf, clf_name = classifier\n\n playlist_features, playlist_song_ids = playlist_feature_data\n # run classifier on playlist songs\n results = clf.predict_all(playlist_features)\n liked_songs = [playlist_song_ids[i] for ...
[ "0.6249288", "0.5961244", "0.57986456", "0.56695867", "0.5515968", "0.5402372", "0.53549737", "0.52264583", "0.52200854", "0.52137727", "0.5092305", "0.50832874", "0.50748336", "0.50739837", "0.5049888", "0.5019744", "0.5016217", "0.50088155", "0.49824685", "0.49791542", "0.4...
0.82713926
0
Predict the genres of all songs in the given directory, saving this data in a file. Note that the genres of the songs are not known beforehand.
def predict_directory(self, directory_name, result_file_name): logging.info('Starting prediction.') with open(result_file_name, 'ab') as f: writer = csv.writer(f) writer.writerow(('id', 'category')) for song_id in os.listdir(directory_name): song = pd.read_csv('{}{}'.format(directory_name, song_id)).values predicted_genre = self.classify(song) logging.info('Predicted genre: {}'.format(predicted_genre)) writer.writerow((song_id, predicted_genre))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveTmdbGenres():\n \n listGenres = tmdb.Genres().list()[\"genres\"]\n \n genres = { _format(g[\"name\"]):i for i, g in enumerate(listGenres) }\n\n np.save(GENRES_FILE, np.asarray([genres]))", "def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n ...
[ "0.60163605", "0.60031205", "0.59263384", "0.588661", "0.58185124", "0.58125246", "0.5753232", "0.5715541", "0.56986034", "0.56940067", "0.56717575", "0.5634953", "0.5573612", "0.5497498", "0.5478532", "0.5429409", "0.5406123", "0.53959244", "0.53919566", "0.5375165", "0.5352...
0.6846945
0
Compute the euclidean distance between two numpy vectors
def euclidean_distance(a, b): return np.linalg.norm(a - b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n ...
[ "0.8293001", "0.82679814", "0.8241531", "0.8164159", "0.8139716", "0.8110727", "0.8093996", "0.8087907", "0.80677295", "0.7933276", "0.79193497", "0.7915708", "0.7905538", "0.7787108", "0.7761666", "0.7725438", "0.76735663", "0.7641406", "0.75959605", "0.75740075", "0.7565347...
0.8229706
3
Should call Module.train() on each torch.nn.Module, if present
def train(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def to_train(self):\n for _m in self.modules.values():\n _m.train()", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self):\n ...
[ "0.8094452", "0.72317255", "0.69105303", "0.6737151", "0.6711554", "0.6629512", "0.6621208", "0.6594015", "0.65665567", "0.6528837", "0.64615065", "0.6457623", "0.6428052", "0.641126", "0.64072895", "0.6387566", "0.6358818", "0.634253", "0.63398683", "0.63197255", "0.63110536...
0.6031432
65
Should call Module.eval() on each torch.nn.Module, if present
def eval(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval(self) -> None:\n for module in self.modules.values():\n module.eval()\n return", "def to_eval(self):\n for _m in self.modules.values():\n _m.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = g...
[ "0.7058148", "0.6562479", "0.6410497", "0.6410497", "0.64048153", "0.6211592", "0.61744463", "0.6138237", "0.60468954", "0.5828812", "0.5767713", "0.57668567", "0.57438034", "0.57431364", "0.57200557", "0.5707766", "0.5693356", "0.56603515", "0.56447244", "0.5637777", "0.5625...
0.0
-1
Parse command line arguments.
def parse_args(): parser = argparse.ArgumentParser(description="Hyper parameter") parser.add_argument( "--model", help="Model to use", default="All", type=str) return parser.parse_args()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def parseArguments(self):\n iterator = iter(sys.argv[1:])...
[ "0.807208", "0.74767", "0.73985106", "0.73932225", "0.73173845", "0.72599804", "0.7232453", "0.7222555", "0.713585", "0.7102058", "0.71020466", "0.7096281", "0.7093058", "0.70890576", "0.7080071", "0.70745754", "0.70641047", "0.70633066", "0.70568484", "0.7051162", "0.7048470...
0.0
-1
Finds the dimension of the points in the file.
def find_dimesion(filename): file = open(filename,"r") line = file.readline() file.close() return len(line.split())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = lin...
[ "0.6812365", "0.6810448", "0.68029106", "0.6689416", "0.6590357", "0.6507463", "0.64789444", "0.6432954", "0.6427297", "0.63916624", "0.6285233", "0.6277324", "0.62739605", "0.6271587", "0.6249749", "0.6227997", "0.621909", "0.6207011", "0.6204155", "0.6199409", "0.6199409", ...
0.6965317
0
Fills the list of points to be used.
def fill_points_list(filename): f = open(input_file_test(filename), "r") dimension = find_dimesion(filename) points = list() line_count = 1 flag = False for line in f: current_point = line.split() if dimension == len(current_point): check_if_number(current_point) point = Point(points=current_point, line=line_count) points.append(point) line_count += 1 else: flag=True break if flag: print PointError() sys.exit() if len(points) ==1: print NotEnoughPointError() sys.exit() f.close() return points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def fill_step(self):\n while len(self.x_values) < self.num_points\n x_step = self.get_step()\n y_step = self.get_step()\n if x_step == 0 and y_step == 0:\n continue\n ...
[ "0.6888405", "0.6835494", "0.673464", "0.6689272", "0.6534429", "0.64859575", "0.6433665", "0.6427808", "0.6359302", "0.6349332", "0.63460994", "0.6302308", "0.6204271", "0.617405", "0.61379343", "0.6130424", "0.6078132", "0.6070575", "0.60653776", "0.60491824", "0.6038039", ...
0.61397636
14
Checks whether a given list is in the correct format.
def check_if_number(list): for item in list: try: float(item) except ValueError as e: print WrongTypePointError(item) sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Se...
[ "0.7171144", "0.7102635", "0.70879775", "0.7007761", "0.6736717", "0.67267084", "0.6715916", "0.6705785", "0.6677589", "0.66702265", "0.6668745", "0.6659545", "0.6606097", "0.65899694", "0.6580848", "0.6565618", "0.65465", "0.65330225", "0.65142834", "0.65126836", "0.6511854"...
0.0
-1
Calculate the distance between two points with same dimensions.
def distance_between(point_one, point_two): sum = 0 for d1,d2 in zip(point_one,point_two): sum += math.pow(float(d1) - float(d2), 2) return math.sqrt(sum)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self, point_a, point_b):\n distance = 0.0\n if len(self.dimensions) > 1:\n for a, b, dim in zip(point_a, point_b, self.dimensions):\n distance += dim.distance(a, b)\n\n if len(self.dimensions) == 1:\n distance += self.dimensions[0].distance(poi...
[ "0.80836755", "0.79572546", "0.77294564", "0.7520677", "0.75188065", "0.75087476", "0.7456917", "0.74458915", "0.74410594", "0.7438067", "0.7431563", "0.74289346", "0.7414903", "0.7412151", "0.7408342", "0.74023426", "0.7388828", "0.73793226", "0.7354223", "0.73516434", "0.73...
0.0
-1
Finds the closest points in a given list of Point objects. There are two for loops because I imagined the Point list this way. y > represents the rows x > represents the columns
def find_closest_points(points): closest_dist = float("inf") closest_points = None, None for y, point_one in enumerate(points): for x, point_two in enumerate(points): if x > y: dist= distance_between(point_one.points,point_two.points) if dist < closest_dist: closest_dist = dist closest_points= point_one, point_two return closest_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_points(self, points, maxdist=None):\n return [self.closest_point(point, maxdist) for point in points]", "def closest_dist(x, y, x_list, y_list):\n points = np.array([x, y]).T\n points_list = np.array([x_list, y_list]).T\n\n dpt0 = points_list[:, 0] - points[:, 0, np.newaxis]\n dpt1...
[ "0.7746659", "0.7575003", "0.7565495", "0.74907386", "0.73184353", "0.72928536", "0.7258349", "0.722991", "0.6869522", "0.67983764", "0.67398", "0.67391354", "0.67248935", "0.6715504", "0.6708824", "0.66605735", "0.661304", "0.6607124", "0.6598075", "0.659383", "0.65821815", ...
0.81107926
0
Tests whether the input file is empty. If input is not empty, it returns the file; otherwise it exits from the system.
def input_file_test(input): try: if os.stat(input).st_size == 0: print ("The input file %s is empty, exiting." % input) sys.exit() except OSError as e: print(e) sys.exit() return input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_file(self):\n\t\tmain.Main(['input/empty.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/empty.csv'))", "def is_file_empty(file_name):\n # open ile in read mode\n with open(file_name, 'r') as read_obj:\n # read first character\n one_char = read_obj.re...
[ "0.68083346", "0.68074083", "0.6786199", "0.67231107", "0.6606845", "0.6573466", "0.65498596", "0.6435035", "0.6296949", "0.6270471", "0.6231167", "0.62046707", "0.61650205", "0.61614615", "0.5990059", "0.58732206", "0.5847673", "0.58417505", "0.5835229", "0.5767335", "0.5743...
0.88009804
0
Arguments speeds speed to move the light across the entire string
def __init__(self, layout, *, count=3, speeds=DEFAULT_SPEED, bounds=None, positions=None, colors=None, widths=None, shapes='linear', accelerations=None, background_color=util.colors.Black, **kwds): super().__init__(layout, **kwds) self.background_color = background_color self.count = count if not positions: if count == 1: positions = [1 / 2] else: positions = [i / (count) for i in range(count)] if not widths: widths = [1 / (2 * count)] accelerations = accelerations or [0] if not isinstance(speeds, (list, tuple)): speeds = [speeds] if not isinstance(accelerations, (list, tuple)): accelerations = [accelerations] if not isinstance(widths, (list, tuple)): widths = [widths] if not isinstance(shapes, (list, tuple)): shapes = [shapes] if not isinstance(positions, (list, tuple)): positions = [positions] if not colors: if count == 1: colors = [util.colors.Yellow] else: colors = [wheel.wheel_helper(p, 1, 0) for p in positions] colors = [(2 * r, 2 * g, 2 * b) for r, g, b in colors] n = len(self.color_list) bounds = bounds or [(0, 1)] A = speeds, accelerations, bounds, positions, colors, widths, shapes def make_light(i): return light.Light(self.color_list, *[a[i % len(a)] for a in A]) self.lights = [make_light(i) for i in range(count)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed(self, s=0):", "def speed(self, speed: int, time: int = 0, /) -> None:", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def set_speed():\n pass", "def set_speed(self,speed):\n self.speed = speed", "def set_speed(speed):\n if speed >255:\n speed =255\...
[ "0.6296124", "0.6269911", "0.6195199", "0.61470234", "0.6099951", "0.6089762", "0.6049702", "0.6044615", "0.6016867", "0.5855935", "0.58457875", "0.5815366", "0.5801311", "0.5796839", "0.5796374", "0.57753795", "0.57381445", "0.5728528", "0.57273424", "0.57228905", "0.5700719...
0.0
-1
Create an empty TextModel.
def __init__(self): # # Create dictionaries for each characteristic # self.words = {} # For counting words self.wordlengths = {} # For counting word lengths self.stems = {} # For counting stems self.sentencelengths = {} # For counting sentence lengths # # Create another of your own # self.gerund = {} # For counting words with ing self.text = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model(self):\n self.model = None\n pass", "def __init__(self, text=None, model=None, voice_test_kind=None): # noqa: E501 # noqa: E501\n\n self._text = None\n self._model = None\n self._voice_test_kind = None\n self.discriminator = None\n\n self.text =...
[ "0.6320198", "0.6107785", "0.59958667", "0.59958667", "0.59898424", "0.5779712", "0.5769155", "0.569185", "0.56886506", "0.5644959", "0.5625166", "0.55378115", "0.55203444", "0.5467053", "0.54275054", "0.5417462", "0.5403038", "0.5393681", "0.5377207", "0.5369345", "0.5360774...
0.0
-1
Display the contents of a TextModel.
def __repr__(self):
    """Return a multi-section text summary of every feature dictionary."""
    sections = [
        ('Words:', self.words),
        ('Word lengths:', self.wordlengths),
        ('Stems:', self.stems),
        ('Sentence lengths:', self.sentencelengths),
        ('Gerunds:', self.gerund),
    ]
    # Each section is "<label>\n<dict>"; sections are separated by blank lines.
    return '\n\n'.join('%s\n%s' % (label, d) for label, d in sections)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n self.set_text(self.read())", "def send_text(self):\n def f():\n self.highlight_input()\n text = self.text_transfer.get()[self.counter]\n self.model = text\n # print(\"yep\")\n self.parent.update_model(self.model.upper())\n ...
[ "0.64818233", "0.6356436", "0.6119905", "0.5938623", "0.5938623", "0.5938623", "0.5938623", "0.5938623", "0.5912198", "0.5831747", "0.5817723", "0.5801984", "0.57914543", "0.5775937", "0.5744499", "0.57407993", "0.5716162", "0.5654628", "0.5648894", "0.5643585", "0.56414855",...
0.0
-1
reads the named file and stores its contents as a str
def readTextFromFile(self, filename):
    """Read the entire contents of ``filename`` into ``self.text``.

    Uses a context manager so the file handle is closed even if ``read()``
    raises — the original open/read/close sequence leaked the handle on error.

    :param filename: path of the text file to read
    """
    with open(filename) as f:
        self.text = f.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file2str(file):\n with open(file, \"r\") as textFile:\n return textFile.read()", "def file_to_string(path_to_file):\n\t\twith open(path_to_file, 'r') as f:\n\t\t\tcontent = f.read()\n\t\treturn content", "def txt2str(file: str) -> str:\n return get_first_line(file)", "def file_to_str(fname):...
[ "0.8525866", "0.7717556", "0.77115536", "0.7649846", "0.7578865", "0.7578865", "0.7481903", "0.7253718", "0.70882875", "0.708055", "0.704386", "0.67967176", "0.677502", "0.67743015", "0.67640656", "0.6590976", "0.65571094", "0.64584035", "0.64482987", "0.6414025", "0.64015496...
0.0
-1
takes the str in self.text and creates a dict of sentence-length frequencies.
def makeSentenceLengths(self):
    """Tally sentence lengths (in words) from ``self.text`` into
    ``self.sentencelengths``.

    A sentence ends at any whitespace-separated token containing '.',
    '?' or '!'.  Words after the last terminator are not counted,
    matching the original behavior.

    Fixes: the original shadowed the builtin ``list`` and kept a
    redundant intermediate length list; this is a single pass.
    """
    words = self.text.split()
    start = 0  # index of the first word of the current sentence
    for i, w in enumerate(words):
        if '.' in w or '?' in w or '!' in w:
            n = i + 1 - start
            self.sentencelengths[n] = self.sentencelengths.get(n, 0) + 1
            start = i + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el...
[ "0.7311082", "0.7273472", "0.719954", "0.7128146", "0.7035594", "0.6915477", "0.6807058", "0.67538345", "0.6753401", "0.6745445", "0.6740302", "0.67023945", "0.66921777", "0.6651242", "0.66414374", "0.6606957", "0.65921336", "0.6576811", "0.6570049", "0.6568746", "0.6565606",...
0.6100995
65
takes string s, removes all punctuation, and converts all uppercase letters to lowercase
def cleanString(self, s):
    """Return ``s`` lowercased with every punctuation character removed.

    Uses a single ``str.translate`` pass instead of the original loop of
    one ``str.replace`` per punctuation character found, which rebuilt
    the string repeatedly.  The result is identical.

    :param s: the string to normalize
    :return: the cleaned string
    """
    return s.lower().translate(str.maketrans('', '', punctuation))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cap_case_str(s):\n return re.sub(r'(?:[_\\-\\s]+|^)(.)', lambda m: m.group(1).upper(), s)", "def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(...
[ "0.7568107", "0.7516122", "0.74232686", "0.7276909", "0.7276909", "0.7258973", "0.72260845", "0.7212958", "0.71921355", "0.71753323", "0.71484214", "0.70824426", "0.7060332", "0.70213103", "0.7021105", "0.7014167", "0.7006824", "0.6992608", "0.6962558", "0.6950061", "0.694671...
0.0
-1