query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
See ticket 2107 for the bug that this tickles.
def test_do_manga_dither_after_sequence(self): sopTester.updateModel('mcp', TestHelper.mcpState['boss_science']) dither = 'N' cmdState = self.actorState.doMangaSequence cmdState.reinitialize(self.cmd) cmdState.count = 1 cmdState.dithers = 'NSE' cmdState.reset_dith...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tick(self):", "def tick(self):\r\n pass", "def tick(self):\n pass", "def tick(self):\n pass", "def tick_descent(self):\n pass", "def lastTick():", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def tick_skipped(self):\n pass", ...
[ "0.68404317", "0.67841446", "0.67153627", "0.67153627", "0.6649946", "0.6539368", "0.6539086", "0.6539086", "0.6154324", "0.60770965", "0.6065959", "0.6026124", "0.5937391", "0.58856446", "0.580046", "0.5741288", "0.5730749", "0.57282233", "0.5695636", "0.56728476", "0.564394...
0.0
-1
See ticket 2107 for the bug that this tickles.
def test_do_apogeemanga_dither_after_sequence(self): sopTester.updateModel('mcp', TestHelper.mcpState['apogee_science']) sopTester.updateModel('apogee', TestHelper.apogeeState['B_open']) sopTester.updateModel('platedb', TestHelper.platedbState['apgoeemangaDither']) self._update_cart(1, '...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tick(self):", "def tick(self):\r\n pass", "def tick(self):\n pass", "def tick(self):\n pass", "def tick_descent(self):\n pass", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def lastTick():", "def tick_skipped(self):\n pass", ...
[ "0.6841335", "0.67850286", "0.67163193", "0.67163193", "0.665169", "0.65407777", "0.65407777", "0.6539678", "0.61551094", "0.6077852", "0.6066676", "0.6026107", "0.5938928", "0.58871347", "0.57996696", "0.57409745", "0.57303774", "0.57297117", "0.5696213", "0.56716204", "0.56...
0.0
-1
For 371, not closing FFS for bias/dark.
def test_do_boss_calibs_one_bias_ffs_open(self): sopTester.updateModel('mcp', TestHelper.mcpState['boss_science']) cmdState = CmdState.DoBossCalibsCmd() cmdState.nBias = 1 self._do_boss_calibs(4, 25, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():\n\trfm.close()", "def close_trace(self):\n if self.State==1:\n self.tf.close()\n self.State = 0 \n else:\n print \"Tracefile not open...\"", "def safeClose():\n # outputToggle(ledPin, False)\n # outputToggle(auxlightPin, False)\n camera.stop_...
[ "0.63533455", "0.5993525", "0.59140307", "0.5904497", "0.5810159", "0.5796537", "0.5787865", "0.57800907", "0.5778978", "0.577066", "0.57619506", "0.57507795", "0.5708684", "0.5704931", "0.56797045", "0.5677076", "0.56575465", "0.56262064", "0.56262064", "0.5623001", "0.56024...
0.0
-1
For 371, not closing FFS for bias/dark.
def test_do_boss_calibs_one_dark_ffs_open(self): sopTester.updateModel('mcp', TestHelper.mcpState['boss_science']) cmdState = CmdState.DoBossCalibsCmd() cmdState.nDark = 1 self._do_boss_calibs(4, 25, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():\n\trfm.close()", "def close_trace(self):\n if self.State==1:\n self.tf.close()\n self.State = 0 \n else:\n print \"Tracefile not open...\"", "def safeClose():\n # outputToggle(ledPin, False)\n # outputToggle(auxlightPin, False)\n camera.stop_...
[ "0.6353356", "0.5994088", "0.5915026", "0.59051275", "0.58110243", "0.5796057", "0.57873064", "0.57794714", "0.5778692", "0.57710266", "0.5761392", "0.57499737", "0.5708475", "0.57054716", "0.5680105", "0.56762487", "0.5658499", "0.5626224", "0.5626224", "0.5622663", "0.56034...
0.0
-1
coobserving carts should close the apogee shutter first.
def test_do_boss_calibs_one_flat_coobserve(self): cmdState = CmdState.DoBossCalibsCmd() cmdState.nFlat = 1 sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded']) sopTester.updateModel('mcp', TestHelper.mcpState['apogee_parked']) sopTester.updateModel('...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cart_flushed(self):\n self.fill_session_cart()\n\n session = self.client.session\n self.assertNotEqual(session['cart'], {})\n self.assertNotEqual(session['cart_cost'], 0)\n\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n\n session = self.client...
[ "0.615932", "0.57919484", "0.5666151", "0.5642451", "0.56404835", "0.55797815", "0.55783755", "0.5517197", "0.55146784", "0.54833806", "0.5479677", "0.5449196", "0.54028076", "0.5392104", "0.5381052", "0.53170824", "0.52840286", "0.52400047", "0.5232583", "0.5214879", "0.5207...
0.0
-1
Coobserving carts should not bother with the apogee shutter when the gang connector is not at the cart.
def test_do_boss_calibs_one_flat_coobserve_gangPodium(self): cmdState = CmdState.DoBossCalibsCmd() cmdState.nFlat = 1 sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded']) sopTester.updateModel('mcp', TestHelper.mcpState['all_off']) sopTester.updateMo...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic...
[ "0.613266", "0.6027911", "0.5957148", "0.58498114", "0.5780373", "0.57664615", "0.57352144", "0.5670634", "0.5603377", "0.5565635", "0.55271673", "0.5519921", "0.55182767", "0.55163413", "0.54893744", "0.54880553", "0.54810375", "0.54696536", "0.5443818", "0.543117", "0.53950...
0.0
-1
coobserving carts should close the apogee shutter first.
def test_do_boss_calibs_one_arc_coobserve(self): cmdState = CmdState.DoBossCalibsCmd() cmdState.nArc = 1 sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded']) sopTester.updateModel('mcp', TestHelper.mcpState['apogee_parked']) sopTester.updateModel('ap...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cart_flushed(self):\n self.fill_session_cart()\n\n session = self.client.session\n self.assertNotEqual(session['cart'], {})\n self.assertNotEqual(session['cart_cost'], 0)\n\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n\n session = self.client...
[ "0.6157707", "0.579232", "0.5664724", "0.5641533", "0.5639043", "0.55788285", "0.5577999", "0.5516711", "0.5514344", "0.54846823", "0.5480096", "0.5448836", "0.54019326", "0.53919315", "0.5380245", "0.53151876", "0.5282395", "0.52394223", "0.52324045", "0.5215534", "0.5207088...
0.0
-1
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object
def unwrap(self): if self.algorithm == 'rsa': return self.asn1['private_key'].parsed if self.algorithm == 'dsa': params = self.asn1['private_key_algorithm']['parameters'] return DSAPrivateKey({ 'version': 0, 'p': params['p'], ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].p...
[ "0.7709716", "0.7091911", "0.6661885", "0.6650223", "0.6593615", "0.6553231", "0.64998823", "0.6498649", "0.6429571", "0.64050627", "0.6381211", "0.6249152", "0.6246417", "0.6217567", "0.62138116", "0.6204138", "0.6198371", "0.6193991", "0.6141875", "0.6141632", "0.6117683", ...
0.7861159
0
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object
def unwrap(self): if self.algorithm == 'ec': return self.asn1['public_key'] return self.asn1['public_key'].parsed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading...
[ "0.70528996", "0.682243", "0.6711425", "0.67085093", "0.6610189", "0.65048105", "0.6489869", "0.6488379", "0.64881927", "0.64246345", "0.6423231", "0.64138883", "0.6409382", "0.6395284", "0.63761365", "0.6347127", "0.6329381", "0.6321539", "0.62870216", "0.6245045", "0.619662...
0.71075326
0
Creates a fingerprint that can be compared with a private key to see if the two form a pair. This fingerprint is not compatible with fingerprints generated by any other software.
def fingerprint(self): if self._fingerprint is None: self._fingerprint = _fingerprint(self.asn1, None) return self._fingerprint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n...
[ "0.6485189", "0.6272822", "0.6004521", "0.5935738", "0.5886129", "0.58848107", "0.5814873", "0.57364976", "0.57348734", "0.5725082", "0.5690334", "0.56683075", "0.56541723", "0.5592357", "0.5590496", "0.5577048", "0.55707747", "0.5545947", "0.5468767", "0.54511124", "0.544233...
0.0
-1
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey.
def _unwrap_private_key_info(key_info): key_alg = key_info.algorithm if key_alg == 'rsa' or key_alg == 'rsassa_pss': return key_info['private_key'].parsed if key_alg == 'dsa': params = key_info['private_key_algorithm']['parameters'] parsed = key_info['private_key'].parsed ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params[...
[ "0.72001606", "0.61105424", "0.59807235", "0.59552276", "0.5781235", "0.5733041", "0.5708165", "0.56828797", "0.5617318", "0.56119716", "0.55409443", "0.55051327", "0.54653615", "0.54501706", "0.5382498", "0.53593355", "0.53350115", "0.5326151", "0.5310217", "0.5288465", "0.5...
0.82866263
0
Returns a fingerprint used for correlating public keys and private keys
def _fingerprint(key_object, load_private_key): if isinstance(key_object, PrivateKeyInfo): key = key_object['private_key'].parsed if key_object.algorithm == 'rsa': to_hash = '%d:%d' % ( key['modulus'].native, key['public_exponent'].native, ) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format ...
[ "0.787024", "0.7284799", "0.72343296", "0.705091", "0.6990618", "0.6967752", "0.6874012", "0.6863513", "0.68291414", "0.6696847", "0.66854084", "0.6662513", "0.6511653", "0.6490654", "0.6474358", "0.64719266", "0.6441411", "0.64376676", "0.63801914", "0.63442427", "0.63356423...
0.70989317
3
Loads a public key from a DER or PEMformatted file. Supports RSA, DSA and EC public keys. For RSA keys, both the old RSAPublicKey and SubjectPublicKeyInfo structures are supported. Also allows extracting a public key from an X.509 certificate.
def parse_public(data): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) key_type = None # Appears to be PEM formatted if re.match(b'\\s*-----', data) is no...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_public_key(file_path: str, encoding: Encoding = None) -> PublicKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PublicKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given public keys...
[ "0.7472575", "0.72257024", "0.71392846", "0.68894285", "0.67382544", "0.6495051", "0.6290962", "0.62046856", "0.61463255", "0.6132795", "0.6115434", "0.6094741", "0.608693", "0.6077499", "0.6075479", "0.59709907", "0.5938014", "0.5919355", "0.59184045", "0.5892657", "0.585911...
0.6618405
5
Loads a certificate from a DER or PEMformatted file. Supports X.509 certificates only.
def parse_certificate(data): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) key_type = None # Appears to be PEM formatted if re.match(b'\\s*-----', data) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_cert(file, format=FORMAT_PEM):\n bio = BIO.openfile(file)\n if format == FORMAT_PEM:\n return load_cert_bio(bio)\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1...
[ "0.80966467", "0.78768235", "0.7402788", "0.7182695", "0.690147", "0.6856863", "0.675995", "0.65883523", "0.65731525", "0.65720236", "0.65545803", "0.6515523", "0.64636666", "0.64080656", "0.6395369", "0.6282713", "0.627722", "0.6143954", "0.6116484", "0.6039491", "0.6023128"...
0.62535036
17
Loads a private key from a DER or PEMformatted file. Supports RSA, DSA and
def parse_private(data, password=None): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if password is not None: if not isinstance(password, byte_cls): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_private_key(file_path: str, password: bytes = None,\n encoding: Encoding = None) -> PrivateKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PrivateKey:\n \"\"\"Determine the type of data and perform loading based on data ty...
[ "0.7638723", "0.760737", "0.75808924", "0.7359124", "0.7319375", "0.73121125", "0.72465986", "0.72324586", "0.711873", "0.6725466", "0.6643382", "0.65556437", "0.65554005", "0.65311825", "0.6508552", "0.6455299", "0.6412754", "0.639861", "0.63618493", "0.63402176", "0.6329978...
0.6443896
16
Removes PEMencoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it.
def _unarmor_pem(data, password=None): object_type, headers, der_bytes = unarmor(data) type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)' armor_type = re.match(type_regex, object_type) if not armor_type: raise ValueError(pretty_me...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_to_pem(key, password=None):\n if password:\n enc = BestAvailableEncryption(as_bytes(password))\n else:\n enc = NoEncryption()\n return key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, enc)", "def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n ...
[ "0.5717775", "0.5633551", "0.55747586", "0.55693495", "0.55062234", "0.5487441", "0.544285", "0.5425336", "0.5414394", "0.5376748", "0.53717756", "0.53678894", "0.53630817", "0.53443956", "0.5333764", "0.53271896", "0.532627", "0.5300595", "0.5287096", "0.5276774", "0.5239591...
0.5872964
0
Parses a PKCS1 private key, or encrypted private key
def _unarmor_pem_openssl_private(headers, data, password): enc_algo = None enc_iv_hex = None enc_iv = None if 'DEK-Info' in headers: params = headers['DEK-Info'] if params.find(',') != -1: enc_algo, enc_iv_hex = params.strip().split(',') else: enc_algo =...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ...
[ "0.7084714", "0.6720833", "0.6673835", "0.6610752", "0.6331672", "0.6264508", "0.6190057", "0.6077617", "0.6024118", "0.6018048", "0.5971257", "0.59107363", "0.5894486", "0.58921105", "0.58741915", "0.5793097", "0.57621163", "0.5711211", "0.5711122", "0.5708818", "0.570693", ...
0.5370357
48
Parses a PKCS12 ANS.1 DERencoded structure and extracts certs and keys
def _parse_pkcs12(data, password, load_private_key): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if password is not None: if not isinstance(password, by...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if t...
[ "0.60229003", "0.5695678", "0.5526086", "0.54503", "0.5424802", "0.51653993", "0.51222205", "0.5089913", "0.5076887", "0.50647706", "0.5058496", "0.4928613", "0.4890625", "0.48888293", "0.48255894", "0.47993195", "0.47745132", "0.47668105", "0.47628716", "0.47628716", "0.4759...
0.67407256
0
Parses a SafeContents PKCS12 ANS.1 structure and extracts certs and keys
def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key): if isinstance(safe_contents, byte_cls): safe_contents = SafeContents.load(safe_contents) for safe_bag in safe_contents: bag_value = safe_bag['bag_value'] if isinstance(bag_value, CertBag): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinsta...
[ "0.6905935", "0.519228", "0.5144184", "0.5090635", "0.4969031", "0.49095032", "0.48951414", "0.48807597", "0.4877804", "0.48451734", "0.4826676", "0.48249158", "0.48138803", "0.4775471", "0.47672594", "0.4758738", "0.47264582", "0.47241712", "0.46494445", "0.4630516", "0.4615...
0.6486718
1
Decrypts encrypted ASN.1 data
def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password): decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher] # Modern, PKCS#5 PBES2-based encryption if encryption_algorithm_info.kdf == 'pbkdf2': if encryption_algorithm_info.encryption_cipher == 'rc...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(self, data):", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def decode(self, crypto):", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def decrypted(data: str) -> str:\n\...
[ "0.76379395", "0.72864676", "0.681302", "0.6796825", "0.67245364", "0.67193043", "0.66916806", "0.66666394", "0.6586855", "0.6586855", "0.6545387", "0.6543017", "0.64966065", "0.6484453", "0.6446464", "0.643858", "0.6381135", "0.6379265", "0.63670003", "0.63490975", "0.633111...
0.5746602
71
Make a model where each trial has its own regressor using least squares all (LSA)
def _lsa_events_converter(events_file): import pandas as pd events = pd.read_csv(events_file, sep='\t') events['original_trial_type'] = events['trial_type'] for cond, cond_df in events.groupby('trial_type'): cond_idx = cond_df.index for i_trial, trial_idx in enumerate(cond_idx): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0...
[ "0.6492245", "0.64286274", "0.6315411", "0.62603295", "0.62471396", "0.62323004", "0.61873966", "0.61866516", "0.61369425", "0.6030381", "0.6029556", "0.60007995", "0.5999436", "0.5996053", "0.5956116", "0.5954355", "0.59472334", "0.5938865", "0.59243846", "0.5919388", "0.590...
0.0
-1
Make a model for each trial using least squares separate (LSS)
def _lss_events_iterator(events_file): import pandas as pd import numpy as np events = pd.read_csv(events_file, sep='\t') trial_counter = dict([(t, 0) for t in np.unique(events['trial_type'])]) for trial_id in range(len(events)): trial_type = events.loc[trial_id, 'trial_type'] # mak...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_SLM():\n samples = 10\n predictors = 3\n\n grid = list(create_parameter_grid(samples, predictors))\n Y = np.random.rand(samples, 10242, predictors)\n\n for i in range(len(grid)):\n # Skip exceptions that we know error.\n if grid[i][\"surf\"] is None:\n if grid[i][\"...
[ "0.67663735", "0.65927625", "0.65322", "0.6456039", "0.6399128", "0.6376486", "0.6376346", "0.6321311", "0.62462187", "0.6168609", "0.6121046", "0.60952455", "0.60772586", "0.60441196", "0.6029791", "0.60081315", "0.5985883", "0.59815985", "0.5979063", "0.59740895", "0.595946...
0.0
-1
Process and return selected confounds from the confounds file
def _select_confounds(confounds_file, selected_confounds): import pandas as pd import numpy as np confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a') # fill the first value of FramewiseDisplacement with the mean. if 'FramewiseDisplacement' in selected_confounds: confounds_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\...
[ "0.65061325", "0.5781993", "0.5781708", "0.56171685", "0.55821574", "0.55591077", "0.55106205", "0.5485491", "0.5484373", "0.5472426", "0.54602283", "0.54551107", "0.5445359", "0.5434916", "0.5391505", "0.5335677", "0.53314865", "0.53281003", "0.5295112", "0.52804834", "0.527...
0.6170222
1
Create sinusoidal timestep embeddings.
def timestep_embedding(timesteps, dim, max_period=10000): half = dim // 2 freqs = torch.exp( -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half ).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = t...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timestep_embedding(timesteps, dim, max_period=10000):\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle...
[ "0.66800493", "0.66085994", "0.60627913", "0.59043974", "0.5894194", "0.5874639", "0.58046216", "0.5689095", "0.5540392", "0.5476205", "0.54213727", "0.53775567", "0.53557706", "0.53395146", "0.5328101", "0.53141636", "0.53015023", "0.52998585", "0.52972686", "0.52835363", "0...
0.65616596
2
Return a cached copy of TestShib's metadata by reading it from disk
def metadata_callback(_request, _uri, headers): return (200, headers, self.read_data_file('testshib_metadata.xml')) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_metadata(self):\n return copy.copy(self.metadata)", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.en...
[ "0.6429627", "0.63208026", "0.6306309", "0.62240237", "0.6207978", "0.6194947", "0.6191376", "0.61298776", "0.61292166", "0.6115907", "0.60866165", "0.60811806", "0.6044135", "0.6037326", "0.6034143", "0.6017177", "0.5996634", "0.59623575", "0.594425", "0.5941117", "0.5909399...
0.0
-1
Return a cached copy of TestShib's metadata with a cacheDuration attribute
def cache_duration_metadata_callback(_request, _uri, headers): return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml')) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def get_metadata(self):\n return copy.copy(self.metadata)", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n ...
[ "0.5946828", "0.5924196", "0.5874222", "0.58641565", "0.58568573", "0.5794843", "0.57618964", "0.5748162", "0.5736788", "0.5663821", "0.5662813", "0.5655036", "0.5647174", "0.56243145", "0.55787057", "0.5567786", "0.55402327", "0.5527116", "0.55239946", "0.55070686", "0.54851...
0.6988418
0
Mock the current time for SAML, so we can replay canned requests/responses
def _freeze_time(self, timestamp): now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp) now_patch.start() self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mocked_time():\n return datetime.datetime(2017, 10, 27, 22, 54, 56, 566179)", "def test_method(self):\n response = self.app.get('/')\n assert isinstance(response.raw['now'], datetime.datetime)", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n ...
[ "0.6736465", "0.6140573", "0.60731506", "0.60279644", "0.59742117", "0.5914771", "0.5881534", "0.5857517", "0.5802075", "0.57887524", "0.57644403", "0.5713352", "0.56917495", "0.5675409", "0.56512123", "0.5646837", "0.56387556", "0.5580659", "0.557747", "0.557065", "0.5564709...
0.586782
7
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def _configure_testshib_provider(self, **kwargs): fetch_metadata = kwargs.pop('fetch_metadata', True) assert_metadata_updates = kwargs.pop('assert_metadata_updates', True) kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible'...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.ge...
[ "0.67878383", "0.63547695", "0.63465583", "0.61199355", "0.59225214", "0.58825934", "0.56425726", "0.563755", "0.562901", "0.5535802", "0.5476775", "0.53583604", "0.531819", "0.5295527", "0.52504843", "0.51937705", "0.5125788", "0.50825155", "0.5081821", "0.5010863", "0.49874...
0.7070957
0
Gets dict (string > object) of merged data about the user.
def get_response_data(self): response_data = dict(self.TOKEN_RESPONSE_DATA) response_data.update(self.USER_RESPONSE_DATA) return response_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_user_data(self):\n return {\"key\": self._key}", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.upd...
[ "0.67946744", "0.673818", "0.6689629", "0.6652367", "0.66178507", "0.66178507", "0.6612004", "0.6559976", "0.6502731", "0.6489732", "0.6368647", "0.63625497", "0.63397974", "0.6320728", "0.6294594", "0.6281204", "0.62439865", "0.62202954", "0.6208482", "0.6208218", "0.6160015...
0.6040271
28
Configure TestShib before running the login test
def test_login(self): self._configure_testshib_provider() self._test_login()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app", "def c...
[ "0.68922627", "0.6773958", "0.6753465", "0.6616434", "0.65026134", "0.6428037", "0.6423407", "0.6388931", "0.63668215", "0.6346543", "0.63392085", "0.6336799", "0.62982786", "0.62645006", "0.62591666", "0.62502235", "0.62502235", "0.62462556", "0.62462556", "0.62462556", "0.6...
0.81601787
0
Configure TestShib before running the register test
def test_register(self): self._configure_testshib_provider() self._test_register()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_test(self, test, config_json):\n pass", "def test_register():\n plug.manager.register(junit4)", "def setUpConfig(self):\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def setup_method(self, test_method):\n se...
[ "0.7068733", "0.6525172", "0.6467879", "0.63710594", "0.6313307", "0.6234248", "0.6212751", "0.60972005", "0.6043006", "0.6039801", "0.5995316", "0.5995316", "0.5995316", "0.5995316", "0.5992337", "0.5989009", "0.59598655", "0.5953964", "0.5928903", "0.59063", "0.5904783", ...
0.83610606
0
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table.
def test_login_records_attributes(self): self.test_login() record = UserSocialAuth.objects.get( user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG ) attributes = record.extra_data assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1....
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Becau...
[ "0.6245322", "0.59301543", "0.5911321", "0.58538973", "0.58124703", "0.57879347", "0.56024635", "0.5550127", "0.55485487", "0.54875433", "0.54671925", "0.54621214", "0.54423463", "0.5395432", "0.5389309", "0.5383121", "0.5311547", "0.5295665", "0.5293737", "0.52753067", "0.52...
0.7325418
0
Test SAML login logs with debug mode enabled or not
def test_debug_mode_login(self, debug_mode_enabled): self._configure_testshib_provider(debug_mode=debug_mode_enabled) with patch.object(saml_log, 'info') as mock_log: self._test_login() if debug_mode_enabled: # We expect that test_login() does two full logins, and each at...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_logging_running(self):\n tester = app.test_client(self)\n response = tester.get('/login', content_type='html/text')\n self.assertTrue(b'PLEASE LOGIN' in response.data)", "def test_logging(self):\n self._verify_logging()", "def test_successful_login(self):\n pass", "def...
[ "0.6547012", "0.61618876", "0.6142526", "0.6016165", "0.5957221", "0.59324735", "0.5907222", "0.58748484", "0.58701736", "0.5860925", "0.582712", "0.5819717", "0.57623357", "0.5748084", "0.57179964", "0.56889266", "0.56684595", "0.56650877", "0.5635419", "0.5612174", "0.55851...
0.8257416
0
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def test_configure_testshib_provider_with_cache_duration(self): kwargs = {} kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible', True) kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG) kwargs.setdefault('entity_id',...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault...
[ "0.70712364", "0.67885256", "0.6354537", "0.6346624", "0.61197853", "0.5923575", "0.5643076", "0.563825", "0.5628388", "0.55354863", "0.54767126", "0.5359936", "0.5318166", "0.5296389", "0.5250402", "0.5193824", "0.512464", "0.508345", "0.5081745", "0.50114036", "0.49877226",...
0.58824545
6
Test that when we have a TPA provider which as an explicit maximum session length set, waiting for longer than that between requests results in us being logged out.
def test_login_with_testshib_provider_short_session_length(self): # Configure the provider with a 10-second timeout self._configure_testshib_provider(max_session_length=10) now = datetime.datetime.utcnow() with freeze_time(now): # Test the login flow, adding the user in the ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inactive_session_timeout(self):\r\n email, password = self.STUDENT_INFO[0]\r\n self.login(email, password)\r\n\r\n # make sure we can access courseware immediately\r\n resp = self.client.get(reverse('dashboard'))\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n ...
[ "0.67007613", "0.6563989", "0.6529781", "0.63517386", "0.6243885", "0.6232972", "0.6106514", "0.6026753", "0.5952208", "0.59390306", "0.5926259", "0.58408374", "0.58186436", "0.5785342", "0.57843", "0.57781994", "0.57300466", "0.572285", "0.57226133", "0.5715555", "0.57084894...
0.77491003
0
Mock out HTTP calls to various endpoints using httpretty.
def setUp(self): super().setUp() # Mock the call to the SAP SuccessFactors assertion endpoint SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp' def assertion_callback(_request, _uri, headers): """ Return a fake assertion after checking that the input i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def httpretty():\n import httpretty\n httpretty.enable()\n yield httpretty\n httpretty.disable()", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n ...
[ "0.7003493", "0.6884988", "0.6701029", "0.66182", "0.6446721", "0.6388721", "0.6341374", "0.62644625", "0.62517893", "0.625138", "0.62413937", "0.6225622", "0.6221288", "0.6194367", "0.61357135", "0.61174285", "0.6117166", "0.609931", "0.60828453", "0.607748", "0.6064459", ...
0.5968934
27
Return a fake assertion after checking that the input is what we expect.
def assertion_callback(_request, _uri, headers): assert b'private_key=fake_private_key_here' in _request.body assert b'user_id=myself' in _request.body assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body assert b'client_id=TatVotSEiCMteSNW...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def expected_value(expected, actual):\n assert expected == actual", "def validate_Assert(result, _dummy_...
[ "0.6541787", "0.647255", "0.6471959", "0.64491946", "0.64080536", "0.62548614", "0.62316114", "0.6227208", "0.61887175", "0.6130757", "0.61267287", "0.6095201", "0.60947305", "0.6071231", "0.60602826", "0.6057711", "0.6053218", "0.60242915", "0.6014357", "0.5987886", "0.59692...
0.0
-1
Return a 404 error when someone tries to call the URL.
def bad_callback(_request, _uri, headers): return (404, headers, 'NOT AN ASSERTION')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def not_found():\n return HttpError(404)", "def error_404(error):\n return 'Bummer, there is nothing at this URL.'", "def not_found():\n raise cherrypy.HTTPError(404, \"Not Found.\")", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry...
[ "0.77854127", "0.7611726", "0.7589559", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.74066204", "0.73611426", "0.73296165",...
0.0
-1
Return a fake assertion after checking that the input is what we expect.
def token_callback(_request, _uri, headers): assert b'assertion=fake_saml_assertion' in _request.body assert b'company_id=NCC1701D' in _request.body assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body assert b'client_id=TatVotSEiC...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def expected_value(expected, actual):\n assert expected == actual", "def validate_Assert(result, _dummy_...
[ "0.65425646", "0.64745665", "0.64722836", "0.644988", "0.64093584", "0.62563604", "0.6232764", "0.6228062", "0.61891055", "0.6131768", "0.61279905", "0.60960275", "0.6095586", "0.6073744", "0.6061325", "0.6057807", "0.6056131", "0.6024911", "0.60140353", "0.59895396", "0.5970...
0.0
-1
Mock an error response when calling the OData API for user details.
def _mock_odata_api_for_error(self, odata_api_root_url, username): def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument """ Return a 500 error when someone tries to call the URL. """ headers['CorrelationId'] = 'aefd38b7-c92c-445a-...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_response_error(self):\n r = mock.Mock(spec=requests.Response)\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n r.status_code = 404\n self.assertRaises(exceptions.HTTPNotFound, f.user_...
[ "0.68724155", "0.6683057", "0.6637301", "0.6578398", "0.6559852", "0.6474468", "0.6464813", "0.6425861", "0.638332", "0.63573205", "0.6336519", "0.6328632", "0.63244635", "0.62987155", "0.6267484", "0.62195075", "0.61980534", "0.6191649", "0.61694646", "0.614538", "0.61373806...
0.73752326
0
Return a 500 error when someone tries to call the URL.
def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d' headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number return 500, headers, 'Failu...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def internal_error():\n return HttpError(500)", "def view_500(request, url = None):\n res = render_to_response(\"500.html\", context_instance=RequestContext(request))\n res.status_code = 500\n return res", "def server_error(e):\n return 'Error while serving request', 500", "def internal_server...
[ "0.7490835", "0.74022293", "0.70942414", "0.70713156", "0.7046498", "0.7032829", "0.70033985", "0.69633645", "0.6963141", "0.6925367", "0.6915124", "0.69088185", "0.6898223", "0.68840873", "0.6861838", "0.68376124", "0.6830139", "0.68263555", "0.6814626", "0.67630464", "0.675...
0.0
-1
Configure the provider such that it doesn't have enough details to contact the SAP SuccessFactors API, and test that it falls back to the data it receives from the SAML assertion.
def test_register_insufficient_sapsf_metadata(self): self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings='{"key_i_dont_need":"value_i_also_dont_need"}', ) # Because we're gettin...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault...
[ "0.697038", "0.6617586", "0.6555091", "0.5951466", "0.5813457", "0.5800178", "0.5706513", "0.57041866", "0.5636675", "0.56051576", "0.56023586", "0.55914366", "0.5573786", "0.5566785", "0.55414", "0.55256534", "0.5518977", "0.5488716", "0.5402298", "0.5390696", "0.5382451", ...
0.6533367
3
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is ...
def test_register_sapsf_metadata_present(self): expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva...
[ "0.7055314", "0.6939393", "0.66155446", "0.63423544", "0.576975", "0.5697624", "0.5679179", "0.55829513", "0.5382714", "0.5345334", "0.5325174", "0.53106457", "0.5242109", "0.52059597", "0.5203102", "0.5186651", "0.5169315", "0.51328266", "0.51315385", "0.5126731", "0.5115003...
0.50434846
26
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is ...
def test_register_sapsf_with_value_default(self): # Mock the call to the SAP SuccessFactors OData user endpoint ODATA_USER_URL = ( 'http://api.successfactors.com/odata/v2/User(userId=\'myself\')' '?$select=firstName,country,lastName,defaultFullName,email' ) def u...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva...
[ "0.67629653", "0.6631736", "0.6281163", "0.5879345", "0.5806605", "0.55553335", "0.5497894", "0.546793", "0.54649425", "0.54343826", "0.5406209", "0.5285645", "0.5254529", "0.52485114", "0.5243045", "0.523051", "0.5200763", "0.5126939", "0.5116786", "0.5110514", "0.51018703",...
0.63661134
2
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is ...
def test_register_sapsf_metadata_present_override_relevant_value(self): value_map = {'country': {'Australia': 'NZ'}} expected_country = 'NZ' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva...
[ "0.70553404", "0.66161525", "0.63435036", "0.57704574", "0.56966025", "0.5679671", "0.55836266", "0.53828114", "0.53427476", "0.5324517", "0.53106654", "0.52406067", "0.52070177", "0.5202646", "0.5187437", "0.51687354", "0.51323", "0.5131696", "0.5125414", "0.51162493", "0.50...
0.69396245
1
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is ...
def test_register_sapsf_metadata_present_override_other_value(self): value_map = {'country': {'United States': 'blahfake'}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_her...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_...
[ "0.6940789", "0.6617741", "0.6345586", "0.57743275", "0.5698771", "0.56804734", "0.55835336", "0.5381514", "0.5341755", "0.5321272", "0.5309637", "0.5242255", "0.5205009", "0.5200889", "0.5184601", "0.51705694", "0.51325846", "0.5132514", "0.5125601", "0.51126003", "0.5099695...
0.70560694
0
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is ...
def test_register_sapsf_metadata_present_empty_value_override(self): value_map = {'country': {}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva...
[ "0.7055314", "0.6939393", "0.63423544", "0.576975", "0.5697624", "0.5679179", "0.55829513", "0.5382714", "0.5345334", "0.5325174", "0.53106457", "0.5242109", "0.52059597", "0.5203102", "0.5186651", "0.5169315", "0.51328266", "0.51315385", "0.5126731", "0.5115003", "0.50994205...
0.66155446
2
Ensure that if there's an HTTP failure while fetching metadata, we continue, using the metadata from the SAML assertion.
def test_register_http_failure(self): self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps({ 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/', ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_metadata_saml_not_authorized():\n\n responses.add(\n responses.GET,\n f\"{SERVICE_URL}/$metadata\",\n content_type='text/html; charset=utf-8',\n status=200)\n\n with pytest.raises(HttpError) as e_info:\n pyodata.Client(SERVICE_URL, requests)\n\n assert str(e_inf...
[ "0.6340328", "0.6015547", "0.5980344", "0.58028024", "0.57724714", "0.5630444", "0.5626857", "0.55482787", "0.5516507", "0.5385363", "0.5247033", "0.51967573", "0.5134511", "0.51058185", "0.50990194", "0.5066909", "0.5056259", "0.5038707", "0.5012003", "0.49886125", "0.496665...
0.0
-1
Ensure that if there's an HTTP failure while fetching user details from SAP SuccessFactors OData API.
def test_register_http_failure_in_odata(self): # Because we're getting details from the assertion, fall back to the initial set of details. self.USER_EMAIL = "myself@testshib.org" self.USER_NAME = "Me Myself And I" self.USER_USERNAME = "myself" odata_company_id = 'NCC1701D' ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mock_odata_api_for_error(self, odata_api_root_url, username):\n\n def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument\n \"\"\"\n Return a 500 error when someone tries to call the URL.\n \"\"\"\n headers['CorrelationId'] = 'ae...
[ "0.6441003", "0.60609096", "0.6001092", "0.5984341", "0.5984341", "0.59653944", "0.5955787", "0.5928276", "0.59214807", "0.59023386", "0.5876574", "0.5876278", "0.58514905", "0.58513963", "0.5837443", "0.583195", "0.5824492", "0.5800869", "0.57902473", "0.577529", "0.5762384"...
0.58489734
14
Method handling all item specific processing. Returns dictionary containing scraped report.
def process_item(self, item, spider): # Memory has extra postfix letters and they need to be remove # and then converted into actual integer numeric = RE_MATCH.match(item['Memory']).group(0) item['Memory'] = int(numeric) # The same case as above but here the value is a float ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processItem(self):\r\n self.extract()\r\n self.mergeLanguageClaims()\r\n self.validateClaims()\r\n self.mergeWithWikidata()\r\n self.writeToWikidata()\r\n self.log()", "def _parse_item(self, item):\n result = {}\n for f in self._invoice_report_item_fiel...
[ "0.6547524", "0.6395213", "0.619723", "0.6070499", "0.6036251", "0.6004193", "0.59119064", "0.58983403", "0.58883923", "0.5818666", "0.5750651", "0.5722851", "0.5683043", "0.5669266", "0.56607234", "0.5656393", "0.56268835", "0.5572283", "0.5531241", "0.551983", "0.5516696", ...
0.5546761
18
Given inputs, take move and return outputs.
def move(self, env2): output = dict() if self.state == 0: if (env2 == 0): self.state = 0 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 33 output["loc"] = 17 o...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transduce(self,inputs):\n self.start()\n return [self.step(inp) for inp in inputs]", "def api_make_move(self, move_input):\n return self.board.attempt_move(move_input)", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def move(self, output_fi...
[ "0.62765896", "0.5847411", "0.58237934", "0.5772807", "0.5766127", "0.56973183", "0.56731117", "0.5606184", "0.54958546", "0.5494064", "0.546905", "0.5464436", "0.54533064", "0.5435174", "0.54249644", "0.54240054", "0.5407606", "0.5406141", "0.5403741", "0.5398427", "0.538479...
0.0
-1
Create source links to github
def linkcode_resolve(domain, info): if domain != 'py' or not info['module']: return None filename = info['module'].replace('.', '/') return "https://github.com/mathcamp/flywheel/blob/%s/%s.py" % (version_data['ref'], filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repo_link(repo):\n return \"https://github.com/\" + repo", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return bas...
[ "0.6921794", "0.68528706", "0.6557395", "0.6166538", "0.6133582", "0.6098029", "0.6074224", "0.6037421", "0.6030705", "0.6019107", "0.59831655", "0.596135", "0.59046274", "0.58846396", "0.58756024", "0.58383036", "0.5835905", "0.5808838", "0.5806285", "0.579724", "0.5761678",...
0.5238986
86
Accept the path to the text files with lake ice fraction timeseries
def get_ts_from_file(path="", start_year=-np.Inf, end_year=np.Inf) -> pd.DataFrame: df = pd.DataFrame.from_csv(path, sep="\s+") cnames = df.columns[:] for c in cnames: y = int(c) if y < start_year or y > end_year: df.drop(c, axis=1, inplace=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_local_20Hz_files(**kwargs):\n pathlst = kwargs.get('pathlst')\n product = kwargs.get('product')\n varalias = kwargs.get('varalias')\n sdate = kwargs.get('sdate')\n edate = kwargs.get('edate')\n twin = kwargs.get('twin')\n\n # establish coords if defined in config file\n timestr = s...
[ "0.5803808", "0.57068276", "0.56613946", "0.5642458", "0.562522", "0.55623674", "0.55397063", "0.5512734", "0.5485281", "0.5476176", "0.5475341", "0.54747045", "0.54190755", "0.54162836", "0.53791934", "0.5370917", "0.5362841", "0.535579", "0.5337363", "0.5330271", "0.5322317...
0.0
-1
Test case for get_chain_by_id
def test_get_chain_by_id(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chains(self):\n pass", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def test_solareclipses_id_get(self):\n pass", "def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add....
[ "0.66562283", "0.6344267", "0.60350573", "0.6012869", "0.5983662", "0.5644152", "0.5638522", "0.56080955", "0.55986637", "0.5579138", "0.54949653", "0.5461469", "0.54595417", "0.5459411", "0.5446837", "0.5446837", "0.54265", "0.53988206", "0.5392831", "0.5390298", "0.5332432"...
0.93600637
0
Test case for get_chains
def test_get_chains(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chain_by_id(self):\n pass", "def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains", "def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(lis...
[ "0.6914287", "0.67781395", "0.6644035", "0.6538033", "0.652312", "0.6489876", "0.64195883", "0.63450104", "0.62671566", "0.61932814", "0.60854757", "0.6066701", "0.605091", "0.6029899", "0.6004666", "0.60018355", "0.5970353", "0.59533113", "0.5910838", "0.5885802", "0.5868115...
0.94273794
0
Test case for post_chain
def test_post_chain(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_chain_search(self):\n pass", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self...
[ "0.72168523", "0.65788776", "0.6171648", "0.6129438", "0.61099774", "0.61021096", "0.60098636", "0.5998518", "0.59712416", "0.5869386", "0.58414704", "0.5824033", "0.5820647", "0.5788468", "0.5751005", "0.5730567", "0.57289803", "0.57287365", "0.5715039", "0.5704063", "0.5691...
0.9139945
0
Test case for post_chain_search
def test_post_chain_search(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_chain(self):\n pass", "def test_post_foods_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search_systems_post(self):\n pass", "def test_search_organizations_post(...
[ "0.7415727", "0.6913196", "0.66014063", "0.66014063", "0.66014063", "0.65811795", "0.6300726", "0.615566", "0.5992487", "0.5984323", "0.59211963", "0.58675545", "0.58613753", "0.58335793", "0.58033735", "0.5775473", "0.57752", "0.5766603", "0.57302696", "0.57134694", "0.56959...
0.9217483
0
Build a networkx graph object from variables and relations.
def as_networkx_graph(variables, relations): graph = nx.Graph() # One node for each variables graph.add_nodes_from([v.name for v in variables]) for r in relations: for p in all_pairs([e.name for e in r.dimensions]): graph.add_edge(*p) return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n ...
[ "0.73159355", "0.67514896", "0.6639457", "0.663262", "0.6631219", "0.6617338", "0.65870404", "0.6585843", "0.65561634", "0.64985657", "0.64818686", "0.64802456", "0.64401877", "0.64243275", "0.64049304", "0.6382079", "0.63409054", "0.6319503", "0.6298616", "0.6286521", "0.627...
0.81062376
0
Build a networkx graph object from variables and relations.
def as_networkx_bipartite_graph(variables, relations): graph = nx.Graph() # One node for each variables graph.add_nodes_from([v.name for v in variables], bipartite=0) graph.add_nodes_from([r.name for r in relations], bipartite=1) for r in relations: for e in r.dimensions: graph...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def initial...
[ "0.81066954", "0.6750233", "0.6639164", "0.66322136", "0.6630946", "0.6617218", "0.6586329", "0.65858966", "0.65559703", "0.6498455", "0.64820826", "0.6480336", "0.6440278", "0.64248806", "0.6404701", "0.6381441", "0.63398075", "0.6318121", "0.62991834", "0.6286606", "0.62709...
0.73165965
1
Display the variables and relation as a graph, using networkx and matplotlib.
def display_graph(variables, relations): graph = as_networkx_graph(variables, relations) # Do not crash if matplotlib is not installed try: import matplotlib.pyplot as plt nx.draw_networkx(graph, with_labels=True) # nx.draw_random(graph) # nx.draw_circular(graph) # ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graph(self) -> None:", "def display_biparti...
[ "0.75382435", "0.7278241", "0.7211741", "0.71995574", "0.70474607", "0.69660336", "0.6962253", "0.68822443", "0.6876223", "0.68665344", "0.685607", "0.6749594", "0.67388505", "0.67268574", "0.6711935", "0.6700764", "0.6693562", "0.6683998", "0.6630944", "0.6603868", "0.655565...
0.8590499
0
Display the variables and relation as a graph, using networkx and matplotlib.
def display_bipartite_graph(variables, relations): graph = as_networkx_bipartite_graph(variables, relations) # Do not crash if matplotlib is not installed try: import matplotlib.pyplot as plt pos = nx.drawing.spring_layout(graph) variables = set(n for n, d in graph.nodes(data=True)...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(gra...
[ "0.8590499", "0.75382435", "0.7278241", "0.71995574", "0.70474607", "0.69660336", "0.6962253", "0.68822443", "0.6876223", "0.68665344", "0.685607", "0.6749594", "0.67388505", "0.67268574", "0.6711935", "0.6700764", "0.6693562", "0.6683998", "0.6630944", "0.6603868", "0.655565...
0.7211741
3
Compute the graph diameter(s). If the graph contains several independent sub graph, returns a list the diamater of each of the subgraphs.
def graph_diameter(variables, relations): diams = [] g = as_networkx_graph(variables, relations) components = (g.subgraph(c).copy() for c in nx.connected_components(g)) for c in components: diams.append(nx.diameter(c)) return diams
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_path...
[ "0.700528", "0.64061725", "0.6378661", "0.6351375", "0.6088722", "0.5843668", "0.5843668", "0.5829243", "0.57851386", "0.56062824", "0.55966824", "0.55673695", "0.55673695", "0.55229545", "0.5520967", "0.5497087", "0.54773235", "0.54561347", "0.5399723", "0.5395755", "0.53897...
0.7837539
0
Generate all possible pairs from the list of given elements.
def all_pairs(elements): if len(elements) < 2: return [] elif len(elements) == 2: return [(elements[0], elements[1])] else: new_pairs = [] for elt in elements[1:]: new_pairs.append((elements[0], elt)) return all_pairs(elements[1:]) + new_pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def __unordered_pairs(l):\n\n ...
[ "0.7500274", "0.6851043", "0.6840614", "0.68141717", "0.6754132", "0.669949", "0.6683781", "0.6671177", "0.6668704", "0.65523124", "0.64457446", "0.64399433", "0.6394851", "0.636628", "0.6352524", "0.6351291", "0.6324047", "0.6315178", "0.6308765", "0.63008136", "0.62359875",...
0.8119166
0
Takes an optimisation step by calculating gradients given the loss and then updating the parameters.
def take_optimisation_step( self, optimizer, network, loss, clipping_norm=None, retain_graph=False ): if not isinstance(network, list): network = [network] # reset gradients to 0 optimizer.zero_grad() # this calculates the gradients loss.backward(retain_graph=retain_g...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_parameters(self, loss):\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def update_params(self, loss, step_size=0.5, first_order=False):\n #grads = torch.autograd.grad(loss, self.parameters(),\n # create_graph=not first_order)\n s...
[ "0.7796381", "0.7476913", "0.7210945", "0.71824896", "0.7161772", "0.7142518", "0.7097461", "0.7039974", "0.7000394", "0.6955703", "0.69353473", "0.6932707", "0.6929035", "0.6923562", "0.68890387", "0.68879765", "0.68699014", "0.6863703", "0.6860742", "0.68581945", "0.684478"...
0.0
-1
Returns an encoded sequence from contents in msg
def encode(msg): #Corner cases: if msg == '': return '' # the empty string yields an empty string if not isinstance(msg, str): return '' # What to do on non-strings (isinstance also allows for subclasses) if msg == None: return '' # If None (void) is passed in res =...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(rosMsg): #@NoSelf", "def encode(self, seq):", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def chunkify(msg):\n return [\"%s %s\" % (i, msg[i*158 : (i+1)*158]) for i in range(len(msg)/158 + 1)]", "def en...
[ "0.6726", "0.64823467", "0.6431121", "0.6076899", "0.60573506", "0.6021212", "0.6009153", "0.59417653", "0.5894444", "0.58700716", "0.5841319", "0.5840961", "0.5782641", "0.5758987", "0.572157", "0.57201093", "0.5666906", "0.56631124", "0.56581855", "0.5646804", "0.56445134",...
0.5922205
8
Decodes a run-length encoded sequence such as '2k3b' into 'kkbbb'
def decode(seq): # Handle some corner cases if (not seq) or (not isinstance(seq, str)): return '' # Return empty string on non-strings and all non-true values (empty string, None, 0, ...) # Use regex to match patterns, t is then a list of tuples (if any patterns found) # '2k3b' -> [('2','k')...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def decode(b):\n\n if b.startswith(\"0z\"):\n b = b[2:]\n\n l, i, v = len(b), 0, 0\n ...
[ "0.69079274", "0.67722535", "0.6671197", "0.6628837", "0.6628837", "0.6597548", "0.6395014", "0.6347437", "0.63195705", "0.62489367", "0.6163923", "0.6139675", "0.6125669", "0.609444", "0.6063985", "0.6007184", "0.59968084", "0.59952885", "0.59780675", "0.5940352", "0.5912996...
0.63877225
7
type + sequence_number + key_size + key + value_size + value 1bit 63bit 32bit varlength 32bit varlength
def __init__(self, key, sequence_number, type=KeyType.PUT, value=None):
    """Create a key/value record entry.

    :param key: record key; must not be None
    :param sequence_number: non-negative sequence number (fits in 63 bits,
        per the serialization format described above — TODO confirm callers
        never exceed that)
    :param type: KeyType.PUT or KeyType.DELETE (note: shadows the builtin
        `type`; kept for interface compatibility)
    :param value: payload; typically None for deletes
    """
    assert key is not None
    assert sequence_number >= 0
    self.key = key
    self.value = value
    self.type = type
    self.sequence_number = sequence_number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_meta_chunk(key, value):\n bkey = key.encode(\"utf-8\")\n bvalue = value.encode(\"utf-8\")\n return (wozardry.to_uint32(len(bkey) + len(bvalue) + 2) + bkey + b'\\x09' + bvalue + b'\\x0A').hex()", "def _pack_dict( self, table, pad = False ) :\r\n\r\n keys, values = zip( *table...
[ "0.5809457", "0.5597021", "0.55033654", "0.5478504", "0.5475797", "0.5431724", "0.54011375", "0.53711766", "0.535695", "0.53174025", "0.53174025", "0.5285025", "0.5257738", "0.52197987", "0.5217525", "0.52163196", "0.51897675", "0.51784354", "0.5164822", "0.5161085", "0.51330...
0.48401326
43
Serialize the internal key-value pair to a byte array; only pickle objects when necessary
def serialize(self): byte_array = bytearray() header = ( self.sequence_number | (1 << 63) if self.type == KeyType.PUT else self.sequence_number ) # append header first byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8)) pi...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, value) -> bytes:\n pass", "def _encode_value(self, value):\n return pickle.dumps(value)", "def serialize(obj):\n return pickle.dumps(obj)", "def dump_object(self, value):\n return pickle.dumps(value)", "def __bytes__(self):\n byteout = bytearray()\n f...
[ "0.64888334", "0.6360432", "0.6315611", "0.6277576", "0.6177769", "0.6171004", "0.6164912", "0.6144653", "0.6144653", "0.61439574", "0.6126626", "0.6126626", "0.60924256", "0.60020936", "0.59987134", "0.59793663", "0.5974535", "0.59639865", "0.5963505", "0.5941758", "0.593160...
0.7145172
0
Return None if parsing failed.
def deserialize(file_io): header = file_io.read(8) if len(header) != 8: return None # parsing header header = byte_utils.byte_array_to_integer(header) type = KeyType.PUT if (header & (1 << 63)) else KeyType.DELETE sequence_number = header & ((1 << 63) - 1) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse(self):\n pass", "def test_simple_parse(self):\n pass", "def parse(self) -> None:\n pass", "def parse(self):", "def parse(self, text):\n node = self.match(text)\n if node is None or node.end - node.start != len(text): # TODO: Why not test just end here? Are we ...
[ "0.6728034", "0.65789205", "0.6491963", "0.64315605", "0.6299717", "0.6294331", "0.6294331", "0.6294331", "0.6294331", "0.6265237", "0.62311304", "0.60790735", "0.60173225", "0.6007963", "0.6007318", "0.6002458", "0.5985229", "0.59831566", "0.5947476", "0.5941684", "0.5936475...
0.0
-1
Import ASHRAE data from a directory containing the .csv files.
def import_data(ashrae_dir, filenames=const.NAMES):
    """Load the ASHRAE csv files from a directory into a dict of DataFrames.

    :param ashrae_dir: path to the directory holding one <name>.csv per entry
        in `filenames`
    :param filenames: iterable of dataset names without the .csv suffix
        (defaults to the project-level const.NAMES)
    :return: dict mapping each name to the DataFrame read from its csv file
    """
    print('Importing data from csv')
    base_dir = pathlib.Path(ashrae_dir)
    return {
        name: pd.read_csv((base_dir / name).with_suffix('.csv'))
        for name in filenames
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd...
[ "0.64119494", "0.62574255", "0.6136558", "0.60596466", "0.6012882", "0.5962187", "0.5958087", "0.5912588", "0.58974314", "0.5878545", "0.58628714", "0.5797354", "0.57774633", "0.577703", "0.5762553", "0.57592994", "0.57388955", "0.57192475", "0.57123953", "0.5707139", "0.5690...
0.7007926
0
Import ASHRAE data with optional caching mechanism.
def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES): cache_file = pathlib.Path(cache_file) if cache_file is not None and cache_file.exists(): data = import_dict_from_cached(cache_file, filenames) else: data = import_data(ashrae_dir) _cache_data(data, cache_file) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_(self, data):\n return self.__import(data)", "def load_data(self) -> None:", "def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n if a2e_data_path is not None and not a2e_data_path.startswith('http') and not a2e_data...
[ "0.5765304", "0.5686181", "0.56845134", "0.56063354", "0.56054366", "0.55914676", "0.5575657", "0.55459553", "0.5527377", "0.54661703", "0.5434593", "0.54312104", "0.5410122", "0.5381183", "0.5379974", "0.5346656", "0.5326773", "0.5326303", "0.52563024", "0.52543855", "0.5239...
0.61214995
0
Return the number of timestamps missing
def count_missing_timestamps(df): no_of_timestamps = len(df.timestamp) no_of_sites = len(set(df.site_id)) full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H') no_of_missing_timestamps = no_of_sites * len(full_date_range) - no_of_timestamps print(f'There are {no_o...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n # Check if there are data\n if timesteps.size == 0:\n raise ValueError(\"No data available !\")\n # Check if missing timesteps\n dt = np.diff(timesteps)\n dts, counts = np.unique(dt, return...
[ "0.65900165", "0.6369049", "0.6316393", "0.6256028", "0.62290597", "0.617701", "0.61646646", "0.6154375", "0.60366976", "0.6026818", "0.6009626", "0.5978013", "0.59768033", "0.5961701", "0.5955014", "0.58601505", "0.5835052", "0.58279943", "0.5825557", "0.5720708", "0.5682689...
0.8279923
0
Add missing timestamps to weather data and interpolate to fill in the data return df with missing times and weather data filled in
def add_missing_weather_data(df): full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H') sites = list(set(df.site_id)) full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range), columns=['site_id', 'timestamp']) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['in...
[ "0.7080858", "0.68412477", "0.6801546", "0.67576146", "0.6569322", "0.6546012", "0.6509167", "0.6458176", "0.6420076", "0.6409065", "0.6344805", "0.6284957", "0.62729025", "0.6232209", "0.6207936", "0.6158501", "0.61142576", "0.6104147", "0.606371", "0.5919506", "0.59161085",...
0.8054154
0
Convert timestamps to timestamp objects, fill in blanks in weather data, add names of meter types
def clean_data(raw_data, names=const.NAMES, meter_map=const.METER_MAP): cleaned_data = {} local_names = names.copy() if 'building_metadata' in local_names: local_names.remove('building_metadata') for name in local_names: print(f'Cleaning {name} dataset') df = raw_data[name] ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_time(data, metadata):\n timestamp_name = metadata[\"timestamp_name\"]\n if timestamp_name == \"\":\n timestamp_name = \"fake_ts\"\n data[timestamp_name] = data.index\n\n data[timestamp_name] = pd.to_datetime(data[timestamp_name])\n data.sort_values(by=[timestamp_name], inpl...
[ "0.5928694", "0.5771849", "0.57145715", "0.56722647", "0.5632749", "0.55614084", "0.55107826", "0.55105793", "0.5509269", "0.55014396", "0.5444029", "0.5409909", "0.5388422", "0.5376563", "0.5362756", "0.535319", "0.5340124", "0.5325145", "0.53104377", "0.5308769", "0.5271506...
0.5138391
31
Join together the meter data, weather data and building metadata into one df data = dict of df's (keys are'building_metadata', 'weather_train', 'weather_test', 'train','test') dataset_name = 'train' or 'test' returns a merged df which includes building_metadata, weather_train (or weather_test) and train (or test)
def join_input_data_and_multi_index(data, dataset_name): meter_df = data[dataset_name] building_df = data['building_metadata'] weather_df = data['weather_' + dataset_name] # join meter and weather data building_n_meter = meter_df.merge(building_df, on='building_id', how='left') joined_data = b...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression...
[ "0.59869367", "0.57781816", "0.5758608", "0.563408", "0.56181586", "0.55629724", "0.5561206", "0.5549236", "0.5505305", "0.55047166", "0.54905367", "0.54782295", "0.54737127", "0.5469189", "0.544329", "0.54359186", "0.54143375", "0.5409169", "0.5407379", "0.53848493", "0.5383...
0.69934803
0
Split the joined data into a dict with a df for each meter type
def split_on_meter_type(joined_data, meter_types):
    """Split the joined dataset into one DataFrame per meter type.

    :param joined_data: DataFrame with a 'meter_type' column
    :param meter_types: iterable of meter-type labels to split on
    :return: dict mapping each meter type to the rows whose 'meter_type'
        column equals it
    """
    by_type = {}
    for meter_type in meter_types:
        mask = joined_data['meter_type'] == meter_type
        by_type[meter_type] = joined_data[mask]
    return by_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"'...
[ "0.54995227", "0.5438723", "0.5368502", "0.5285218", "0.5209991", "0.5202449", "0.51980925", "0.5175748", "0.5165017", "0.5150522", "0.51441115", "0.5076895", "0.50656265", "0.50650203", "0.5061612", "0.5057883", "0.50543916", "0.5050856", "0.49904716", "0.4989884", "0.498955...
0.806629
0
dataset_name should be 'train' or 'test'
def produce_and_cache_small_dataset_dict(dataset_name, n=500000, meter_types=const.METER_MAP.values()): small_dataset_cache_file = pathlib.Path(dataset_name + '_small_store_joined.h5') if small_dataset_cache_file.exists(): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\...
[ "0.7228631", "0.70241654", "0.70175004", "0.70087993", "0.6977617", "0.6900802", "0.68130064", "0.6774054", "0.67727417", "0.67533123", "0.673811", "0.67380196", "0.6732079", "0.6708", "0.6619651", "0.6580174", "0.65409255", "0.6507022", "0.6495979", "0.6469288", "0.64399636"...
0.0
-1
Return the number of dimensions of the state space
def getStatesDim(self):
    """Return the number of dimensions of the state space.

    Always 2 — presumably an (x, y) grid position; confirm against the
    environment class.
    """
    STATE_DIMENSIONS = 2
    return STATE_DIMENSIONS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_space_dimension(cls) -> int:\n return 1", "def n_dim(self):\n return self._n_dim", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def _get_observation_dimension(self):\n return len(self._get_observation...
[ "0.8514534", "0.78174734", "0.77158946", "0.77158946", "0.7706377", "0.77037966", "0.7664541", "0.76440984", "0.7634386", "0.7625269", "0.75904065", "0.7558156", "0.75198585", "0.7480286", "0.7480286", "0.7450488", "0.7439861", "0.74317974", "0.73826283", "0.7376509", "0.7371...
0.76861554
6
Returns the max and min values each dimension can take. These are returned as two tuples, `low` and `high`, each containing as many elements as there are dimensions in the state space.
def getStatesBounds(self):
    """Return (low, high) bounds for each state-space dimension.

    low is (0, 0); high is (width - 1, height - 1), i.e. the last valid
    grid coordinates.
    """
    low = (0, 0)
    high = (self._width - 1, self._height - 1)
    return low, high
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in se...
[ "0.72885597", "0.6820047", "0.6709689", "0.6641923", "0.661468", "0.648229", "0.64780694", "0.6453788", "0.64332044", "0.6412223", "0.6407123", "0.63986015", "0.63950133", "0.63915455", "0.6389562", "0.63883317", "0.6370339", "0.63583744", "0.6325942", "0.629759", "0.62925905...
0.58876115
80
The agent take the given action and receives back the new state, reward, whether the episode is terminated and some nothingness.
def step(self, action): x, y = self._move(action, *self._currentPos) if chr(self._grid[x, y]) == CASE_TYPES.Wall: # error - previous state was already a wall self._done = True self._trajectory.append(self._currentPos) return self._currentPos, -1, self._do...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_step(self, reward, state):\n self.sum_rewards += reward\n self.episode_steps += 1\n\n # Make state an array of shape (1, state_dim) to add a batch dimension and\n # to later match the get_action_values() and get_TD_update() functions\n state = np.array(state)\n\n ...
[ "0.77299076", "0.7505443", "0.7489188", "0.7451101", "0.7446967", "0.7423517", "0.7353827", "0.73296374", "0.732902", "0.7246652", "0.71973366", "0.71956855", "0.7170794", "0.7170417", "0.7162732", "0.71451074", "0.7140692", "0.71406287", "0.71278226", "0.71219754", "0.711092...
0.0
-1
Reset the state of the environment for a new episode. `setup` is used to let the reset function know when we're calling it from `setup`. If we don't, the 'random' init scheme should reset to the randomly chosen position instead of picking a new random one.
def reset(self, setup=False): self._done = False self._nbSteps = 0 x = None if (self.startPosX == 'random' and setup) or ( self.startPosX == 'episodeRandom'): x = random.randint(0, self._width - 1) elif (self.startPosX == 'random' and not setup): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_r...
[ "0.66944516", "0.6647286", "0.65143913", "0.6312656", "0.6290814", "0.62830704", "0.6277763", "0.62062037", "0.61978877", "0.6173854", "0.61685586", "0.61399466", "0.613353", "0.61265284", "0.6108667", "0.6104944", "0.60824627", "0.60824627", "0.6075161", "0.6065723", "0.6042...
0.6798604
0
Render the environment serverside
def render(self, mode="human", close=False): if close and self._viewer is None: if self._viewer is not None: self._viewer.close() self._viewer = None return screen_width = 600 screen_height = 600 if self._viewer is None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self):\n self.env.render()", "def render(self):\n self.rendering = True\n self.env.render()", "def render(self, mode='human'):\n return self._env.render(mode)", "def _serve_environment(self, request):\n return http_util.Respond(\n request,\n {\n ...
[ "0.8079139", "0.7380633", "0.6701152", "0.6639087", "0.6605521", "0.6443869", "0.6436322", "0.64339954", "0.6354514", "0.6319063", "0.6319063", "0.6318727", "0.62650275", "0.6256661", "0.62470657", "0.62287277", "0.6225323", "0.6188681", "0.6188681", "0.61411625", "0.61068594...
0.0
-1
Fonction sans argument qui affiche un labyrinthe
def afficher_carte(self):
    """Display the maze by printing this object's string representation."""
    rendering = str(self)
    print(rendering)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple():", "def simple():", "def sth():", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def plothub1():\r\n pass", "def saluda2(sujeto):\n print 'Hola %s !!' % sujeto", "def func():", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def getLabel2(*a...
[ "0.63914204", "0.63914204", "0.5986804", "0.59826845", "0.59645784", "0.5792992", "0.5778979", "0.5771139", "0.5771139", "0.5771139", "0.57593817", "0.57593817", "0.57460934", "0.5673789", "0.5656487", "0.5655953", "0.56223005", "0.5616206", "0.560993", "0.56080663", "0.56043...
0.0
-1
do api request, parse error, return response.
def get_subtitleinfo(fileFullName): sys.stdout.write("Requesting subtitle info...\n") #接口获取字幕信息 response = requests.post( "https://www.shooter.cn/api/subapi.php", verify=False, params= { 'filehash': ComputeFileHash(fileFullName), 'pathinfo': os.path.realpath(...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['...
[ "0.6998465", "0.697629", "0.6958306", "0.68044615", "0.67445", "0.673964", "0.67353", "0.67159045", "0.6714112", "0.6700891", "0.6693908", "0.66411114", "0.6610895", "0.6592531", "0.65848005", "0.6565488", "0.65573376", "0.6525482", "0.64771914", "0.6451753", "0.6419186", "...
0.0
-1
Returns Home Assistant base url without '/api' or trailing slash
def get_url(self, url):
    """Return the Home Assistant base URL without a '/api' suffix or trailing slash.

    Only a trailing '/api' path segment is removed. The previous
    str.replace("/api", "") stripped "/api" anywhere in the URL, mangling
    hosts or paths that merely contain it (e.g. 'https://ha.local/api-proxy').

    :param url: the configured URL; must be non-empty
    :return: the base URL, with no trailing slash
    :raises ValueError: if `url` is missing or empty
    """
    if not url:
        raise ValueError('Property "url" is missing in config')
    base = url.rstrip("/")
    if base.endswith("/api"):
        base = base[: -len("/api")]
    # Drop any slash left just before the removed '/api' segment.
    return base.rstrip("/")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url", "def api_url(self) -> httpx.URL:\n return self._client.base_url", "def get_api_url() -> str:\n...
[ "0.70845884", "0.69659054", "0.69256717", "0.69165707", "0.6896292", "0.68960935", "0.6873356", "0.68278384", "0.6792706", "0.67290175", "0.6711364", "0.6674965", "0.6651999", "0.6650969", "0.66333526", "0.65721595", "0.65439534", "0.653448", "0.6530355", "0.6518613", "0.6511...
0.6285166
37
Make sure the netcdf cc data handler operates correctly
def test_data_handling_nc_cc(): input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'), os.path.join(TEST_DATA_DIR, 'va_test.nc'), os.path.join(TEST_DATA_DIR, 'orog_test.nc'), os.path.join(TEST_DATA_DIR, 'zg_test.nc')] with xr.open_mfdataset(input_fil...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_data(self):\n # ================ CHECK DATA / CONNECT / SELECT ================\n N = self.xyz.shape[0]\n # Chech array :\n if (self.connect.shape != (N, N)) or not isinstance(self.connect,\n np.ndarray):\n ...
[ "0.60441834", "0.5924442", "0.5870361", "0.56942517", "0.5658238", "0.56382203", "0.5624735", "0.5600587", "0.5563595", "0.55417067", "0.54565823", "0.5366081", "0.5311197", "0.53014123", "0.5301123", "0.52937174", "0.5290408", "0.5289225", "0.52625626", "0.52489024", "0.5247...
0.74189216
0
Test solar data handling from CC data file with clearsky ratio calculated using clearsky ratio from NSRDB h5 file.
def test_solar_cc(): features = ['clearsky_ratio', 'rsds', 'clearsky_ghi'] input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')] nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5') with xr.open_mfdataset(input_files) as fh: min_lat = np.min(fh.lat.values) min_l...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', ...
[ "0.6232863", "0.6190931", "0.6065787", "0.5965935", "0.59624934", "0.5858929", "0.5856982", "0.5837951", "0.5831565", "0.58128524", "0.5789938", "0.57884246", "0.5777758", "0.5774943", "0.57337826", "0.5721014", "0.569534", "0.5672289", "0.56514597", "0.5634904", "0.5580489",...
0.7407615
0
Perform a 'run' action on a module. Module should have status LOADED for a System to actually call this method.
def run(self, **kwargs) -> None:
    """Perform the module's 'run' action.

    Abstract hook: this base implementation always raises, so subclasses
    must override it. Per the surrounding description, the System is
    expected to call this only when the module status is LOADED — TODO
    confirm against the System implementation.

    :param kwargs: run options forwarded by the caller
    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def module_runner(module):\n task_queue.put(1)\n result = sys.modules[module].run()\n task_queue.get()\n store_module_result(result) # Store the result in our repo", "def do_run(self, line: str):\n if self._real_module is None:\n print(\"'run' command depends on using a module. See...
[ "0.749783", "0.74799657", "0.70269644", "0.69892204", "0.68432194", "0.67123836", "0.6491966", "0.64515406", "0.6416656", "0.64033836", "0.63711715", "0.6279245", "0.6268681", "0.6221671", "0.6220211", "0.62159735", "0.6164637", "0.61534256", "0.6147704", "0.61080045", "0.608...
0.0
-1
keys_to_track order is important! Matches will be tested in this order.
def __init__(self, keys_to_track):
    """Build a matcher over the given keys.

    The order of `keys_to_track` matters: matches are tested in this order.

    :param keys_to_track: sequence of key names to index matches by
    """
    self.keys_to_track = keys_to_track
    # One empty lookup table per tracked key.
    self.tracker = {tracked_key: {} for tracked_key in keys_to_track}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "d...
[ "0.72052854", "0.6073225", "0.5684413", "0.5646537", "0.55177075", "0.54479295", "0.54022795", "0.5286637", "0.5255114", "0.52513975", "0.5236649", "0.52352864", "0.5173165", "0.5170619", "0.5168231", "0.51618785", "0.51518595", "0.51467997", "0.5143868", "0.51270205", "0.509...
0.6227405
1
Add obj as a match for match_dict values. Checks to make sure match_dict keys are valid.
def add(self, obj, match_dict): for match_key in match_dict.keys(): assert match_key in self.keys_to_track for key_to_track in self.keys_to_track: if match_dict.has_key(key_to_track): match_val = match_dict[key_to_track] if match_val is Non...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "d...
[ "0.72251153", "0.6223135", "0.612522", "0.6043846", "0.6039093", "0.5865672", "0.5826964", "0.5673978", "0.5657533", "0.56395197", "0.5595575", "0.55939347", "0.5582442", "0.55700904", "0.55490994", "0.54979956", "0.5410391", "0.5368418", "0.536791", "0.535772", "0.5288205", ...
0.84316427
0
Find a match using match_dict. Returns None if there is no match. Checks to make sure match_dict keys are valid.
def match(self, match_dict): for match_key in match_dict.keys(): assert match_key in self.keys_to_track for key_to_track in self.keys_to_track: if match_dict.has_key(key_to_track): match_val = match_dict[key_to_track] if self.tracker[key_to...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None", "def dict_match(d, key, default=None):\n\n if key in d and \"[\" not i...
[ "0.6650081", "0.64368933", "0.62785393", "0.6271919", "0.606207", "0.60218126", "0.6009384", "0.59157956", "0.5891576", "0.586902", "0.5831622", "0.58111554", "0.58071977", "0.580484", "0.57333195", "0.5707258", "0.5707258", "0.57004094", "0.5668807", "0.561176", "0.5579269",...
0.8009377
0
Utility function to populate key_matcher from self.records.
def _add_matches(self):
    """Register every record with self.key_matcher, keyed on its tracked fields."""
    for record in self.records:
        # Pull only the fields the matcher tracks; missing fields become None.
        tracked_fields = {key: record.get(key) for key in self.key_matcher.keys()}
        self.key_matcher.add(obj=record, match_dict=tracked_fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_source_key(self, match):\n raise NotImplementedError", "def __init__(self):\n self.key_to_record = {}\n self.mutation_to_key = {}\n self._innovation_key_generator = count(0)", "def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n ...
[ "0.5777237", "0.5721886", "0.5531455", "0.548444", "0.54103255", "0.54072803", "0.5384197", "0.53711194", "0.5370351", "0.53101146", "0.5296817", "0.52691966", "0.5252932", "0.5242916", "0.5215903", "0.52110225", "0.5207979", "0.5202271", "0.5188836", "0.5164202", "0.51598597...
0.7259984
0
Check if the origin_imgs are flipped correctly.
def _check_flip(origin_imgs, result_imgs):
    """Return True iff result_imgs equals origin_imgs mirrored along the width axis.

    Vectorized replacement for the original (h, w, c) triple Python loop:
    one C-level comparison against the width-reversed view instead of an
    element-by-element scan. Assumes (h, w, c) array inputs (they expose
    `.shape`) — TODO confirm callers never pass nested lists. Unlike the
    loop, mismatched shapes now return False instead of raising.
    """
    import numpy as np  # local import keeps this block self-contained
    return bool(np.array_equal(result_imgs, np.asarray(origin_imgs)[:, ::-1, :]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(...
[ "0.7891626", "0.7111249", "0.588529", "0.5814684", "0.579397", "0.5752564", "0.5637771", "0.5609224", "0.5603559", "0.5602122", "0.55922", "0.5581342", "0.5511366", "0.5511366", "0.5508238", "0.54916257", "0.545488", "0.54529166", "0.5440112", "0.5437185", "0.5426509", "0.5...
0.8528314
0
Make a request to the subreddit and return the number of subscribers.
def recurse(subreddit, hot_list=[]): headers = {'User-Agent': 'Mauricio'} url = 'http://www.reddit.com/r/' + subreddit + '/hot/.json' r = requests.get(url, headers=headers, params=parameters) if r.status_code == 200: answer_list_10 = r.json().get('data').get('children') for top in range...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {\"User-Agent\": \"my-integration/1.2.3\"}\n\n response = get(url=url, headers=headers)\n\n if response.status_code == 200:\n # print(response.json())\n\n response_json...
[ "0.76823676", "0.7451481", "0.7448277", "0.73799473", "0.72858876", "0.7222261", "0.71943074", "0.7162595", "0.7158168", "0.7157379", "0.71367633", "0.71244204", "0.71223074", "0.71045095", "0.7061548", "0.70038885", "0.6953015", "0.6951698", "0.6925322", "0.69033", "0.682086...
0.0
-1
A cumulative metric is one that needs a prior value to calculate the next value. i.e. only the deltas for the current observed values are reported. A noncumulative metric is one where the absolute observed value is reported every time.
def is_cumulative(self):
    """Return True if this metric is cumulative (deltas reported), False if absolute.

    Membership is decided purely by the metric's name.
    """
    cumulative_metric_names = frozenset((
        "app.cpu",
        "app.uptime",
        "app.disk.bytes",
        "app.disk.requests",
        "app.mem.majflt",
        "app.io.wait",
    ))
    return self.name in cumulative_metric_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cumulative_returns(self):\n return (1 + self.pct_change).cumprod()", "def get_cumulative_model(self):\n cm = None\n for ac in self.ac:\n if ac[0] is None:\n continue\n m = self.get_model(last=False, a=ac[0], c=ac[1])\n cm = m if cm is None ...
[ "0.6869948", "0.67523324", "0.64255375", "0.6341759", "0.63019013", "0.6291231", "0.62111056", "0.6121284", "0.61202884", "0.60518414", "0.6049944", "0.60443413", "0.59521824", "0.5944532", "0.594417", "0.5942714", "0.5922843", "0.5915984", "0.58872694", "0.58869976", "0.5872...
0.5677045
30
Initializes the base class.
def __init__(self, pid, monitor_id, logger, file_pattern): self._pid = pid self._id = monitor_id self._file_pattern = file_pattern # The file object to be read. We always keep this open and just seek to zero when we need to # re-read it. Some of the /proc files do better with t...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(cls):", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n\t\tpass", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def _initialize(self):\n pass", "def _initial...
[ "0.8141555", "0.78926814", "0.78926814", "0.7889723", "0.78779274", "0.78779274", "0.78618664", "0.78618664", "0.78618664", "0.78496444", "0.7829186", "0.78273886", "0.78159505", "0.7813773", "0.7813773", "0.7813773", "0.7813773", "0.7813773", "0.7808769", "0.7808769", "0.780...
0.0
-1
Runs a single cycle of the sample collection. It should read the monitored file and extract all metrics.
def run_single_cycle(self, collector=None): self._timestamp = int(time.time()) # There are certain error conditions, such as the system not supporting # a particular proc file type, that we will never recover from. So, # just always early exit. if self._failed: ret...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\r\n self.collect_data()", "def gather_sample(self, my_file, collector=None):\n\n pass", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n ...
[ "0.6506419", "0.64478827", "0.6249575", "0.6136713", "0.6083549", "0.60691047", "0.5937353", "0.5804329", "0.5789543", "0.5710799", "0.5666293", "0.56556666", "0.5654352", "0.5652799", "0.5633741", "0.5620311", "0.5596868", "0.55945826", "0.5588704", "0.5581774", "0.5576543",...
0.71581453
0
Reads the metrics from the file and records them. Derived classes must override this method to perform the actual work of collecting their specific samples.
def gather_sample(self, my_file, collector=None):
    """Read metrics from ``my_file`` and record them.

    Subclasses must override this hook to do the actual work of collecting
    their specific samples; the base implementation intentionally does
    nothing and returns None.
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_metrics(self):\n raise NotImplementedError()", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()...
[ "0.6955891", "0.69391584", "0.6471244", "0.63956815", "0.6393039", "0.63644856", "0.6352858", "0.6334597", "0.6094666", "0.6007557", "0.5965609", "0.59264725", "0.58482134", "0.5830391", "0.58277", "0.5824727", "0.58074886", "0.57781744", "0.57719123", "0.5734532", "0.5728365...
0.65097046
2
Closes any files held open by this reader.
def close(self):
    """Close any file held open by this reader.

    The failure flag is raised before attempting the close and lowered only
    if the close succeeds, so an exception from ``close()`` leaves the
    reader marked as failed. The handle reference is always dropped.
    """
    try:
        # Pessimistically mark as failed; cleared below on success.
        self._failed = True
        handle = self._file
        if handle is not None:
            handle.close()
        self._failed = False
    finally:
        # Drop the reference whether or not the close raised.
        self._file = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close( self ):\n \n for file in self._files:\n ir.file_hub.close( file )", "def close_all_files(self):\r\n while self.close_file():\r\n pass", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.cu...
[ "0.7826722", "0.77460366", "0.7503366", "0.72826236", "0.72478783", "0.7243321", "0.7204818", "0.7204593", "0.7146203", "0.7134031", "0.7112463", "0.71020526", "0.70459485", "0.7036263", "0.70164305", "0.7010969", "0.6980717", "0.6947164", "0.6919161", "0.68919945", "0.688708...
0.6589813
53
Returns the number of centiseconds (1/100ths secs) for the given number of jiffies (a timing unit used by the kernel).
def __calculate_time_cs(self, jiffies):
    """Convert a jiffy count to centiseconds (1/100ths of a second).

    Jiffies are the kernel's internal timing unit; ``self._jiffies_per_sec``
    holds the system's jiffies-per-second rate.
    """
    centiseconds = jiffies * 100.0 / self._jiffies_per_sec
    return int(centiseconds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def millis(): \n return int(round(mo...
[ "0.7653799", "0.652349", "0.6424848", "0.6171948", "0.6161103", "0.6105997", "0.5744971", "0.56402147", "0.5638903", "0.5606995", "0.5588372", "0.5540272", "0.5520585", "0.5486425", "0.54827213", "0.5427575", "0.5426606", "0.5411524", "0.54085886", "0.54085886", "0.54085886",...
0.7872766
0